diff --git a/.github/workflows/tb_plugin_ci.yml b/.github/workflows/tb_plugin_ci.yml index 6931501e6..0bcc60cd7 100644 --- a/.github/workflows/tb_plugin_ci.yml +++ b/.github/workflows/tb_plugin_ci.yml @@ -5,13 +5,13 @@ on: branches: - master - release/** - - tb_plugin + - plugin/** pull_request: branches: - master - release/** - - tb_plugin + - plugin/** jobs: build: @@ -37,6 +37,6 @@ jobs: set -e cd tb_plugin sh ./ci_scripts/install_env.sh - pip install . + pip install .[gs] cd test pytest diff --git a/libkineto/CMakeLists.txt b/libkineto/CMakeLists.txt index 003de73b0..3f61dd92f 100644 --- a/libkineto/CMakeLists.txt +++ b/libkineto/CMakeLists.txt @@ -42,19 +42,19 @@ endif() # Set LIBKINETO_NOCUPTI to explicitly disable CUPTI # Otherwise, CUPTI is disabled if not found -IF (NOT CUDA_SOURCE_DIR AND NOT CUPTI_INCLUDE_DIR) +IF (NOT CUDA_SOURCE_DIR OR NOT CUPTI_INCLUDE_DIR OR NOT CUDA_cupti_LIBRARY) set(LIBKINETO_NOCUPTI ON CACHE BOOL "" FORCE) endif() # Define file lists if (LIBKINETO_NOCUPTI) - get_filelist("get_libkineto_cpu_only_srcs()" LIBKINETO_SRCS) + get_filelist("get_libkineto_cpu_only_srcs(with_api=False)" LIBKINETO_SRCS) message(INFO " CUPTI unavailable or disabled - not building GPU profilers") else() - get_filelist("get_libkineto_srcs()" LIBKINETO_SRCS) + get_filelist("get_libkineto_srcs(with_api=False)" LIBKINETO_SRCS) endif() get_filelist("get_libkineto_public_headers()" LIBKINETO_PUBLIC_HEADERS) -set(LIBKINETO_API_SRCS "${LIBKINETO_SOURCE_DIR}/libkineto_api.cpp") +get_filelist("get_libkineto_api_srcs()" LIBKINETO_API_SRCS) add_library(kineto_base OBJECT ${LIBKINETO_SRCS}) add_library(kineto_api OBJECT ${LIBKINETO_API_SRCS}) @@ -69,13 +69,22 @@ set_target_properties(kineto_base kineto_api PROPERTIES CXX_EXTENSIONS NO CXX_VISIBILITY_PRESET hidden) -target_compile_options(kineto_base PRIVATE "-DKINETO_NAMESPACE=libkineto" -"-std=gnu++14") -target_compile_options(kineto_api PRIVATE "-std=gnu++14") +set(KINETO_COMPILE_OPTIONS "-DKINETO_NAMESPACE=libkineto") 
+list(APPEND KINETO_COMPILE_OPTIONS "-DFMT_HEADER_ONLY") +if(NOT MSVC) + list(APPEND KINETO_COMPILE_OPTIONS "-std=c++14") +else() + list(APPEND KINETO_COMPILE_OPTIONS "/std:c++14") + list(APPEND KINETO_COMPILE_OPTIONS "-DWIN32_LEAN_AND_MEAN") + list(APPEND KINETO_COMPILE_OPTIONS "-DNOGDI") +endif() if (NOT LIBKINETO_NOCUPTI) - target_compile_options(kineto_base PRIVATE "-DHAS_CUPTI") + list(APPEND KINETO_COMPILE_OPTIONS "-DHAS_CUPTI") endif() +target_compile_options(kineto_base PRIVATE "${KINETO_COMPILE_OPTIONS}") +target_compile_options(kineto_api PRIVATE "${KINETO_COMPILE_OPTIONS}") + if(NOT TARGET fmt) if(NOT FMT_SOURCE_DIR) set(FMT_SOURCE_DIR "${LIBKINETO_THIRDPARTY_DIR}/fmt" @@ -95,6 +104,8 @@ if(NOT TARGET fmt) endif() set(FMT_INCLUDE_DIR "${FMT_SOURCE_DIR}/include") +message(STATUS "Kineto: FMT_SOURCE_DIR = ${FMT_SOURCE_DIR}") +message(STATUS "Kineto: FMT_INCLUDE_DIR = ${FMT_INCLUDE_DIR}") if (NOT CUPTI_INCLUDE_DIR) set(CUPTI_INCLUDE_DIR "${CUDA_SOURCE_DIR}/extras/CUPTI/include") endif() @@ -112,6 +123,7 @@ target_include_directories(kineto_base PUBLIC $) target_include_directories(kineto_api PUBLIC + $ $) if(KINETO_LIBRARY_TYPE STREQUAL "default") @@ -132,10 +144,11 @@ else() message(FATAL_ERROR "Unsupported library type ${KINETO_LIBRARY_TYPE}") endif() -target_link_libraries(kineto "${CUDA_cupti_LIBRARY}") - -target_link_libraries(kineto $) -add_dependencies(kineto fmt) +if(NOT LIBKINETO_NOCUPTI) + target_link_libraries(kineto "${CUDA_cupti_LIBRARY}") +endif() +target_link_libraries(kineto $) +add_dependencies(kineto fmt::fmt-header-only) install(TARGETS kineto EXPORT kinetoLibraryConfig ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} diff --git a/libkineto/include/ActivityProfilerInterface.h b/libkineto/include/ActivityProfilerInterface.h index 92cfc14d7..bc6a050e8 100644 --- a/libkineto/include/ActivityProfilerInterface.h +++ b/libkineto/include/ActivityProfilerInterface.h @@ -9,6 +9,7 @@ #include #include +#include #include #include "ActivityType.h" @@ 
-17,7 +18,7 @@ namespace libkineto { class ActivityProfilerController; -class CpuTraceBuffer; +struct CpuTraceBuffer; class Config; class ActivityProfilerInterface { @@ -76,6 +77,14 @@ class ActivityProfilerInterface { virtual bool enableForRegion(const std::string& match) { return true; } + + // Saves information for the current thread to be used in profiler output + // Client must record any new kernel thread where the activity has occurred. + virtual void recordThreadInfo() {} + + // Record trace metadata, currently supporting only string key and values, + // values with the same key are overwritten + virtual void addMetadata(const std::string& key, const std::string& value) = 0; }; } // namespace libkineto diff --git a/libkineto/include/ActivityTraceInterface.h b/libkineto/include/ActivityTraceInterface.h index ebddcc7c1..28def766d 100644 --- a/libkineto/include/ActivityTraceInterface.h +++ b/libkineto/include/ActivityTraceInterface.h @@ -12,7 +12,7 @@ namespace libkineto { -class TraceActivity; +struct TraceActivity; class ActivityTraceInterface { public: diff --git a/libkineto/include/ActivityType.h b/libkineto/include/ActivityType.h index 6a377d29e..2dcf1d7f4 100644 --- a/libkineto/include/ActivityType.h +++ b/libkineto/include/ActivityType.h @@ -7,15 +7,30 @@ #pragma once +#include +#include + namespace libkineto { enum class ActivityType { - CPU_OP, + CPU_OP = 0, // cpu side ops + USER_ANNOTATION, + GPU_USER_ANNOTATION, GPU_MEMCPY, GPU_MEMSET, - CONCURRENT_KERNEL, + CONCURRENT_KERNEL, // on-device kernels EXTERNAL_CORRELATION, - CUDA_RUNTIME + CUDA_RUNTIME, // host side cuda runtime events + GLOW_RUNTIME, // host side glow runtime events + CPU_INSTANT_EVENT, // host side point-like events + ENUM_COUNT }; +const char* toString(ActivityType t); +ActivityType toActivityType(const std::string& str); + +// Return an array of all activity types except COUNT +constexpr int activityTypeCount = (int)ActivityType::ENUM_COUNT; +const std::array activityTypes(); + } // 
namespace libkineto diff --git a/libkineto/include/ClientTraceActivity.h b/libkineto/include/ClientTraceActivity.h deleted file mode 100644 index aefd2f6dd..000000000 --- a/libkineto/include/ClientTraceActivity.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (c) Facebook, Inc. and its affiliates. - * All rights reserved. - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. - */ - -#pragma once - -#include -#include - -#include "TraceActivity.h" - -namespace libkineto { - -struct ClientTraceActivity : TraceActivity { - ClientTraceActivity() = default; - ClientTraceActivity(ClientTraceActivity&&) = default; - ClientTraceActivity& operator=(ClientTraceActivity&&) = default; - ~ClientTraceActivity() override {} - - int64_t deviceId() const override { - return cachedPid(); - } - - int64_t resourceId() const override { - return sysThreadId; - } - - int64_t timestamp() const override { - return startTime; - } - - int64_t duration() const override { - return endTime - startTime; - } - - int64_t correlationId() const override { - return correlation; - } - - ActivityType type() const override { - return ActivityType::CPU_OP; - } - - const std::string name() const override { - return opType; - } - - const TraceActivity* linkedActivity() const override { - return nullptr; - } - - void log(ActivityLogger& logger) const override { - // Unimplemented by default - } - - int64_t startTime{0}; - int64_t endTime{0}; - int64_t correlation{0}; - int device{-1}; - // TODO: Add OS abstraction - pthread_t pthreadId{}; - int32_t sysThreadId{0}; - std::string opType; - std::string inputDims; - std::string inputTypes; - std::string arguments; - std::string outputDims; - std::string outputTypes; - std::string inputNames; - std::string outputNames; - std::string callStack; -}; - -} // namespace libkineto diff --git a/libkineto/include/GenericTraceActivity.h b/libkineto/include/GenericTraceActivity.h new file 
mode 100644 index 000000000..9eb7a6cb9 --- /dev/null +++ b/libkineto/include/GenericTraceActivity.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include +#include + +#include "ThreadUtil.h" +#include "TraceActivity.h" +#include "TraceSpan.h" + +namespace libkineto { + +// @lint-ignore-every CLANGTIDY cppcoreguidelines-non-private-member-variables-in-classes +// @lint-ignore-every CLANGTIDY cppcoreguidelines-pro-type-member-init +class GenericTraceActivity : public TraceActivity { + + public: + GenericTraceActivity() = delete; + + GenericTraceActivity( + const TraceSpan& trace, ActivityType type, const std::string& name) + : activityType(type), activityName(name), traceSpan_(&trace) { + } + + int64_t deviceId() const override { + return device; + } + + int64_t resourceId() const override { + return resource; + } + + int64_t timestamp() const override { + return startTime; + } + + int64_t duration() const override { + return endTime - startTime; + } + + int64_t correlationId() const override { + return id; + } + + ActivityType type() const override { + return activityType; + } + + const std::string name() const override { + return activityName; + } + + const TraceActivity* linkedActivity() const override { + return nullptr; + } + + const TraceSpan* traceSpan() const override { + return traceSpan_; + } + + void log(ActivityLogger& logger) const override; + + //Encode client side metadata as a key/value string. 
+ void addMetadata(const std::string& key, const std::string& value) { + metadata_.push_back(fmt::format("\"{}\": {}", key, value)); + } + + const std::string getMetadata() const { + return fmt::format("{}", fmt::join(metadata_, ", ")); + } + + virtual ~GenericTraceActivity() {}; + + int64_t startTime{0}; + int64_t endTime{0}; + int32_t id{0}; + int32_t device{0}; + int32_t resource{0}; + ActivityType activityType; + std::string activityName; + + private: + const TraceSpan* traceSpan_; + std::vector metadata_; +}; + +} // namespace libkineto diff --git a/libkineto/include/IActivityProfiler.h b/libkineto/include/IActivityProfiler.h new file mode 100644 index 000000000..9af4535a9 --- /dev/null +++ b/libkineto/include/IActivityProfiler.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include + +#include "GenericTraceActivity.h" +#include "ActivityTraceInterface.h" + +/* This file includes an abstract base class for an activity profiler + * that can be implemented by multiple tracing agents in the application. + * The high level Kineto profiler can co-ordinate start and end of tracing + * and combine together events from multiple such activity profilers. + */ + +namespace libkineto { + +enum class TraceStatus { + READY, // Accepting trace requests + WARMUP, // Performing trace warmup + RECORDING, // Actively collecting activities + PROCESSING, // Recording is complete, preparing results + ERROR, // One or more errors (and possibly also warnings) occurred. + WARNING, // One or more warnings occurred. +}; + +/* IActivityProfilerSession: + * an opaque object that can be used by a high level profiler to + * start/stop and return trace events. 
+ */ +class IActivityProfilerSession { + + public: + virtual ~IActivityProfilerSession() {} + + // start the trace collection synchronously + virtual void start() = 0; + + // stop the trace collection synchronously + virtual void stop() = 0; + + TraceStatus status() { + return status_; + } + + // returns list of Trace Activities + virtual std::vector& activities() = 0; + + // returns errors with this trace + virtual std::vector errors() = 0; + + // processes trace activities using logger + virtual void processTrace(ActivityLogger& logger) = 0; + + // XXX define trace formats + // virtual save(string name, TraceFormat format) + + protected: + TraceStatus status_ = TraceStatus::READY; +}; + + +/* Activity Profiler Plugins: + * These allow other frameworks to integrate into Kineto's primary + * activity profiler. While the primary activity profiler handles + * timing the trace collections and correlating events the plugins + * can become source of new trace activity types. + */ +class IActivityProfiler { + + public: + + virtual ~IActivityProfiler() {} + + // name of profiler + virtual const std::string& name() const = 0; + + // returns activity types this profiler supports + virtual const std::set& availableActivities() const = 0; + + // Calls prepare() on registered tracer providers passing in the relevant + // activity types. Returns a profiler session handle (including uuid?). + virtual std::unique_ptr configure( + const std::set& activity_types, + const std::string& config="") = 0; + + // asynchronous version of the above with future timestamp and duration. 
+ virtual std::unique_ptr configure( + int64_t ts_ms, + int64_t duration_ms, + const std::set& activity_types, + const std::string& config = "") = 0; +}; + +} // namespace libkineto diff --git a/libkineto/include/ThreadUtil.h b/libkineto/include/ThreadUtil.h new file mode 100644 index 000000000..343680e33 --- /dev/null +++ b/libkineto/include/ThreadUtil.h @@ -0,0 +1,35 @@ +#pragma once + +#include +#include +#include +#include + +namespace libkineto { + +int32_t systemThreadId(); +int32_t threadId(); +bool setThreadName(const std::string& name); +std::string getThreadName(); + +int32_t processId(); +std::string processName(int32_t pid); + +struct ProcessInfo { + int32_t pid; + const std::string name; + const std::string label; +}; + +struct ThreadInfo { + ThreadInfo(int32_t tid, const std::string& name) : + tid(tid), name(name) {} + int32_t tid; + const std::string name; +}; + +// Return a list of pids and process names for the current process +// and its parents. +std::vector> pidCommandPairsOfAncestors(); + +} // namespace libkineto diff --git a/libkineto/include/TraceActivity.h b/libkineto/include/TraceActivity.h index d93787071..fbf0dc673 100644 --- a/libkineto/include/TraceActivity.h +++ b/libkineto/include/TraceActivity.h @@ -8,14 +8,13 @@ #pragma once #include -#include -#include #include "ActivityType.h" namespace libkineto { class ActivityLogger; +struct TraceSpan; // Generic activity interface is borrowed from tensorboard protobuf format. struct TraceActivity { @@ -35,23 +34,16 @@ struct TraceActivity { virtual const std::string name() const = 0; // Optional linked activity virtual const TraceActivity* linkedActivity() const = 0; + // Optional containing trace object + virtual const TraceSpan* traceSpan() const = 0; // Log activity virtual void log(ActivityLogger& logger) const = 0; -}; - -namespace { - // Caching pid is not safe across forks and clones but we currently - // don't support an active profiler in a forked process. 
- static inline pid_t cachedPid() { - static pid_t pid = getpid(); - return pid; - } - static inline int64_t nsToUs(int64_t ns) { + static int64_t nsToUs(int64_t ns) { // It's important that this conversion is the same everywhere. // No rounding! return ns / 1000; } -} +}; } // namespace libkineto diff --git a/libkineto/include/TraceSpan.h b/libkineto/include/TraceSpan.h index eac3e793a..f33e18fb9 100644 --- a/libkineto/include/TraceSpan.h +++ b/libkineto/include/TraceSpan.h @@ -7,12 +7,26 @@ #pragma once +#include #include #include namespace libkineto { struct TraceSpan { + TraceSpan() = delete; + TraceSpan( + int64_t startTime, int64_t endTime, std::string name) + : startTime(startTime), endTime(endTime), name(std::move(name)) { + } + TraceSpan( + int opCount, int it, std::string name, std::string prefix) + : opCount(opCount), + iteration(it), + name(std::move(name)), + prefix(std::move(prefix)) { + } + // FIXME: change to duration? int64_t startTime{0}; int64_t endTime{0}; @@ -20,8 +34,10 @@ struct TraceSpan { int iteration{-1}; // Name is used to identify timeline std::string name; - // Prefix used to distinguish sub-nets on the same timeline + // Prefix used to distinguish trace spans on the same timeline std::string prefix; + // Tracked by profiler for iteration trigger + bool tracked{false}; }; } // namespace libkineto diff --git a/libkineto/include/libkineto.h b/libkineto/include/libkineto.h index c7d723c72..f0ab1d619 100644 --- a/libkineto/include/libkineto.h +++ b/libkineto/include/libkineto.h @@ -9,7 +9,6 @@ #pragma once -#include #include #include #include @@ -24,9 +23,11 @@ #include "ActivityTraceInterface.h" #include "ActivityType.h" #include "ClientInterface.h" -#include "ClientTraceActivity.h" +#include "GenericTraceActivity.h" #include "TraceSpan.h" +#include "ThreadUtil.h" + extern "C" { void suppressLibkinetoLogMessages(); bool libkineto_init(bool cpuOnly, bool logOnError); @@ -37,9 +38,9 @@ namespace libkineto { class Config; struct 
CpuTraceBuffer { - TraceSpan span; + TraceSpan span{0, 0, "none"}; int gpuOpCount; - std::vector activities; + std::vector activities; }; class LibkinetoApi { @@ -98,7 +99,7 @@ class LibkinetoApi { std::unique_ptr activityProfiler_{}; ClientInterface* client_{}; - pthread_t clientRegisterThread_{0}; + int32_t clientRegisterThread_{0}; bool isLoaded_{false}; std::atomic_int netSizeThreshold_{}; @@ -108,4 +109,3 @@ class LibkinetoApi { LibkinetoApi& api(); } // namespace libkineto - diff --git a/libkineto/include/time_since_epoch.h b/libkineto/include/time_since_epoch.h index 5a813ada4..e90388d95 100644 --- a/libkineto/include/time_since_epoch.h +++ b/libkineto/include/time_since_epoch.h @@ -12,7 +12,7 @@ namespace libkineto { inline int64_t timeSinceEpoch( - const std::chrono::time_point& t) { + const std::chrono::time_point& t) { return std::chrono::duration_cast( t.time_since_epoch()) .count(); diff --git a/libkineto/libkineto_defs.bzl b/libkineto/libkineto_defs.bzl index 333f9f8c9..fcaa2d5fe 100644 --- a/libkineto/libkineto_defs.bzl +++ b/libkineto/libkineto_defs.bzl @@ -3,7 +3,13 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
-def get_libkineto_srcs(): +def get_libkineto_api_srcs(): + return [ + "src/ThreadUtil.cpp", + "src/libkineto_api.cpp", + ] + +def get_libkineto_srcs(with_api = True): return [ "src/AbstractConfig.cpp", "src/ActivityProfiler.cpp", @@ -11,6 +17,7 @@ def get_libkineto_srcs(): "src/ActivityProfilerProxy.cpp", "src/Config.cpp", "src/ConfigLoader.cpp", + "src/CudaDeviceProperties.cpp", "src/CuptiActivityInterface.cpp", "src/CuptiEventInterface.cpp", "src/CuptiMetricInterface.cpp", @@ -19,17 +26,14 @@ "src/EventProfilerController.cpp", "src/GenericTraceActivity.cpp", "src/Logger.cpp", - "src/ProcessInfo.cpp", - "src/ThreadName.cpp", "src/WeakSymbols.cpp", "src/cupti_strings.cpp", "src/init.cpp", - "src/libkineto_api.cpp", "src/output_csv.cpp", "src/output_json.cpp", - ] + ] + (get_libkineto_api_srcs() if with_api else []) -def get_libkineto_cpu_only_srcs(): +def get_libkineto_cpu_only_srcs(with_api = True): return [ "src/AbstractConfig.cpp", "src/ActivityProfiler.cpp", @@ -41,21 +45,21 @@ "src/Demangle.cpp", "src/GenericTraceActivity.cpp", "src/Logger.cpp", - "src/ProcessInfo.cpp", - "src/ThreadName.cpp", "src/init.cpp", - "src/libkineto_api.cpp", "src/output_csv.cpp", "src/output_json.cpp", - ] + ] + (get_libkineto_api_srcs() if with_api else []) def get_libkineto_public_headers(): return [ "include/ActivityProfilerInterface.h", "include/ActivityType.h", "include/ClientInterface.h", + "include/GenericTraceActivity.h", "include/TraceActivity.h", + "include/IActivityProfiler.h", "include/TraceSpan.h", + "include/ThreadUtil.h", "include/libkineto.h", "include/time_since_epoch.h", ] diff --git a/libkineto/src/AbstractConfig.cpp b/libkineto/src/AbstractConfig.cpp index ebb18ba9c..5d36a909f 100644 --- a/libkineto/src/AbstractConfig.cpp +++ b/libkineto/src/AbstractConfig.cpp @@ -7,6 +7,7 @@ #include "AbstractConfig.h" +#include #include #include diff --git 
a/libkineto/src/AbstractConfig.h b/libkineto/src/AbstractConfig.h index a4477d9ed..7b631ec39 100644 --- a/libkineto/src/AbstractConfig.h +++ b/libkineto/src/AbstractConfig.h @@ -52,6 +52,7 @@ class AbstractConfig { return timestamp_; } + // Source config string that this was parsed from const std::string& source() const { return source_; } diff --git a/libkineto/src/ActivityBuffers.h b/libkineto/src/ActivityBuffers.h index f1004bb15..e482be217 100644 --- a/libkineto/src/ActivityBuffers.h +++ b/libkineto/src/ActivityBuffers.h @@ -10,7 +10,6 @@ #include #include -#include #include "libkineto.h" #include "CuptiActivityBuffer.h" @@ -19,7 +18,7 @@ namespace KINETO_NAMESPACE { struct ActivityBuffers { std::list> cpu; - std::unique_ptr> gpu; + std::unique_ptr gpu; }; } // namespace KINETO_NAMESPACE diff --git a/libkineto/src/ActivityLoggerFactory.h b/libkineto/src/ActivityLoggerFactory.h new file mode 100644 index 000000000..c22d07c2c --- /dev/null +++ b/libkineto/src/ActivityLoggerFactory.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#pragma once + +#include +#include +#include +#include +#include + +namespace KINETO_NAMESPACE { + +class ActivityLogger; + +class ActivityLoggerFactory { + + public: + using FactoryFunc = + std::function(const std::string& url)>; + + // Add logger factory for a protocol prefix + void addProtocol(const std::string& protocol, FactoryFunc f) { + factories_[tolower(protocol)] = f; + } + + // Create a logger, invoking the factory for the protocol specified in url + std::unique_ptr makeLogger(const std::string& url) const { + std::string protocol = extractProtocol(url); + auto it = factories_.find(tolower(protocol)); + if (it != factories_.end()) { + return it->second(stripProtocol(url)); + } + throw std::invalid_argument(fmt::format( + "No logger registered for the {} protocol prefix", + protocol)); + return nullptr; + } + + private: + static std::string tolower(std::string s) { + std::transform(s.begin(), s.end(), s.begin(), + [](unsigned char c) { return std::tolower(c); } + ); + return s; + } + + static std::string extractProtocol(std::string url) { + return url.substr(0, url.find("://")); + } + + static std::string stripProtocol(std::string url) { + size_t pos = url.find("://"); + return pos == url.npos ? 
url : url.substr(pos + 3); + } + + std::map factories_; +}; + +} // namespace KINETO_NAMESPACE diff --git a/libkineto/src/ActivityProfiler.cpp b/libkineto/src/ActivityProfiler.cpp index d4e343946..cb4237370 100644 --- a/libkineto/src/ActivityProfiler.cpp +++ b/libkineto/src/ActivityProfiler.cpp @@ -8,10 +8,7 @@ #include "ActivityProfiler.h" #include -#include -#include #include -#include #include #include #include @@ -32,6 +29,7 @@ #include "output_base.h" #include "Logger.h" +#include "ThreadUtil.h" using namespace std::chrono; using namespace libkineto; @@ -40,7 +38,7 @@ using std::string; namespace KINETO_NAMESPACE { bool ActivityProfiler::iterationTargetMatch( - const libkineto::CpuTraceBuffer& trace) { + libkineto::CpuTraceBuffer& trace) { const string& name = trace.span.name; bool match = (name == netIterationsTarget_); if (!match && applyNetFilterInternal(name) && @@ -55,6 +53,7 @@ bool ActivityProfiler::iterationTargetMatch( } if (match) { netIterationsTarget_ = name; + trace.span.tracked = true; LOG(INFO) << "Tracking net " << name << " for " << netIterationsTargetCount_ << " iterations"; } @@ -127,6 +126,7 @@ void ActivityProfiler::processTraceInternal(ActivityLogger& logger) { << " CPU buffers"; VLOG(0) << "Profile time range: " << captureWindowStartTime_ << " - " << captureWindowEndTime_; + logger.handleTraceStart(metadata_); for (auto& cpu_trace : traceBuffers_->cpu) { string trace_name = cpu_trace->span.name; VLOG(0) << "Processing CPU buffer for " << trace_name << " (" @@ -159,13 +159,17 @@ void ActivityProfiler::processTraceInternal(ActivityLogger& logger) { } #endif // HAS_CUPTI + for (const auto& session : sessions_){ + LOG(INFO) << "Processing child profiler trace"; + session->processTrace(logger); + } + finalizeTrace(*config_, logger); } ActivityProfiler::CpuGpuSpanPair& ActivityProfiler::recordTraceSpan( TraceSpan& span, int gpuOpCount) { - TraceSpan gpu_span{ - 0, 0, gpuOpCount, span.iteration, span.name, "GPU: "}; + TraceSpan 
gpu_span(gpuOpCount, span.iteration, span.name, "GPU: "); auto& iterations = traceSpans_[span.name]; iterations.push_back({span, gpu_span}); return iterations.back(); @@ -183,11 +187,9 @@ void ActivityProfiler::processCpuTrace( CpuGpuSpanPair& span_pair = recordTraceSpan(cpuTrace.span, cpuTrace.gpuOpCount); TraceSpan& cpu_span = span_pair.first; for (auto const& act : cpuTrace.activities) { - VLOG(2) << act.correlationId() << ": OP " << act.opType - << " tid: " << act.pthreadId; - if (logTrace) { - logger.handleCpuActivity(act, cpu_span); - recordThreadInfo(act.sysThreadId, act.pthreadId); + VLOG(2) << act.correlationId() << ": OP " << act.activityName; + if (logTrace && config_->selectedActivityTypes().count(act.type())) { + act.log(logger); } // Stash event so we can look it up later when processing GPU trace externalEvents_.insertEvent(&act); @@ -195,9 +197,6 @@ void ActivityProfiler::processCpuTrace( } if (logTrace) { logger.handleTraceSpan(cpu_span); - if (cpu_span.name == netIterationsTarget_) { - logger.handleIterationStart(cpu_span); - } } else { disabledTraceSpans_.insert(cpu_span.name); } @@ -206,48 +205,28 @@ void ActivityProfiler::processCpuTrace( #ifdef HAS_CUPTI inline void ActivityProfiler::handleCorrelationActivity( const CUpti_ActivityExternalCorrelation* correlation) { - switch(correlation->externalKind) { - case CUPTI_EXTERNAL_CORRELATION_KIND_CUSTOM0: - externalEvents_.addCorrelation( - correlation->externalId, - correlation->correlationId, - ExternalEventMap::CorrelationFlowType::Default); - break; - case CUPTI_EXTERNAL_CORRELATION_KIND_CUSTOM1: - externalEvents_.addCorrelation( - correlation->externalId, - correlation->correlationId, - ExternalEventMap::CorrelationFlowType::User); - break; - default: - LOG(ERROR) << "Received correlation activity with undefined kind: " - << correlation->externalKind; - break; - } - VLOG(2) << correlation->correlationId - << ": CUPTI_ACTIVITY_KIND_EXTERNAL_CORRELATION"; + externalEvents_.addCorrelation( + 
correlation->externalId, correlation->correlationId); } #endif // HAS_CUPTI -const libkineto::ClientTraceActivity& -ActivityProfiler::ExternalEventMap::getClientTraceActivity( - uint32_t id, CorrelationFlowType flowType) { - static const libkineto::ClientTraceActivity nullOp_{}; +const libkineto::GenericTraceActivity& +ActivityProfiler::ExternalEventMap::correlatedActivity(uint32_t id) { + static const libkineto::GenericTraceActivity nullOp_( + defaultTraceSpan().first, ActivityType::CPU_OP, "NULL"); - auto& correlationMap = getCorrelationMap(flowType); - - auto* res = events_[correlationMap[id]]; + auto* res = events_[correlationMap_[id]]; if (res == nullptr) { // Entry may be missing because cpu trace hasn't been processed yet // Insert a dummy element so that we can check for this in insertEvent - events_[correlationMap[id]] = &nullOp_; + events_[correlationMap_[id]] = &nullOp_; res = &nullOp_; } return *res; } void ActivityProfiler::ExternalEventMap::insertEvent( - const libkineto::ClientTraceActivity* op) { + const libkineto::GenericTraceActivity* op) { if (events_[op->correlationId()] != nullptr) { LOG_EVERY_N(WARNING, 100) << "Events processed out of order - link will be missing"; @@ -256,52 +235,59 @@ void ActivityProfiler::ExternalEventMap::insertEvent( } void ActivityProfiler::ExternalEventMap::addCorrelation( - uint64_t external_id, uint32_t cuda_id, CorrelationFlowType flowType) { - switch(flowType){ - case Default: - defaultCorrelationMap_[cuda_id] = external_id; - break; - case User: - userCorrelationMap_[cuda_id] = external_id; - break; - } + uint64_t external_id, uint32_t cuda_id) { + correlationMap_[cuda_id] = external_id; } -static void initUserGpuSpan(GenericTraceActivity& userTraceActivity, - const libkineto::TraceActivity& cpuTraceActivity, - const libkineto::TraceActivity& gpuTraceActivity) { - userTraceActivity.device = gpuTraceActivity.deviceId(); - userTraceActivity.resource = gpuTraceActivity.resourceId(); - userTraceActivity.startTime = 
gpuTraceActivity.timestamp(); - userTraceActivity.endTime = gpuTraceActivity.timestamp() + gpuTraceActivity.duration(); - userTraceActivity.correlation = cpuTraceActivity.correlationId(); - userTraceActivity.activityType = cpuTraceActivity.type(); - userTraceActivity.activityName = cpuTraceActivity.name(); +static GenericTraceActivity createUserGpuSpan( + const libkineto::TraceActivity& cpuTraceActivity, + const libkineto::TraceActivity& gpuTraceActivity) { + GenericTraceActivity res( + *cpuTraceActivity.traceSpan(), + ActivityType::GPU_USER_ANNOTATION, + cpuTraceActivity.name()); + res.startTime = gpuTraceActivity.timestamp(); + res.device = gpuTraceActivity.deviceId(); + res.resource = gpuTraceActivity.resourceId(); + res.endTime = + gpuTraceActivity.timestamp() + gpuTraceActivity.duration(); + res.id = cpuTraceActivity.correlationId(); + return res; } void ActivityProfiler::GpuUserEventMap::insertOrExtendEvent( - const TraceActivity& cpuTraceActivity, - const TraceActivity& gpuTraceActivity) { - StreamKey key(gpuTraceActivity.deviceId(), gpuTraceActivity.resourceId()); - CorrelationSpanMap& correlationSpanMap = streamSpanMap[key]; - if (correlationSpanMap.count(cpuTraceActivity.correlationId()) == 0) { - GenericTraceActivity& userTraceActivity = correlationSpanMap[cpuTraceActivity.correlationId()]; - initUserGpuSpan(userTraceActivity, cpuTraceActivity, gpuTraceActivity); - } - GenericTraceActivity& userTraceActivity = correlationSpanMap[cpuTraceActivity.correlationId()]; - if (gpuTraceActivity.timestamp() < userTraceActivity.startTime || userTraceActivity.startTime == 0) { - userTraceActivity.startTime = gpuTraceActivity.timestamp(); - } - if ((gpuTraceActivity.timestamp() + gpuTraceActivity.duration()) > userTraceActivity.endTime) { - userTraceActivity.endTime = gpuTraceActivity.timestamp() + gpuTraceActivity.duration(); + const TraceActivity&, + const TraceActivity& gpuActivity) { + const TraceActivity& cpuActivity = *gpuActivity.linkedActivity(); + StreamKey 
key(gpuActivity.deviceId(), gpuActivity.resourceId()); + CorrelationSpanMap& correlationSpanMap = streamSpanMap_[key]; + auto it = correlationSpanMap.find(cpuActivity.correlationId()); + if (it == correlationSpanMap.end()) { + auto it_success = correlationSpanMap.insert({ + cpuActivity.correlationId(), createUserGpuSpan(cpuActivity, gpuActivity) + }); + it = it_success.first; + } + GenericTraceActivity& span = it->second; + if (gpuActivity.timestamp() < span.startTime || span.startTime == 0) { + span.startTime = gpuActivity.timestamp(); + } + int64_t gpu_activity_end = gpuActivity.timestamp() + gpuActivity.duration(); + if (gpu_activity_end > span.endTime) { + span.endTime = gpu_activity_end; } } +const ActivityProfiler::CpuGpuSpanPair& ActivityProfiler::defaultTraceSpan() { + static TraceSpan span(0, 0, "Unknown", ""); + static CpuGpuSpanPair span_pair(span, span); + return span_pair; +} + void ActivityProfiler::GpuUserEventMap::logEvents(ActivityLogger *logger) { - for (auto const& streamMapPair : streamSpanMap) { + for (auto const& streamMapPair : streamSpanMap_) { for (auto const& correlationSpanPair : streamMapPair.second) { - logger->handleGenericActivity( - correlationSpanPair.second); + correlationSpanPair.second.log(*logger); } } } @@ -332,9 +318,8 @@ inline void ActivityProfiler::handleRuntimeActivity( VLOG(2) << activity->correlationId << ": CUPTI_ACTIVITY_KIND_RUNTIME, cbid=" << activity->cbid << " tid=" << activity->threadId; - const ClientTraceActivity& ext = - externalEvents_.getClientTraceActivity(activity->correlationId, - ExternalEventMap::CorrelationFlowType::Default); + const GenericTraceActivity& ext = + externalEvents_.correlatedActivity(activity->correlationId); int32_t tid = activity->threadId; const auto& it = threadInfo_.find(tid); if (it != threadInfo_.end()) { @@ -398,26 +383,31 @@ inline void ActivityProfiler::handleGpuActivity( if (!loggingDisabled(ext)) { act.log(*logger); updateGpuNetSpan(act); - const ClientTraceActivity& extUser = 
- externalEvents_.getClientTraceActivity(act.correlationId(), - ExternalEventMap::CorrelationFlowType::User); + /* + const GenericTraceActivity& extUser = + externalEvents_.correlatedActivity(act.correlationId()); // Correlated CPU activity cannot have timestamp greater than the GPU activity's if (!timestampsInCorrectOrder(extUser, act)) { return; } - if (extUser.correlationId() != 0) { VLOG(2) << extUser.correlationId() << "," << act.correlationId() << " (user): "<< act.name(); - gpuUserEventMap_.insertOrExtendEvent(extUser, act); +*/ + if (config_->selectedActivityTypes().count(ActivityType::GPU_USER_ANNOTATION) && + act.linkedActivity() && + act.linkedActivity()->type() == ActivityType::USER_ANNOTATION) { + //gpuUserEventMap_.insertOrExtendEvent(act, act); } +// } } } template -inline void ActivityProfiler::handleGpuActivity(const T* act, ActivityLogger* logger) { - const ClientTraceActivity& extDefault = externalEvents_.getClientTraceActivity(act->correlationId, - ExternalEventMap::CorrelationFlowType::Default); +inline void ActivityProfiler::handleGpuActivity( + const T* act, ActivityLogger* logger) { + const GenericTraceActivity& extDefault = + externalEvents_.correlatedActivity(act->correlationId); handleGpuActivity(GpuActivity(act, extDefault), logger); } @@ -455,6 +445,24 @@ void ActivityProfiler::handleCuptiActivity(const CUpti_Activity* record, Activit } #endif // HAS_CUPTI +void ActivityProfiler::configureChildProfilers() { + // If child profilers are enabled create profiler sessions + for (auto& profiler: profilers_) { + int64_t start_time_ms = duration_cast( + profileStartTime_.time_since_epoch()).count(); + LOG(INFO) << "Running child profiler " << profiler->name() << " for " + << config_->activitiesOnDemandDuration().count() << " ms"; + auto session = profiler->configure( + start_time_ms, + config_->activitiesOnDemandDuration().count(), + std::set{ActivityType::CPU_OP} // TODO make configurable + ); + if (session) { + 
sessions_.push_back(std::move(session)); + } + } +} + void ActivityProfiler::configure( const Config& config, const time_point& now) { @@ -500,13 +508,13 @@ void ActivityProfiler::configure( LOG(INFO) << "Enabling GPU tracing"; cupti_.setMaxBufferSize(config_->activitiesMaxGpuBufferSize()); - time_point timestamp; + time_point timestamp; if (VLOG_IS_ON(1)) { - timestamp = high_resolution_clock::now(); + timestamp = system_clock::now(); } cupti_.enableCuptiActivities(config_->selectedActivityTypes()); if (VLOG_IS_ON(1)) { - auto t2 = high_resolution_clock::now(); + auto t2 = system_clock::now(); addOverheadSample( setupOverhead_, duration_cast(t2 - timestamp).count()); } @@ -518,6 +526,11 @@ void ActivityProfiler::configure( if (profileStartTime_ < now) { profileStartTime_ = now + config_->activitiesWarmupDuration(); } + + if (profilers_.size() > 0) { + configureChildProfilers(); + } + LOG(INFO) << "Tracing starting in " << duration_cast(profileStartTime_ - now).count() << "s"; @@ -532,6 +545,10 @@ void ActivityProfiler::startTraceInternal(const time_point& now) { libkineto::api().client()->start(); } VLOG(0) << "Warmup -> CollectTrace"; + for (auto& session: sessions_){ + LOG(INFO) << "Starting child profiler session"; + session->start(); + } currentRunloopState_ = RunloopState::CollectTrace; } @@ -541,13 +558,13 @@ void ActivityProfiler::stopTraceInternal(const time_point& now) { } #ifdef HAS_CUPTI if (!cpuOnly_) { - time_point timestamp; + time_point timestamp; if (VLOG_IS_ON(1)) { - timestamp = high_resolution_clock::now(); + timestamp = system_clock::now(); } cupti_.disableCuptiActivities(config_->selectedActivityTypes()); if (VLOG_IS_ON(1)) { - auto t2 = high_resolution_clock::now(); + auto t2 = system_clock::now(); addOverheadSample( setupOverhead_, duration_cast(t2 - timestamp).count()); } @@ -560,6 +577,10 @@ void ActivityProfiler::stopTraceInternal(const time_point& now) { static_cast::type>( currentRunloopState_.load()); } + for (auto& session: 
sessions_){ + LOG(INFO) << "Stopping child profiler session"; + session->stop(); + } currentRunloopState_ = RunloopState::ProcessTrace; } @@ -593,6 +614,7 @@ const time_point ActivityProfiler::performRunLoopStep( stopTraceInternal(now); resetInternal(); VLOG(0) << "Warmup -> WaitForRequest"; + break; } #endif // HAS_CUPTI @@ -618,7 +640,7 @@ const time_point ActivityProfiler::performRunLoopStep( // FIXME: Is this a good idea for synced start? { std::lock_guard guard(mutex_); - profileEndTime_ = time_point( + profileEndTime_ = time_point( microseconds(captureWindowStartTime_)) + config_->activitiesOnDemandDuration(); } @@ -656,23 +678,6 @@ const time_point ActivityProfiler::performRunLoopStep( return new_wakeup_time; } -// Extract process name from /proc/pid/cmdline. This does not have -// the 16 character limit that /proc/pid/status and /prod/pid/comm has. -const string processName(pid_t pid) { - FILE* cmdfile = fopen(fmt::format("/proc/{}/cmdline", pid).c_str(), "r"); - if (cmdfile != nullptr) { - char* command = nullptr; - int scanned = fscanf(cmdfile, "%ms", &command); - if (scanned > 0 && command) { - string ret(basename(command)); - free(command); - return ret; - } - } - VLOG(1) << "Failed to read process name for pid " << pid; - return ""; -} - void ActivityProfiler::finalizeTrace(const Config& config, ActivityLogger& logger) { LOG(INFO) << "Recorded nets:"; { @@ -683,9 +688,9 @@ void ActivityProfiler::finalizeTrace(const Config& config, ActivityLogger& logge } // Process names - string process_name = processName(getpid()); + string process_name = processName(processId()); if (!process_name.empty()) { - pid_t pid = getpid(); + int32_t pid = processId(); logger.handleProcessInfo( {pid, process_name, "CPU"}, captureWindowStartTime_); if (!cpuOnly_) { @@ -698,6 +703,7 @@ void ActivityProfiler::finalizeTrace(const Config& config, ActivityLogger& logge } } } + // Thread info for (auto pair : threadInfo_) { const auto& thread_info = pair.second; @@ -730,6 +736,8 @@ 
void ActivityProfiler::resetTraceData() { clientActivityTraceMap_.clear(); disabledTraceSpans_.clear(); traceBuffers_ = nullptr; + metadata_.clear(); + sessions_.clear(); } diff --git a/libkineto/src/ActivityProfiler.h b/libkineto/src/ActivityProfiler.h index 9d9958ee6..8c06e29e2 100644 --- a/libkineto/src/ActivityProfiler.h +++ b/libkineto/src/ActivityProfiler.h @@ -21,12 +21,12 @@ #include #include -#include "ProcessInfo.h" -#include "ThreadName.h" +#include "ThreadUtil.h" #include "TraceSpan.h" #include "libkineto.h" #include "output_base.h" #include "GenericTraceActivity.h" +#include "IActivityProfiler.h" namespace KINETO_NAMESPACE { @@ -102,27 +102,48 @@ class ActivityProfiler { return *config_; } + inline void recordThreadInfo() { + int32_t sysTid = systemThreadId(); + int32_t tid = threadId(); + std::lock_guard guard(mutex_); + if (threadInfo_.find(tid) == threadInfo_.end()) { + threadInfo_.emplace( + tid, + ThreadInfo(sysTid, getThreadName())); + } + } + + void addMetadata(const std::string& key, const std::string& value) { + std::lock_guard guard(mutex_); + metadata_[key] = value; + } + + void addActivityProfiler( + std::shared_ptr profiler) { + std::lock_guard guard(mutex_); + profilers_.push_back(profiler); + } + + protected: + + using CpuGpuSpanPair = std::pair; + static const CpuGpuSpanPair& defaultTraceSpan(); + private: + class ExternalEventMap { public: - enum CorrelationFlowType { - // Default flow type - Default, - // User annotated flow type - User - }; // The correlation id of the GPU activity - const libkineto::ClientTraceActivity& getClientTraceActivity( - uint32_t correlation_id, CorrelationFlowType flowType); - void insertEvent(const libkineto::ClientTraceActivity* op); + const libkineto::GenericTraceActivity& correlatedActivity( + uint32_t correlation_id); + void insertEvent(const libkineto::GenericTraceActivity* op); - void addCorrelation(uint64_t external_id, uint32_t cuda_id, CorrelationFlowType flowType); + void addCorrelation(uint64_t 
external_id, uint32_t cuda_id); void clear() { events_.clear(); - defaultCorrelationMap_.clear(); - userCorrelationMap_.clear(); + correlationMap_.clear(); } private: @@ -131,7 +152,7 @@ class ActivityProfiler { // but this class also fully owns the objects it is pointing to so // it's not so bad. This is done for performance reasons and is an // implementation detail of this class that might change. - std::unordered_map + std::unordered_map events_; // Cuda correlation id -> external correlation id for default events @@ -142,23 +163,7 @@ class ActivityProfiler { std::unordered_map< uint32_t, // Cuda correlation ID uint64_t> // External correlation ID - defaultCorrelationMap_; - - // Cuda correlation id -> external correlation id for user annotated - // events - // CUPTI provides a mechanism for correlating Cuda events to arbitrary - // external events, e.g.operator events from Caffe2. - // It also marks GPU activities with the Cuda event correlation ID. - // So by connecting the two, we get the complete picture. - std::unordered_map< - uint32_t, // Cuda correlation ID - uint64_t> // External correlation ID - userCorrelationMap_; - - std::unordered_map& - getCorrelationMap(CorrelationFlowType flowType) { - return flowType == User ? 
userCorrelationMap_ : defaultCorrelationMap_; - } + correlationMap_; }; // Map of gpu activities to user defined events @@ -173,16 +178,17 @@ class ActivityProfiler { void logEvents(ActivityLogger *logger); void clear() { - streamSpanMap.clear(); + streamSpanMap_.clear(); } private: // device id and stream name - typedef std::pair StreamKey; + using StreamKey = std::pair; // map of correlation id to TraceSpan - typedef std::unordered_map CorrelationSpanMap; - std::map streamSpanMap; + using CorrelationSpanMap = + std::unordered_map; + std::map streamSpanMap_; }; GpuUserEventMap gpuUserEventMap_; @@ -205,6 +211,8 @@ class ActivityProfiler { void finalizeTrace(const Config& config, ActivityLogger& logger); + void configureChildProfilers(); + // Process a single CPU trace void processCpuTrace( libkineto::CpuTraceBuffer& cpuTrace, @@ -219,13 +227,11 @@ class ActivityProfiler { // Record client trace span for subsequent lookups from activities // Also creates a corresponding GPU-side span. - using CpuGpuSpanPair = std::pair; CpuGpuSpanPair& recordTraceSpan(TraceSpan& span, int gpuOpCount); // Returns true if net name is to be tracked for a specified number of // iterations. - bool iterationTargetMatch( - const libkineto::CpuTraceBuffer& trace); + bool iterationTargetMatch(libkineto::CpuTraceBuffer& trace); // net name to id int netId(const std::string& netName); @@ -249,21 +255,13 @@ class ActivityProfiler { // Is logging disabled for this event? // Logging can be disabled due to operator count, net name filter etc. 
- inline bool loggingDisabled(const libkineto::TraceActivity& act) { + inline bool loggingDisabled(const libkineto::TraceActivity& act) const { const auto& it = clientActivityTraceMap_.find(act.correlationId()); return it != clientActivityTraceMap_.end() && disabledTraceSpans_.find(it->second->first.name) != disabledTraceSpans_.end(); } - inline void recordThreadInfo(pid_t tid, pthread_t pthreadId) { - if (threadInfo_.find((int32_t)pthreadId) == threadInfo_.end()) { - threadInfo_.emplace( - (int32_t)pthreadId, - ThreadInfo((int32_t) tid, getThreadName(tid))); - } - } - void resetTraceData(); void addOverheadSample(profilerOverhead& counter, int64_t overhead) { @@ -307,7 +305,8 @@ class ActivityProfiler { // Maintain a map of client trace activity to trace span. // Maps correlation id -> TraceSpan* held by traceSpans_. - std::unordered_map clientActivityTraceMap_; + using ActivityTraceMap = std::unordered_map; + ActivityTraceMap clientActivityTraceMap_; // Cache thread names and system thread ids for pthread ids // Note we're using the lower 32 bits of the (opaque) pthread id @@ -365,6 +364,14 @@ class ActivityProfiler { // Buffers where trace data is stored std::unique_ptr traceBuffers_; + // Trace metadata + std::unordered_map metadata_; + + // child activity profilers + std::vector> profilers_; + + // a vector of active profiler plugin sessions + std::vector> sessions_; }; } // namespace KINETO_NAMESPACE diff --git a/libkineto/src/ActivityProfilerController.cpp b/libkineto/src/ActivityProfilerController.cpp index e85421442..64b049b74 100644 --- a/libkineto/src/ActivityProfilerController.cpp +++ b/libkineto/src/ActivityProfilerController.cpp @@ -10,9 +10,10 @@ #include #include +#include "ActivityLoggerFactory.h" #include "ActivityTrace.h" #include "CuptiActivityInterface.h" -#include "ThreadName.h" +#include "ThreadUtil.h" #include "output_json.h" #include "output_membuf.h" @@ -22,8 +23,7 @@ using namespace std::chrono; namespace KINETO_NAMESPACE { -constexpr 
milliseconds kDefaultInactiveProfilerIntervalMsecs(1000); -constexpr milliseconds kDefaultActiveProfilerIntervalMsecs(200); +constexpr milliseconds kProfilerIntervalMsecs(1000); ActivityProfilerController::ActivityProfilerController(bool cpuOnly) { profiler_ = std::make_unique(CuptiActivityInterface::singleton(), cpuOnly); @@ -40,31 +40,29 @@ ActivityProfilerController::~ActivityProfilerController() { VLOG(0) << "Stopped activity profiler"; } +static ActivityLoggerFactory initLoggerFactory() { + ActivityLoggerFactory factory; + factory.addProtocol("file", [](const std::string& url) { + return std::unique_ptr(new ChromeTraceLogger(url)); + }); + return factory; +} + static ActivityLoggerFactory& loggerFactory() { - static ActivityLoggerFactory factory{nullptr}; + static ActivityLoggerFactory factory = initLoggerFactory(); return factory; } -void ActivityProfilerController::setLoggerFactory( - const ActivityLoggerFactory& factory) { - loggerFactory() = factory; +void ActivityProfilerController::addLoggerFactory( + const std::string& protocol, ActivityLoggerFactory::FactoryFunc factory) { + loggerFactory().addProtocol(protocol, factory); } static std::unique_ptr makeLogger(const Config& config) { if (config.activitiesLogToMemory()) { return std::make_unique(config); } - if (loggerFactory()) { - return loggerFactory()(config); - } - return std::make_unique( - config.activitiesLogFile(), - CuptiActivityInterface::singleton().smCount()); -} - -static milliseconds profilerInterval(bool profilerActive) { - return profilerActive ? 
kDefaultActiveProfilerIntervalMsecs - : kDefaultInactiveProfilerIntervalMsecs; + return loggerFactory().makeLogger(config.activitiesLogUrl()); } void ActivityProfilerController::profilerLoop() { @@ -72,7 +70,7 @@ void ActivityProfilerController::profilerLoop() { VLOG(0) << "Entering activity profiler loop"; auto now = system_clock::now(); - auto next_wakeup_time = now + profilerInterval(false); + auto next_wakeup_time = now + kProfilerIntervalMsecs; while (!stopRunloop_) { now = system_clock::now(); @@ -94,7 +92,7 @@ void ActivityProfilerController::profilerLoop() { } while (next_wakeup_time < now) { - next_wakeup_time += kDefaultActiveProfilerIntervalMsecs; + next_wakeup_time += kProfilerIntervalMsecs; } if (profiler_->isActive()) { @@ -144,7 +142,12 @@ std::unique_ptr ActivityProfilerController::stopTrace() auto logger = std::make_unique(profiler_->config()); profiler_->processTrace(*logger); profiler_->reset(); - return std::make_unique(std::move(logger), CuptiActivityInterface::singleton()); + return std::make_unique(std::move(logger), loggerFactory()); +} + +void ActivityProfilerController::addMetadata( + const std::string& key, const std::string& value) { + profiler_->addMetadata(key, value); } } // namespace KINETO_NAMESPACE diff --git a/libkineto/src/ActivityProfilerController.h b/libkineto/src/ActivityProfilerController.h index 33540109f..56eab1109 100644 --- a/libkineto/src/ActivityProfilerController.h +++ b/libkineto/src/ActivityProfilerController.h @@ -11,6 +11,7 @@ #include #include +#include "ActivityLoggerFactory.h" #include "ActivityProfiler.h" #include "ActivityProfilerInterface.h" #include "ActivityTraceInterface.h" @@ -20,9 +21,6 @@ namespace KINETO_NAMESPACE { class Config; -using ActivityLoggerFactory = - std::function(const Config&)>; - class ActivityProfilerController { public: explicit ActivityProfilerController(bool cpuOnly); @@ -32,7 +30,9 @@ class ActivityProfilerController { ~ActivityProfilerController(); - static void 
setLoggerFactory(const ActivityLoggerFactory& factory); + static void addLoggerFactory( + const std::string& protocol, + ActivityLoggerFactory::FactoryFunc factory); void scheduleTrace(const Config& config); @@ -57,6 +57,12 @@ class ActivityProfilerController { return profiler_->transferCpuTrace(std::move(cpuTrace)); } + void recordThreadInfo() { + profiler_->recordThreadInfo(); + } + + void addMetadata(const std::string& key, const std::string& value); + private: void profilerLoop(); diff --git a/libkineto/src/ActivityProfilerProxy.cpp b/libkineto/src/ActivityProfilerProxy.cpp index 3eddbe41c..bb8cbb06e 100644 --- a/libkineto/src/ActivityProfilerProxy.cpp +++ b/libkineto/src/ActivityProfilerProxy.cpp @@ -84,4 +84,13 @@ bool ActivityProfilerProxy::enableForRegion(const std::string& match) { return controller_->traceInclusionFilter(match); } +void ActivityProfilerProxy::addMetadata( + const std::string& key, const std::string& value) { + controller_->addMetadata(key, value); +} + +void ActivityProfilerProxy::recordThreadInfo() { + controller_->recordThreadInfo(); +} + } // namespace libkineto diff --git a/libkineto/src/ActivityProfilerProxy.h b/libkineto/src/ActivityProfilerProxy.h index 1eebfd63c..639c6d8d4 100644 --- a/libkineto/src/ActivityProfilerProxy.h +++ b/libkineto/src/ActivityProfilerProxy.h @@ -40,6 +40,8 @@ class ActivityProfilerProxy : public ActivityProfilerInterface { bool isActive() override; + void recordThreadInfo() override; + void scheduleTrace(const std::string& configStr) override; void scheduleTrace(const Config& config); @@ -58,6 +60,8 @@ class ActivityProfilerProxy : public ActivityProfilerInterface { bool enableForRegion(const std::string& match) override; + void addMetadata(const std::string& key, const std::string& value) override; + private: bool cpuOnly_{true}; ActivityProfilerController* controller_{nullptr}; diff --git a/libkineto/src/ActivityTrace.h b/libkineto/src/ActivityTrace.h index a10a25cfc..cd730c2e8 100644 --- 
a/libkineto/src/ActivityTrace.h +++ b/libkineto/src/ActivityTrace.h @@ -10,8 +10,8 @@ #include #include +#include "ActivityLoggerFactory.h" #include "ActivityTraceInterface.h" -#include "CuptiActivityInterface.h" #include "output_json.h" #include "output_membuf.h" @@ -20,22 +20,31 @@ namespace libkineto { class ActivityTrace : public ActivityTraceInterface { public: ActivityTrace( - std::unique_ptr logger, - CuptiActivityInterface& cuptiActivities) - : logger_(std::move(logger)), cuptiActivities_(cuptiActivities) {} + std::unique_ptr tmpLogger, + const ActivityLoggerFactory& factory) + : memLogger_(std::move(tmpLogger)), + loggerFactory_(factory) { + } const std::vector>* activities() override { - return logger_->traceActivities(); + return memLogger_->traceActivities(); }; - void save(const std::string& path) override { - ChromeTraceLogger chrome_logger(path, cuptiActivities_.smCount()); - return logger_->log(chrome_logger); + void save(const std::string& url) override { + std::string prefix; + // if no protocol is specified, default to file + if (url.find("://") == url.npos) { + prefix = "file://"; + } + memLogger_->log(*loggerFactory_.makeLogger(prefix + url)); }; private: - std::unique_ptr logger_; - CuptiActivityInterface& cuptiActivities_; + // Activities are logged into a buffer + std::unique_ptr memLogger_; + + // Alternative logger used by save() if protocol prefix is specified + const ActivityLoggerFactory& loggerFactory_; }; } // namespace libkineto diff --git a/libkineto/src/ActivityType.cpp b/libkineto/src/ActivityType.cpp new file mode 100644 index 000000000..403aa2d1b --- /dev/null +++ b/libkineto/src/ActivityType.cpp @@ -0,0 +1,60 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "ActivityType.h" + +#include + +namespace libkineto { + +struct ActivityTypeName { + const char* name; + ActivityType type; +}; + +static constexpr std::array map{{ + {"cpu_op", ActivityType::CPU_OP}, + {"user_annotation", ActivityType::USER_ANNOTATION}, + {"gpu_user_annotation", ActivityType::GPU_USER_ANNOTATION}, + {"gpu_memcpy", ActivityType::GPU_MEMCPY}, + {"gpu_memset", ActivityType::GPU_MEMSET}, + {"kernel", ActivityType::CONCURRENT_KERNEL}, + {"external_correlation", ActivityType::EXTERNAL_CORRELATION}, + {"cuda_runtime", ActivityType::CUDA_RUNTIME}, + {"glow_runtime", ActivityType::GLOW_RUNTIME}, + {"cpu_instant_event", ActivityType::CPU_INSTANT_EVENT}, + {"ENUM_COUNT", ActivityType::ENUM_COUNT} +}}; + +static constexpr bool matchingOrder(int idx = 0) { + return map[idx].type == ActivityType::ENUM_COUNT || + ((idx == (int) map[idx].type) && matchingOrder(idx + 1)); +} +static_assert(matchingOrder(), "ActivityTypeName map is out of order"); + +const char* toString(ActivityType t) { + return map[(int)t].name; +} + +ActivityType toActivityType(const std::string& str) { + for (int i = 0; i < activityTypeCount; i++) { + if (str == map[i].name) { + return map[i].type; + } + } + throw std::invalid_argument(fmt::format("Invalid activity type: {}", str)); +} + +const std::array activityTypes() { + std::array res; + for (int i = 0; i < activityTypeCount; i++) { + res[i] = map[i].type; + } + return res; +} + +} // namespace libkineto diff --git a/libkineto/src/Config.cpp b/libkineto/src/Config.cpp index 4fb60e71b..154ed5ac4 100644 --- a/libkineto/src/Config.cpp +++ b/libkineto/src/Config.cpp @@ -8,8 +8,8 @@ #include "Config.h" #include -#include +#include #include #include #include @@ -17,8 +17,10 @@ #include #include #include +#include #include "Logger.h" +#include "ThreadUtil.h" using namespace std::chrono; @@ -34,7 +36,7 @@ constexpr int kDefaultActivitiesExternalAPIIterations(3); 
constexpr int kDefaultActivitiesExternalAPIGpuOpCountThreshold(0); constexpr int kDefaultActivitiesMaxGpuBufferSize(128 * 1024 * 1024); -constexpr seconds kDefaultActivitiesWarmupDurationSecs(15); +constexpr seconds kDefaultActivitiesWarmupDurationSecs(5); constexpr seconds kDefaultReportPeriodSecs(1); constexpr int kDefaultSamplesPerReport(1); constexpr int kDefaultMaxEventProfilersPerGpu(1); @@ -59,6 +61,7 @@ const string kHeartbeatMonitorPeriodKey = const string kActivitiesEnabledKey = "ACTIVITIES_ENABLED"; const string kActivityTypesKey = "ACTIVITY_TYPES"; const string kActivitiesLogFileKey = "ACTIVITIES_LOG_FILE"; +const string kActivitiesLogUrlKey = "ACTIVITIES_LOG_URL"; const string kActivitiesDurationKey = "ACTIVITIES_DURATION_SECS"; const string kActivitiesDurationMsecsKey = "ACTIVITIES_DURATION_MSECS"; const string kActivitiesIterationsKey = "ACTIVITIES_ITERATIONS"; @@ -70,13 +73,6 @@ const string kActivitiesWarmupDurationSecsKey = "ACTIVITIES_WARMUP_PERIOD_SECS"; const string kActivitiesMaxGpuBufferSizeKey = "ACTIVITIES_MAX_GPU_BUFFER_SIZE_MB"; -// Valid configuration file entries for activity types -const string kActivityMemcpy = "gpu_memcpy"; -const string kActivityMemset = "gpu_memset"; -const string kActivityConcurrentKernel = "concurrent_kernel"; -const string kActivityExternalCorrelation = "external_correlation"; -const string kActivityRuntime = "cuda_runtime"; - const string kDefaultLogFileFmt = "/tmp/libkineto_activities_{}.json"; // Common @@ -115,21 +111,21 @@ const string kConfigFile = "/etc/libkineto.conf"; // Max devices supported on any system constexpr uint8_t kMaxDevices = 8; -static std::map>& +static std::map>& configFactories() { - static std::map> + static std::map> factories; return factories; } void Config::addConfigFactory( std::string name, - std::function factory) { + std::function factory) { configFactories()[name] = factory; } static string defaultTraceFileName() { - return fmt::format(kDefaultLogFileFmt, getpid()); + return 
fmt::format(kDefaultLogFileFmt, processId()); } Config::Config() @@ -172,55 +168,38 @@ const seconds Config::maxRequestAge() const { return kMaxRequestAge; } -static char* printTime(time_point t, char* buf, int size) { +std::string getTimeStr(time_point t) { std::time_t t_c = system_clock::to_time_t(t); - std::tm lt; - localtime_r(&t_c, <); - std::strftime(buf, size, "%H:%M:%S", <); - return buf; + return fmt::format("{:%H:%M:%S}", fmt::localtime(t_c)); } static time_point handleRequestTimestamp(int64_t ms) { auto t = time_point(milliseconds(ms)); auto now = system_clock::now(); - char buf[32]; if (t > now) { throw std::invalid_argument(fmt::format( "Invalid {}: {} - time is in future", kRequestTimestampKey, - printTime(t, buf, sizeof(buf)))); + getTimeStr(t))); } else if ((now - t) > kMaxRequestAge) { throw std::invalid_argument(fmt::format( "Invalid {}: {} - time is more than {}s in the past", kRequestTimestampKey, - printTime(t, buf, sizeof(buf)), + getTimeStr(t), kMaxRequestAge.count())); } return t; } -void Config::addActivityTypes( +void Config::setActivityTypes( const std::vector& selected_activities) { + selectedActivityTypes_.clear(); if (selected_activities.size() > 0) { for (const auto& activity : selected_activities) { if (activity == "") { continue; - } else if (activity == kActivityMemcpy) { - selectedActivityTypes_.insert(ActivityType::GPU_MEMCPY); - } else if (activity == kActivityMemset) { - selectedActivityTypes_.insert(ActivityType::GPU_MEMSET); - } else if (activity == kActivityConcurrentKernel) { - selectedActivityTypes_.insert(ActivityType::CONCURRENT_KERNEL); - } else if (activity == kActivityExternalCorrelation) { - selectedActivityTypes_.insert(ActivityType::EXTERNAL_CORRELATION); - } else if (activity == kActivityRuntime) { - selectedActivityTypes_.insert(ActivityType::CUDA_RUNTIME); - } else { - throw std::invalid_argument(fmt::format( - "Invalid activity type selected: {}", - activity - )); } + 
selectedActivityTypes_.insert(toActivityType(activity)); } } } @@ -233,9 +212,6 @@ bool Config::handleOption(const std::string& name, std::string& val) { } else if (name == kMetricsKey) { vector metric_names = splitAndTrim(val, ','); metricNames_.insert(metric_names.begin(), metric_names.end()); - } else if (name == kActivityTypesKey) { - vector activity_types = splitAndTrim(toLower(val), ','); - addActivityTypes(activity_types); } else if (name == kSamplePeriodKey) { samplePeriod_ = milliseconds(toInt32(val)); } else if (name == kMultiplexPeriodKey) { @@ -262,6 +238,9 @@ bool Config::handleOption(const std::string& name, std::string& val) { activitiesOnDemandDuration_ = duration_cast(seconds(toInt32(val))); activitiesOnDemandTimestamp_ = timestamp(); + } else if (name == kActivityTypesKey) { + vector activity_types = splitAndTrim(toLower(val), ','); + setActivityTypes(activity_types); } else if (name == kActivitiesDurationMsecsKey) { activitiesOnDemandDuration_ = milliseconds(toInt32(val)); activitiesOnDemandTimestamp_ = timestamp(); @@ -284,6 +263,7 @@ bool Config::handleOption(const std::string& name, std::string& val) { activityProfilerEnabled_ = toBool(val); } else if (name == kActivitiesLogFileKey) { activitiesLogFile_ = val; + activitiesLogUrl_ = fmt::format("file://{}", val); activitiesOnDemandTimestamp_ = timestamp(); } else if (name == kActivitiesMaxGpuBufferSizeKey) { activitiesMaxGpuBufferSize_ = toInt32(val) * 1024 * 1024; @@ -309,7 +289,7 @@ std::chrono::milliseconds Config::activitiesOnDemandDurationDefault() const { }; void Config::updateActivityProfilerRequestReceivedTime() { - activitiesOnDemandTimestamp_ = high_resolution_clock::now(); + activitiesOnDemandTimestamp_ = system_clock::now(); } void Config::setClientDefaults() { @@ -386,9 +366,8 @@ void Config::printActivityProfilerConfig(std::ostream& s) const { << std::endl; if (hasRequestTimestamp()) { std::time_t t_c = system_clock::to_time_t(requestTimestamp()); - std::tm tm; s << "Trace request 
client timestamp: " - << std::put_time(localtime_r(&t_c, &tm), "%F %T") << std::endl; + << fmt::format("{:%Y-%m-%d %H:%M:%S}", fmt::localtime(t_c)) << std::endl; } s << "Trace duration: " << activitiesOnDemandDuration().count() << "ms" << std::endl; @@ -401,30 +380,12 @@ void Config::printActivityProfilerConfig(std::ostream& s) const { s << "Max GPU buffer size: " << activitiesMaxGpuBufferSize() / 1024 / 1024 << "MB" << std::endl; - s << "Enabled activities: "; + std::vector activities; for (const auto& activity : selectedActivityTypes_) { - switch(activity){ - case ActivityType::GPU_MEMCPY: - s << kActivityMemcpy << " "; - break; - case ActivityType::GPU_MEMSET: - s << kActivityMemset << " "; - break; - case ActivityType::CONCURRENT_KERNEL: - s << kActivityConcurrentKernel << " "; - break; - case ActivityType::EXTERNAL_CORRELATION: - s << kActivityExternalCorrelation << " "; - break; - case ActivityType::CUDA_RUNTIME: - s << kActivityRuntime << " "; - break; - default: - s << "UNKNOWN_ACTIVITY_NAME" << " "; - break; - } + activities.push_back(toString(activity)); } - s << std::endl; + s << "Enabled activities: " + << fmt::format("{}", fmt::join(activities, ",")) << std::endl; AbstractConfig::printActivityProfilerConfig(s); } diff --git a/libkineto/src/Config.h b/libkineto/src/Config.h index d2e1152d4..e8c94631f 100644 --- a/libkineto/src/Config.h +++ b/libkineto/src/Config.h @@ -53,6 +53,15 @@ class Config : public AbstractConfig { return activitiesLogFile_; } + // Log activitiy trace to this url + const std::string& activitiesLogUrl() const { + return activitiesLogUrl_; + } + + void setActivitiesLogUrl(const std::string& url) { + activitiesLogUrl_ = url; + } + bool activitiesLogToMemory() const { return activitiesLogToMemory_; } @@ -249,17 +258,17 @@ class Config : public AbstractConfig { return duration - (duration % alignment); } - std::chrono::time_point + std::chrono::time_point eventProfilerOnDemandStartTime() const { return eventProfilerOnDemandTimestamp_; 
} - std::chrono::time_point + std::chrono::time_point eventProfilerOnDemandEndTime() const { return eventProfilerOnDemandTimestamp_ + eventProfilerOnDemandDuration_; } - std::chrono::time_point + std::chrono::time_point activityProfilerRequestReceivedTime() const { return activitiesOnDemandTimestamp_; } @@ -272,7 +281,7 @@ class Config : public AbstractConfig { static void addConfigFactory( std::string name, - std::function factory); + std::function factory); void print(std::ostream& s) const; @@ -289,16 +298,14 @@ class Config : public AbstractConfig { // Adds valid activity types from the user defined string list in the // configuration file - void addActivityTypes(const std::vector& selected_activities); + void setActivityTypes(const std::vector& selected_activities); // Sets the default activity types to be traced void selectDefaultActivityTypes() { // If the user has not specified an activity list, add all types - selectedActivityTypes_.insert(ActivityType::GPU_MEMCPY); - selectedActivityTypes_.insert(ActivityType::GPU_MEMSET); - selectedActivityTypes_.insert(ActivityType::CONCURRENT_KERNEL); - selectedActivityTypes_.insert(ActivityType::EXTERNAL_CORRELATION); - selectedActivityTypes_.insert(ActivityType::CUDA_RUNTIME); + for (ActivityType t : activityTypes()) { + selectedActivityTypes_.insert(t); + } } int verboseLogLevel_; @@ -315,7 +322,7 @@ class Config : public AbstractConfig { // On-demand duration std::chrono::seconds eventProfilerOnDemandDuration_; // Last on-demand request - std::chrono::time_point + std::chrono::time_point eventProfilerOnDemandTimestamp_; int eventProfilerMaxInstancesPerGpu_; @@ -337,6 +344,8 @@ class Config : public AbstractConfig { // The activity profiler settings are all on-demand std::string activitiesLogFile_; + std::string activitiesLogUrl_; + // Log activities to memory buffer bool activitiesLogToMemory_{false}; @@ -355,7 +364,7 @@ class Config : public AbstractConfig { // Only profile nets with at least this many GPU 
operators int activitiesExternalAPIGpuOpCountThreshold_; // Last activity profiler request - std::chrono::time_point + std::chrono::time_point activitiesOnDemandTimestamp_; // Synchronized start timestamp diff --git a/libkineto/src/ConfigLoader.cpp b/libkineto/src/ConfigLoader.cpp index 79be0725b..e2726eeb2 100644 --- a/libkineto/src/ConfigLoader.cpp +++ b/libkineto/src/ConfigLoader.cpp @@ -7,7 +7,10 @@ #include "ConfigLoader.h" +#ifdef __linux__ #include +#endif + #include #include #include @@ -26,25 +29,37 @@ namespace KINETO_NAMESPACE { using namespace libkineto; const string kConfigFileEnvVar = "KINETO_CONFIG"; +#ifdef __linux__ const string kConfigFile = "/etc/libkineto.conf"; const string kOnDemandConfigFile = "/tmp/libkineto.conf"; +#else +const string kConfigFile = "libkineto.conf"; +const string kOnDemandConfigFile = "libkineto.conf"; +#endif constexpr std::chrono::seconds kConfigUpdateIntervalSecs(300); constexpr std::chrono::seconds kOnDemandConfigUpdateIntervalSecs(5); constexpr std::chrono::seconds kOnDemandConfigVerboseLogDurationSecs(120); +#ifdef __linux__ static struct sigaction originalUsr2Handler = {}; +#endif // Use SIGUSR2 to initiate profiling. // Look for an on-demand config file. // If none is found, default to base config. 
// Try to not affect existing handlers static bool hasOriginalSignalHandler() { +#ifdef __linux__ return originalUsr2Handler.sa_handler != nullptr || originalUsr2Handler.sa_sigaction != nullptr; +#else + return false; +#endif } static void handle_signal(int signal) { +#ifdef __linux__ if (signal == SIGUSR2) { ConfigLoader::instance().handleOnDemandSignal(); if (hasOriginalSignalHandler()) { @@ -55,9 +70,11 @@ static void handle_signal(int signal) { sigaction(SIGUSR2, &act, &originalUsr2Handler); } } +#endif } static void setupSignalHandler(bool enableSigUsr2) { +#ifdef __linux__ if (enableSigUsr2) { struct sigaction act = {}; act.sa_handler = &handle_signal; @@ -72,6 +89,7 @@ static void setupSignalHandler(bool enableSigUsr2) { sigaction(SIGUSR2, &originalUsr2Handler, nullptr); originalUsr2Handler = {}; } +#endif } // return an empty string if reading gets any errors. Otherwise a config string. @@ -83,8 +101,8 @@ static std::string readConfigFromConfigFile(const char* filename) { conf.assign( std::istreambuf_iterator(file), std::istreambuf_iterator()); } catch (std::exception& e) { - LOG(ERROR) << "Error in reading libkineto config from config file: " - << e.what(); + VLOG(0) << "Error reading " << filename << ": " + << e.what(); conf = ""; } return conf; @@ -108,7 +126,7 @@ ConfigLoader& ConfigLoader::instance() { // return an empty string if polling gets any errors. Otherwise a config string. 
std::string ConfigLoader::readOnDemandConfigFromDaemon( - time_point now) { + time_point now) { if (!daemonConfigLoader_) { return ""; } @@ -137,11 +155,12 @@ ConfigLoader::ConfigLoader(LibkinetoApi& api) if (configFileName_ == nullptr) { configFileName_ = kConfigFile.data(); } - config_.parse(readConfigFromConfigFile(configFileName_)); - SET_LOG_VERBOSITY_LEVEL(config_.verboseLogLevel(), config_.verboseLogModules()); - setupSignalHandler(config_.sigUsr2Enabled()); - if (daemonConfigLoaderFactory && daemonConfigLoaderFactory()) { + if (daemonConfigLoaderFactory()) { daemonConfigLoader_ = daemonConfigLoaderFactory()(); + } + updateBaseConfig(); + SET_LOG_VERBOSITY_LEVEL(config_.verboseLogLevel(), config_.verboseLogModules()); + if (daemonConfigLoader_) { daemonConfigLoader_->setCommunicationFabric(config_.ipcFabricEnabled()); } updateThread_ = @@ -168,7 +187,15 @@ void ConfigLoader::handleOnDemandSignal() { } void ConfigLoader::updateBaseConfig() { - const std::string config_str = readConfigFromConfigFile(configFileName_); + // First try reading local config file + // If that fails, read from daemon + // TODO: Invert these once daemon path fully rolled out + std::string config_str = readConfigFromConfigFile(configFileName_); + if (config_str.empty() && daemonConfigLoader_) { + // If local config file was not successfully loaded (e.g. 
not found) + // then try the daemon + config_str = daemonConfigLoader_->readBaseConfig(); + } if (config_str != config_.source()) { std::lock_guard lock(configLock_); config_.~Config(); @@ -177,12 +204,12 @@ void ConfigLoader::updateBaseConfig() { if (daemonConfigLoader_) { daemonConfigLoader_->setCommunicationFabric(config_.ipcFabricEnabled()); } + setupSignalHandler(config_.sigUsr2Enabled()); } - setupSignalHandler(config_.sigUsr2Enabled()); } void ConfigLoader::configureFromSignal( - time_point now, + time_point now, Config& config) { LOG(INFO) << "Received on-demand profiling signal, " << "reading config from " << kOnDemandConfigFile.data(); @@ -215,7 +242,7 @@ void ConfigLoader::configureFromSignal( } void ConfigLoader::configureFromDaemon( - time_point now, + time_point now, Config& config) { const std::string config_str = readOnDemandConfigFromDaemon(now); LOG_IF(INFO, !config_str.empty()) << "Received config from dyno:\n" @@ -241,14 +268,19 @@ void ConfigLoader::configureFromDaemon( } void ConfigLoader::updateConfigThread() { - auto now = high_resolution_clock::now(); + auto now = system_clock::now(); auto next_config_load_time = now + configUpdateIntervalSecs_; auto next_on_demand_load_time = now + onDemandConfigUpdateIntervalSecs_; auto next_log_level_reset_time = now; - seconds interval = - std::min(configUpdateIntervalSecs_, onDemandConfigUpdateIntervalSecs_); + seconds interval = configUpdateIntervalSecs_; + if (interval > onDemandConfigUpdateIntervalSecs_) { + interval = onDemandConfigUpdateIntervalSecs_; + } auto onDemandConfig = std::make_unique(); + // Refresh config before starting loop + updateBaseConfig(); + // This can potentially sleep for long periods of time, so allow // the desctructor to wake it to avoid a 5-minute long destruct period. 
for (;;) { @@ -259,7 +291,7 @@ void ConfigLoader::updateConfigThread() { if (stopFlag_) { break; } - now = high_resolution_clock::now(); + now = system_clock::now(); if (now > next_config_load_time) { updateBaseConfig(); next_config_load_time = now + configUpdateIntervalSecs_; diff --git a/libkineto/src/ConfigLoader.h b/libkineto/src/ConfigLoader.h index 6b1fdf072..eb953a6be 100644 --- a/libkineto/src/ConfigLoader.h +++ b/libkineto/src/ConfigLoader.h @@ -57,12 +57,12 @@ class ConfigLoader { // Create configuration when receiving SIGUSR2 void configureFromSignal( - std::chrono::time_point now, + std::chrono::time_point now, Config& config); // Create configuration when receiving request from a daemon void configureFromDaemon( - std::chrono::time_point now, + std::chrono::time_point now, Config& config); inline bool eventProfilerRequest(const Config& config) { @@ -72,7 +72,7 @@ class ConfigLoader { } std::string readOnDemandConfigFromDaemon( - std::chrono::time_point now); + std::chrono::time_point now); LibkinetoApi& libkinetoApi_; std::mutex configLock_; diff --git a/libkineto/src/CudaDeviceProperties.cpp b/libkineto/src/CudaDeviceProperties.cpp new file mode 100644 index 000000000..19591da93 --- /dev/null +++ b/libkineto/src/CudaDeviceProperties.cpp @@ -0,0 +1,129 @@ +/* + * Copyright (c) Kineto Contributors + * All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "CudaDeviceProperties.h" + +#include +#include + +#include +#include + +#include "Logger.h" + +namespace KINETO_NAMESPACE { + +static const std::vector createDeviceProps() { + std::vector props; + int device_count; + cudaError_t error_id = cudaGetDeviceCount(&device_count); + // Return empty vector if error. 
+ if (error_id != cudaSuccess) { + LOG(ERROR) << "cudaGetDeviceCount failed with code " << error_id; + return {}; + } + VLOG(0) << "Device count is " << device_count; + for (size_t i = 0; i < device_count; ++i) { + cudaDeviceProp prop; + error_id = cudaGetDeviceProperties(&prop, i); + // Return empty vector if any device property fail to get. + if (error_id != cudaSuccess) { + LOG(ERROR) << "cudaGetDeviceProperties failed with " << error_id; + return {}; + } + props.push_back(prop); + } + return props; +} + +static const std::vector& deviceProps() { + static const std::vector props = createDeviceProps(); + return props; +} + +static const std::string createDevicePropertiesJson( + size_t id, const cudaDeviceProp& props) { + return fmt::format(R"JSON( + {{ + "id": {}, "name": "{}", "totalGlobalMem": {}, + "computeMajor": {}, "computeMinor": {}, + "maxThreadsPerBlock": {}, "maxThreadsPerMultiprocessor": {}, + "regsPerBlock": {}, "regsPerMultiprocessor": {}, "warpSize": {}, + "sharedMemPerBlock": {}, "sharedMemPerMultiprocessor": {}, + "numSms": {}, "sharedMemPerBlockOptin": {} + }})JSON", + id, props.name, props.totalGlobalMem, + props.major, props.minor, + props.maxThreadsPerBlock, props.maxThreadsPerMultiProcessor, + props.regsPerBlock, props.regsPerMultiprocessor, props.warpSize, + props.sharedMemPerBlock, props.sharedMemPerMultiprocessor, + props.multiProcessorCount, props.sharedMemPerBlockOptin); +} + +static const std::string createDevicePropertiesJson() { + std::vector jsonProps; + const auto& props = deviceProps(); + for (size_t i = 0; i < props.size(); i++) { + jsonProps.push_back(createDevicePropertiesJson(i, props[i])); + } + return fmt::format("{}", fmt::join(jsonProps, ",")); +} + +const std::string& devicePropertiesJson() { + static std::string devicePropsJson = createDevicePropertiesJson(); + return devicePropsJson; +} + +int smCount(uint32_t deviceId) { + const std::vector &props = deviceProps(); + return deviceId >= props.size() ? 
0 : + props[deviceId].multiProcessorCount; +} + +float kernelOccupancy( + uint32_t deviceId, + uint16_t registersPerThread, + int32_t staticSharedMemory, + int32_t dynamicSharedMemory, + int32_t blockX, + int32_t blockY, + int32_t blockZ, + float blocksPerSm) { + // Calculate occupancy + float occupancy = -1.0; + const std::vector &props = deviceProps(); + if (deviceId < props.size()) { + cudaOccFuncAttributes occFuncAttr; + occFuncAttr.maxThreadsPerBlock = INT_MAX; + occFuncAttr.numRegs = registersPerThread; + occFuncAttr.sharedSizeBytes = staticSharedMemory; + occFuncAttr.partitionedGCConfig = PARTITIONED_GC_OFF; + occFuncAttr.shmemLimitConfig = FUNC_SHMEM_LIMIT_DEFAULT; + occFuncAttr.maxDynamicSharedSizeBytes = 0; + const cudaOccDeviceState occDeviceState = {}; + int blockSize = blockX * blockY * blockZ; + size_t dynamicSmemSize = dynamicSharedMemory; + cudaOccResult occ_result; + cudaOccDeviceProp prop(props[deviceId]); + cudaOccError status = cudaOccMaxActiveBlocksPerMultiprocessor( + &occ_result, &prop, &occFuncAttr, &occDeviceState, + blockSize, dynamicSmemSize); + if (status == CUDA_OCC_SUCCESS) { + if (occ_result.activeBlocksPerMultiprocessor < blocksPerSm) { + blocksPerSm = occ_result.activeBlocksPerMultiprocessor; + } + occupancy = blocksPerSm * blockSize / + (float) props[deviceId].maxThreadsPerMultiProcessor; + } else { + LOG_EVERY_N(ERROR, 1000) << "Failed to calculate occupancy, status = " + << status; + } + } + return occupancy; +} + +} // namespace KINETO_NAMESPACE diff --git a/libkineto/src/CudaDeviceProperties.h b/libkineto/src/CudaDeviceProperties.h new file mode 100644 index 000000000..b731fde0c --- /dev/null +++ b/libkineto/src/CudaDeviceProperties.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) Kineto Contributors + * All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#pragma once + +#include +#include + +namespace KINETO_NAMESPACE { + +int smCount(uint32_t deviceId); + +// Return estimated achieved occupancy for a kernel +float kernelOccupancy( + uint32_t deviceId, + uint16_t registersPerThread, + int32_t staticSharedMemory, + int32_t dynamicSharedMemory, + int32_t blockX, + int32_t blockY, + int32_t blockZ, + float blocks_per_sm); + +// Return compute properties for each device as a json string +const std::string& devicePropertiesJson(); + +} // namespace KINETO_NAMESPACE diff --git a/libkineto/src/CuptiActivity.h b/libkineto/src/CuptiActivity.h index b05e969a2..4ac38fe62 100644 --- a/libkineto/src/CuptiActivity.h +++ b/libkineto/src/CuptiActivity.h @@ -8,10 +8,9 @@ #pragma once #include -#include -#include #include "TraceActivity.h" +#include "ThreadUtil.h" #include "cupti_strings.h" namespace libkineto { @@ -21,6 +20,7 @@ namespace libkineto { namespace KINETO_NAMESPACE { using namespace libkineto; +struct TraceSpan; // These classes wrap the various CUPTI activity types // into subclasses of TraceActivity so that they can all be accessed @@ -31,13 +31,16 @@ template struct CuptiActivity : public TraceActivity { explicit CuptiActivity(const T* activity, const TraceActivity& linked) : activity_(*activity), linked_(linked) {} - int64_t timestamp() const override {return nsToUs(activity_.start);} + int64_t timestamp() const override { + return nsToUs(activity_.start); + } int64_t duration() const override { return nsToUs(activity_.end - activity_.start); } int64_t correlationId() const override {return activity_.correlationId;} const T& raw() const {return activity_;} const TraceActivity* linkedActivity() const override {return &linked_;} + const TraceSpan* traceSpan() const override {return nullptr;} protected: const T& activity_; @@ -51,7 +54,7 @@ struct RuntimeActivity : public CuptiActivity { const TraceActivity& linked, int32_t threadId) : CuptiActivity(activity, linked), threadId_(threadId) {} - int64_t deviceId() 
const override {return cachedPid();} + int64_t deviceId() const override {return processId();} int64_t resourceId() const override {return threadId_;} ActivityType type() const override {return ActivityType::CUDA_RUNTIME;} const std::string name() const override {return runtimeCbidName(activity_.cbid);} @@ -76,4 +79,3 @@ struct GpuActivity : public CuptiActivity { }; } // namespace KINETO_NAMESPACE - diff --git a/libkineto/src/CuptiActivityBuffer.h b/libkineto/src/CuptiActivityBuffer.h index 8dad1aa0f..d444b9399 100644 --- a/libkineto/src/CuptiActivityBuffer.h +++ b/libkineto/src/CuptiActivityBuffer.h @@ -8,28 +8,45 @@ #pragma once #include -#include -#include +#include #include +#include +#include +#include +#include namespace KINETO_NAMESPACE { class CuptiActivityBuffer { public: - // data must be allocated using malloc. - // Ownership is transferred to this object. - CuptiActivityBuffer(uint8_t* data, size_t validSize) - : data(data), validSize(validSize) {} + explicit CuptiActivityBuffer(size_t size) : size_(size) { + buf_.reserve(size); + } + CuptiActivityBuffer() = delete; + CuptiActivityBuffer& operator=(const CuptiActivityBuffer&) = delete; + CuptiActivityBuffer(CuptiActivityBuffer&&) = default; + CuptiActivityBuffer& operator=(CuptiActivityBuffer&&) = default; + + size_t size() const { + return size_; + } + + void setSize(size_t size) { + assert(size <= buf_.capacity()); + size_ = size; + } - ~CuptiActivityBuffer() { - free(data); + uint8_t* data() { + return buf_.data(); } - // Allocated by malloc - uint8_t* data{nullptr}; + private: - // Number of bytes used - size_t validSize; + std::vector buf_; + size_t size_; }; +using CuptiActivityBufferMap = + std::map>; + } // namespace KINETO_NAMESPACE diff --git a/libkineto/src/CuptiActivityInterface.cpp b/libkineto/src/CuptiActivityInterface.cpp index d34983710..987653964 100644 --- a/libkineto/src/CuptiActivityInterface.cpp +++ b/libkineto/src/CuptiActivityInterface.cpp @@ -7,10 +7,10 @@ #include 
"CuptiActivityInterface.h" +#include #include #include "cupti_call.h" - #include "Logger.h" using namespace std::chrono; @@ -30,6 +30,9 @@ CuptiActivityInterface& CuptiActivityInterface::singleton() { void CuptiActivityInterface::pushCorrelationID(int id, CorrelationFlowType type) { #ifdef HAS_CUPTI + if (!singleton().externalCorrelationEnabled_) { + return; + } VLOG(2) << "pushCorrelationID(" << id << ")"; switch(type) { case Default: @@ -45,6 +48,9 @@ void CuptiActivityInterface::pushCorrelationID(int id, CorrelationFlowType type) void CuptiActivityInterface::popCorrelationID(CorrelationFlowType type) { #ifdef HAS_CUPTI + if (!singleton().externalCorrelationEnabled_) { + return; + } switch(type) { case Default: CUPTI_CALL(cuptiActivityPopExternalCorrelationId( @@ -121,40 +127,53 @@ void CUPTIAPI CuptiActivityInterface::bufferRequestedTrampoline( singleton().bufferRequested(buffer, size, maxNumRecords); } -void CuptiActivityInterface::bufferRequested(uint8_t** buffer, size_t* size, size_t* maxNumRecords) { - if (allocatedGpuBufferCount >= maxGpuBufferCount_) { +void CuptiActivityInterface::bufferRequested( + uint8_t** buffer, size_t* size, size_t* maxNumRecords) { + std::lock_guard guard(mutex_); + if (allocatedGpuTraceBuffers_.size() >= maxGpuBufferCount_) { stopCollection = true; LOG(WARNING) << "Exceeded max GPU buffer count (" - << allocatedGpuBufferCount + << allocatedGpuTraceBuffers_.size() + << " > " << maxGpuBufferCount_ << ") - terminating tracing"; } + auto buf = std::make_unique(kBufSize); + *buffer = buf->data(); *size = kBufSize; - *maxNumRecords = 0; - // TODO(xdwang): create a list of buffers in advance so that we can reuse. 
- // This saves time to dynamically allocate new buffers (which could be costly - // if we allocated new space from the heap) - *buffer = (uint8_t*) malloc(kBufSize); + allocatedGpuTraceBuffers_[*buffer] = std::move(buf); - allocatedGpuBufferCount++; + *maxNumRecords = 0; } #endif -std::unique_ptr> CuptiActivityInterface::activityBuffers() { +std::unique_ptr +CuptiActivityInterface::activityBuffers() { + { + std::lock_guard guard(mutex_); + if (allocatedGpuTraceBuffers_.empty()) { + return nullptr; + } + } + #ifdef HAS_CUPTI VLOG(1) << "Flushing GPU activity buffers"; - time_point t1; + time_point t1; if (VLOG_IS_ON(1)) { - t1 = high_resolution_clock::now(); + t1 = system_clock::now(); } + // Can't hold mutex_ during this call, since bufferCompleted + // will be called by libcupti and mutex_ is acquired there. CUPTI_CALL(cuptiActivityFlushAll(CUPTI_ACTIVITY_FLAG_FLUSH_FORCED)); if (VLOG_IS_ON(1)) { flushOverhead = - duration_cast(high_resolution_clock::now() - t1).count(); + duration_cast(system_clock::now() - t1).count(); } #endif - return std::move(gpuTraceBuffers_); + std::lock_guard guard(mutex_); + // Transfer ownership of buffers to caller. A new map is created on-demand. 
+ return std::move(readyGpuTraceBuffers_); } #ifdef HAS_CUPTI @@ -175,35 +194,37 @@ int CuptiActivityInterface::processActivitiesForBuffer( #endif const std::pair CuptiActivityInterface::processActivities( - std::list& buffers, + CuptiActivityBufferMap& buffers, std::function handler) { std::pair res{0, 0}; #ifdef HAS_CUPTI - for (auto& buf : buffers) { + for (auto& pair : buffers) { // No lock needed - only accessed from this thread - res.first += processActivitiesForBuffer(buf.data, buf.validSize, handler); - res.second += buf.validSize; + auto& buf = pair.second; + res.first += processActivitiesForBuffer(buf->data(), buf->size(), handler); + res.second += buf->size(); } #endif return res; } void CuptiActivityInterface::clearActivities() { + { + std::lock_guard guard(mutex_); + if (allocatedGpuTraceBuffers_.empty()) { + return; + } + } + // Can't hold mutex_ during this call, since bufferCompleted + // will be called by libcupti and mutex_ is acquired there. CUPTI_CALL(cuptiActivityFlushAll(0)); // FIXME: We might want to make sure we reuse // the same memory during warmup and tracing. // Also, try to use the amount of memory required // for active tracing during warmup. - if (gpuTraceBuffers_) { - gpuTraceBuffers_->clear(); - } -} - -void CuptiActivityInterface::addActivityBuffer(uint8_t* buffer, size_t validSize) { - if (!gpuTraceBuffers_) { - gpuTraceBuffers_ = std::make_unique>(); - } - gpuTraceBuffers_->emplace_back(buffer, validSize); + std::lock_guard guard(mutex_); + // Throw away ready buffers as a result of above flush + readyGpuTraceBuffers_ = nullptr; } #ifdef HAS_CUPTI @@ -222,12 +243,22 @@ void CuptiActivityInterface::bufferCompleted( uint8_t* buffer, size_t /* unused */, size_t validSize) { - allocatedGpuBufferCount--; - // lock should be uncessary here, because gpuTraceBuffers is read/written by - // profilerLoop only. 
CUPTI should handle the cuptiActivityFlushAll and - // bufferCompleted, so that there is no concurrency issues - addActivityBuffer(buffer, validSize); + std::lock_guard guard(mutex_); + auto it = allocatedGpuTraceBuffers_.find(buffer); + if (it == allocatedGpuTraceBuffers_.end()) { + LOG(ERROR) << "bufferCompleted called with unknown buffer: " + << (void*) buffer; + return; + } + + if (!readyGpuTraceBuffers_) { + readyGpuTraceBuffers_ = std::make_unique(); + } + // Set valid size of buffer before moving to ready map + it->second->setSize(validSize); + (*readyGpuTraceBuffers_)[it->first] = std::move(it->second); + allocatedGpuTraceBuffers_.erase(it); // report any records dropped from the queue; to avoid unnecessary cupti // API calls, we make it report only in verbose mode (it doesn't happen @@ -251,6 +282,7 @@ void CuptiActivityInterface::enableCuptiActivities( cuptiActivityRegisterCallbacks(bufferRequestedTrampoline, bufferCompletedTrampoline)); } + externalCorrelationEnabled_ = false; for (const auto& activity : selected_activities) { if (activity == ActivityType::GPU_MEMCPY) { CUPTI_CALL(cuptiActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY)); @@ -263,6 +295,7 @@ void CuptiActivityInterface::enableCuptiActivities( } if (activity == ActivityType::EXTERNAL_CORRELATION) { CUPTI_CALL(cuptiActivityEnable(CUPTI_ACTIVITY_KIND_EXTERNAL_CORRELATION)); + externalCorrelationEnabled_ = true; } if (activity == ActivityType::CUDA_RUNTIME) { CUPTI_CALL(cuptiActivityEnable(CUPTI_ACTIVITY_KIND_RUNTIME)); @@ -294,6 +327,7 @@ void CuptiActivityInterface::disableCuptiActivities( CUPTI_CALL(cuptiActivityDisable(CUPTI_ACTIVITY_KIND_RUNTIME)); } } + externalCorrelationEnabled_ = false; #endif } diff --git a/libkineto/src/CuptiActivityInterface.h b/libkineto/src/CuptiActivityInterface.h index d1684bdb2..af02570df 100644 --- a/libkineto/src/CuptiActivityInterface.h +++ b/libkineto/src/CuptiActivityInterface.h @@ -17,6 +17,7 @@ #include #include #include +#include #include namespace 
KINETO_NAMESPACE { @@ -52,11 +53,10 @@ class CuptiActivityInterface { const std::set& selected_activities); void clearActivities(); - void addActivityBuffer(uint8_t* buffer, size_t validSize); - virtual std::unique_ptr> activityBuffers(); + virtual std::unique_ptr activityBuffers(); virtual const std::pair processActivities( - std::list& buffers, + CuptiActivityBufferMap&, std::function handler); void setMaxBufferSize(int size); @@ -81,8 +81,10 @@ class CuptiActivityInterface { #endif // HAS_CUPTI int maxGpuBufferCount_{0}; - int allocatedGpuBufferCount{0}; - std::unique_ptr> gpuTraceBuffers_; + CuptiActivityBufferMap allocatedGpuTraceBuffers_; + std::unique_ptr readyGpuTraceBuffers_; + std::mutex mutex_; + bool externalCorrelationEnabled_{false}; protected: #ifdef HAS_CUPTI diff --git a/libkineto/src/DaemonConfigLoader.h b/libkineto/src/DaemonConfigLoader.h index 33947f690..072338ee2 100644 --- a/libkineto/src/DaemonConfigLoader.h +++ b/libkineto/src/DaemonConfigLoader.h @@ -16,6 +16,9 @@ class DaemonConfigLoader { public: virtual ~DaemonConfigLoader() {} + // Return the base config from the daemon + virtual std::string readBaseConfig() = 0; + // Return a configuration string from the daemon, if one has been posted. virtual std::string readOnDemandConfig(bool events, bool activities) = 0; diff --git a/libkineto/src/Demangle.cpp b/libkineto/src/Demangle.cpp index f508d5d81..1e52d5167 100644 --- a/libkineto/src/Demangle.cpp +++ b/libkineto/src/Demangle.cpp @@ -7,7 +7,9 @@ #include "Demangle.h" +#ifndef _MSC_VER #include +#endif #include #include @@ -16,6 +18,7 @@ namespace KINETO_NAMESPACE { static constexpr int kMaxSymbolSize = 1024; std::string demangle(const char* name) { +#ifndef _MSC_VER if (!name) { return ""; } @@ -34,6 +37,14 @@ std::string demangle(const char* name) { // The returned buffer must be freed! 
free(demangled); return res; +#else + // TODO: demangling on Windows + if (!name) { + return ""; + } else { + return name; + } +#endif } } // namespace KINETO_NAMESPACE diff --git a/libkineto/src/EventProfiler.cpp b/libkineto/src/EventProfiler.cpp index 4216097f8..2fe006c60 100644 --- a/libkineto/src/EventProfiler.cpp +++ b/libkineto/src/EventProfiler.cpp @@ -192,7 +192,7 @@ void EventGroupSet::setEnabled(bool enabled) { // Collect counter values for each counter in group set void EventGroupSet::collectSample() { - auto timestamp = high_resolution_clock::now(); + auto timestamp = system_clock::now(); for (int g = 0; g < set_.numEventGroups; g++) { CUpti_EventGroup grp = set_.eventGroups[g]; for (const auto& id : cuptiEvents_.eventsInGroup(grp)) { @@ -215,7 +215,7 @@ void EventGroupSet::collectSample() { } if (VLOG_IS_ON(1)) { - auto t2 = high_resolution_clock::now(); + auto t2 = system_clock::now(); VLOG(1) << "Device " << cuptiEvents_.device() << " Sample (us): " << duration_cast(t2 - timestamp).count(); } @@ -320,7 +320,7 @@ static unique_ptr alignAndValidateConfigs( Config& base, Config& onDemand) { if (onDemand.eventProfilerOnDemandDuration().count() == 0 || - high_resolution_clock::now() > + system_clock::now() > (onDemand.eventProfilerOnDemandStartTime() + onDemand.eventProfilerOnDemandDuration())) { base.validate(); @@ -530,7 +530,7 @@ void EventProfiler::printAllSamples(ostream& s, CUdevice device) const { void EventProfiler::enableNextCounterSet() { if (sets_.size() > 1) { - auto t1 = high_resolution_clock::now(); + auto t1 = system_clock::now(); VLOG(1) << "Disabling set " << curEnabledSet_; sets_[curEnabledSet_].setEnabled(false); @@ -539,7 +539,7 @@ void EventProfiler::enableNextCounterSet() { sets_[curEnabledSet_].setEnabled(true); if (VLOG_IS_ON(1)) { - auto t2 = high_resolution_clock::now(); + auto t2 = system_clock::now(); VLOG(1) << "Switch (us): " << duration_cast(t2 - t1).count(); } diff --git a/libkineto/src/EventProfilerController.cpp 
b/libkineto/src/EventProfilerController.cpp index 419c9bbce..596c78b60 100644 --- a/libkineto/src/EventProfilerController.cpp +++ b/libkineto/src/EventProfilerController.cpp @@ -10,17 +10,15 @@ #include #include #include -#include -#include #include "ConfigLoader.h" #include "CuptiEventInterface.h" #include "CuptiMetricInterface.h" #include "EventProfiler.h" -#include "ThreadName.h" #include "output_csv.h" #include "Logger.h" +#include "ThreadUtil.h" using namespace std::chrono; using std::unique_ptr; @@ -75,8 +73,12 @@ vector>& onDemandLoggers( return res; } +} // anon namespace + // Keep an eye on profiling threads. // We've observed deadlocks in Cuda11 in libcuda / libcupti.. +namespace detail { + class HeartbeatMonitor { public: @@ -90,7 +92,7 @@ class HeartbeatMonitor { } void profilerHeartbeat() { - pid_t tid = syscall(SYS_gettid); + int32_t tid = systemThreadId(); std::lock_guard lock(mutex_); profilerAliveMap_[tid]++; } @@ -119,7 +121,9 @@ class HeartbeatMonitor { auto cv_status = condVar_.wait_for(lock, seconds(period_)); // Don't perform check on spurious wakeup or on notify if (cv_status == std::cv_status::timeout) { - for (auto& [tid, i] : profilerAliveMap_) { + for (auto& pair : profilerAliveMap_) { + int32_t tid = pair.first; + int& i = pair.second; if (i == 0) { LOG(ERROR) << "Thread " << tid << " appears stuck!"; } @@ -149,7 +153,7 @@ class HeartbeatMonitor { } } - std::map profilerAliveMap_; + std::map profilerAliveMap_; std::unique_ptr monitorThread_; std::mutex mutex_; std::condition_variable condVar_; @@ -157,6 +161,9 @@ class HeartbeatMonitor { seconds period_{0}; }; +} // namespace detail + +namespace { // Profiler map singleton std::map>& profilerMap() { static std::map> instance; @@ -174,7 +181,7 @@ void reportLateSample( } void configureHeartbeatMonitor( - HeartbeatMonitor& monitor, const Config& base, const Config& onDemand) { + detail::HeartbeatMonitor& monitor, const Config& base, const Config& onDemand) { seconds base_period = 
base.eventProfilerHeartbeatMonitorPeriod(); seconds on_demand_period = @@ -198,7 +205,7 @@ void EventProfilerController::addOnDemandLoggerFactory( EventProfilerController::EventProfilerController( CUcontext context, ConfigLoader& configLoader, - HeartbeatMonitor& heartbeatMonitor) + detail::HeartbeatMonitor& heartbeatMonitor) : configLoader_(configLoader), heartbeatMonitor_(heartbeatMonitor) { auto cupti_events = std::make_unique(context); auto cupti_metrics = @@ -226,7 +233,7 @@ EventProfilerController::~EventProfilerController() { void EventProfilerController::start(CUcontext ctx) { profilerMap()[ctx] = unique_ptr( new EventProfilerController( - ctx, ConfigLoader::instance(), HeartbeatMonitor::instance())); + ctx, ConfigLoader::instance(), detail::HeartbeatMonitor::instance())); } // Must be called under lock @@ -266,10 +273,10 @@ void EventProfilerController::profilerLoop() { auto on_demand_config = std::make_unique(); - time_point next_sample_time; - time_point next_report_time; - time_point next_on_demand_report_time; - time_point next_multiplex_time; + time_point next_sample_time; + time_point next_report_time; + time_point next_on_demand_report_time; + time_point next_multiplex_time; bool reconfigure = true; bool restart = true; int report_count = 0; @@ -289,7 +296,7 @@ void EventProfilerController::profilerLoop() { reconfigure = true; } - auto now = high_resolution_clock::now(); + auto now = system_clock::now(); if (on_demand_config->eventProfilerOnDemandDuration().count() > 0 && now > (on_demand_config->eventProfilerOnDemandStartTime() + on_demand_config->eventProfilerOnDemandDuration())) { @@ -314,7 +321,7 @@ void EventProfilerController::profilerLoop() { } if (restart) { - now = high_resolution_clock::now(); + now = system_clock::now(); next_sample_time = now + profiler_->samplePeriod(); next_report_time = now + profiler_->reportPeriod(); next_on_demand_report_time = now + profiler_->onDemandReportPeriod(); @@ -330,13 +337,13 @@ void 
EventProfilerController::profilerLoop() { while (now < next_sample_time) { /* sleep override */ std::this_thread::sleep_for(next_sample_time - now); - now = high_resolution_clock::now(); + now = system_clock::now(); } int sleep_time = duration_cast(now - start_sleep).count(); auto start_sample = now; profiler_->collectSample(); - now = high_resolution_clock::now(); + now = system_clock::now(); int sample_time = duration_cast(now - start_sample).count(); next_sample_time += profiler_->samplePeriod(); @@ -359,7 +366,7 @@ void EventProfilerController::profilerLoop() { next_on_demand_report_time += profiler_->onDemandReportPeriod(); } profiler_->eraseReportedSamples(); - now = high_resolution_clock::now(); + now = system_clock::now(); int report_time = duration_cast(now - start_report).count(); if (now > next_sample_time) { @@ -373,7 +380,7 @@ void EventProfilerController::profilerLoop() { profiler_->enableNextCounterSet(); next_multiplex_time += profiler_->multiplexPeriod(); } - now = high_resolution_clock::now(); + now = system_clock::now(); int multiplex_time = duration_cast(now - start_multiplex).count(); diff --git a/libkineto/src/EventProfilerController.h b/libkineto/src/EventProfilerController.h index e8dd95120..6e9703fb6 100644 --- a/libkineto/src/EventProfilerController.h +++ b/libkineto/src/EventProfilerController.h @@ -21,7 +21,7 @@ class ConfigLoader; class EventProfiler; class SampleListener; -namespace { +namespace detail { class HeartbeatMonitor; } @@ -45,12 +45,12 @@ class EventProfilerController { explicit EventProfilerController( CUcontext context, ConfigLoader& configLoader, - HeartbeatMonitor& heartbeatMonitor); + detail::HeartbeatMonitor& heartbeatMonitor); bool enableForDevice(Config& cfg); void profilerLoop(); ConfigLoader& configLoader_; - HeartbeatMonitor& heartbeatMonitor_; + detail::HeartbeatMonitor& heartbeatMonitor_; std::unique_ptr profiler_; std::unique_ptr profilerThread_; std::atomic_bool stopRunloop_{false}; diff --git 
a/libkineto/src/GenericTraceActivity.cpp b/libkineto/src/GenericTraceActivity.cpp index b893fff4c..8df6581a6 100644 --- a/libkineto/src/GenericTraceActivity.cpp +++ b/libkineto/src/GenericTraceActivity.cpp @@ -8,10 +8,8 @@ #include "GenericTraceActivity.h" #include "output_base.h" -using namespace libkineto; - -namespace KINETO_NAMESPACE { +namespace libkineto { void GenericTraceActivity::log(ActivityLogger& logger) const { logger.handleGenericActivity(*this); } -} // namespace KINETO_NAMESPACE +} // namespace libkineto diff --git a/libkineto/src/GenericTraceActivity.h b/libkineto/src/GenericTraceActivity.h deleted file mode 100644 index f9a0e27ec..000000000 --- a/libkineto/src/GenericTraceActivity.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (c) Facebook, Inc. and its affiliates. - * All rights reserved. - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. - */ - -#pragma once - -#include -#include - -#include "TraceActivity.h" - -namespace KINETO_NAMESPACE { - -// A generic trace activity that can be freely modified -struct GenericTraceActivity : libkineto::TraceActivity { - int64_t deviceId() const override { - return device; - } - - int64_t resourceId() const override { - return resource; - } - - int64_t timestamp() const override { - return startTime; - } - - int64_t duration() const override { - return endTime - startTime; - } - - int64_t correlationId() const override { - return correlation; - } - - libkineto::ActivityType type() const override { - return activityType; - } - - const std::string name() const override { - return activityName; - } - - const libkineto::TraceActivity* linkedActivity() const override { - return linked; - } - - void log(libkineto::ActivityLogger& logger) const override; - - int64_t device; - pthread_t resource; - - int64_t startTime; - int64_t endTime; - int64_t correlation; - - libkineto::ActivityType activityType; - std::string activityName; - 
- libkineto::TraceActivity* linked; -}; - -} // namespace KINETO_NAMESPACE diff --git a/libkineto/src/Logger.cpp b/libkineto/src/Logger.cpp index 4dac80770..a65dc67ee 100644 --- a/libkineto/src/Logger.cpp +++ b/libkineto/src/Logger.cpp @@ -9,14 +9,18 @@ #ifndef USE_GOOGLE_LOG -#include -#include #include #include #include #include +#include -namespace KINETO_NAMESPACE { +#include +#include + +#include "ThreadUtil.h" + +namespace libkineto { int Logger::severityLevel_{VERBOSE}; int Logger::verboseLogLevel_{-1}; @@ -25,6 +29,9 @@ uint64_t Logger::verboseLogModules_{~0ull}; Logger::Logger(int severity, int line, const char* filePath, int errnum) : buf_(), out_(LIBKINETO_DBG_STREAM), errnum_(errnum) { switch (severity) { + case VERBOSE: + buf_ << "V:"; + break; case INFO: buf_ << "INFO:"; break; @@ -34,9 +41,6 @@ Logger::Logger(int severity, int line, const char* filePath, int errnum) case ERROR: buf_ << "ERROR:"; break; - case VERBOSE: - buf_ << "V:"; - break; default: buf_ << "???:"; break; @@ -45,19 +49,20 @@ Logger::Logger(int severity, int line, const char* filePath, int errnum) const auto tt = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now()); const char* file = strrchr(filePath, '/'); - std::tm tm; - buf_ << std::put_time(localtime_r(&tt, &tm), "%F %T") << " " << getpid() - << ":" << syscall(SYS_gettid) << " " << (file ? file + 1 : filePath) - << ":" << line << "] "; + buf_ << fmt::format("{:%Y-%m-%d %H:%M:%S}", fmt::localtime(tt)) << " " + << processId() << ":" << systemThreadId() << " " + << (file ? 
file + 1 : filePath) << ":" << line << "] "; } Logger::~Logger() { +#ifdef __linux__ if (errnum_ != 0) { thread_local char buf[1024]; buf_ << " : " << strerror_r(errnum_, buf, sizeof(buf)); } - buf_ << std::ends; - out_ << buf_.str() << std::endl; +#endif + buf_ << std::endl; + out_ << buf_.str(); } void Logger::setVerboseLogModules(const std::vector& modules) { @@ -71,6 +76,6 @@ void Logger::setVerboseLogModules(const std::vector& modules) { } } -} // namespace KINETO_NAMESPACE +} // namespace libkineto #endif // USE_GOOGLE_LOG diff --git a/libkineto/src/Logger.h b/libkineto/src/Logger.h index e980ecfd1..6329cb22e 100644 --- a/libkineto/src/Logger.h +++ b/libkineto/src/Logger.h @@ -35,12 +35,15 @@ #include #include -namespace KINETO_NAMESPACE { +// unset a predefined ERROR (windows) +#undef ERROR -constexpr int VERBOSE = 0; -constexpr int INFO = 1; -constexpr int WARNING = 2; -constexpr int ERROR = 3; +#define VERBOSE 0 +#define INFO 1 +#define WARNING 2 +#define ERROR 3 + +namespace libkineto { class Logger { public: @@ -111,7 +114,7 @@ class VoidLogger { void operator&(std::ostream&) {} }; -} // namespace KINETO_NAMESPACE +} // namespace libkineto #ifdef LOG // Undefine in case these are already defined (quite likely) #undef LOG @@ -136,11 +139,11 @@ class VoidLogger { #endif #define LOG_IS_ON(severity) \ - (severity >= KINETO_NAMESPACE::Logger::severityLevel()) + (severity >= libkineto::Logger::severityLevel()) #define LOG_IF(severity, condition) \ - !(LOG_IS_ON(severity) && (condition)) ? (void)0 : KINETO_NAMESPACE::VoidLogger() & \ - KINETO_NAMESPACE::Logger(severity, __LINE__, __FILE__).stream() + !(LOG_IS_ON(severity) && (condition)) ? 
(void)0 : libkineto::VoidLogger() & \ + libkineto::Logger(severity, __LINE__, __FILE__).stream() #define LOG(severity) LOG_IF(severity, true) @@ -160,11 +163,11 @@ struct __to_constant__ { static const uint64_t val = n; }; #define FILENAME_HASH \ - __to_constant__::val + __to_constant__::val #define VLOG_IS_ON(verbosity) \ - (KINETO_NAMESPACE::Logger::verboseLogLevel() >= verbosity && \ - (KINETO_NAMESPACE::Logger::verboseLogModules() & FILENAME_HASH) == FILENAME_HASH) + (libkineto::Logger::verboseLogLevel() >= verbosity && \ + (libkineto::Logger::verboseLogModules() & FILENAME_HASH) == FILENAME_HASH) #define VLOG_IF(verbosity, condition) \ LOG_IF(VERBOSE, VLOG_IS_ON(verbosity) && (condition)) @@ -177,13 +180,13 @@ struct __to_constant__ { << "(x" << LOG_OCCURRENCES << ") " #define PLOG(severity) \ - KINETO_NAMESPACE::Logger(severity, __LINE__, __FILE__, errno).stream() + libkineto::Logger(severity, __LINE__, __FILE__, errno).stream() #define SET_LOG_SEVERITY_LEVEL(level) \ - KINETO_NAMESPACE::Logger::setSeverityLevel(level) + libkineto::Logger::setSeverityLevel(level) #define SET_LOG_VERBOSITY_LEVEL(level, modules) \ - KINETO_NAMESPACE::Logger::setVerboseLogLevel(level); \ - KINETO_NAMESPACE::Logger::setVerboseLogModules(modules) + libkineto::Logger::setVerboseLogLevel(level); \ + libkineto::Logger::setVerboseLogModules(modules) #endif // USE_GOOGLE_LOG diff --git a/libkineto/src/ProcessInfo.cpp b/libkineto/src/ProcessInfo.cpp deleted file mode 100644 index b24affb9c..000000000 --- a/libkineto/src/ProcessInfo.cpp +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) Facebook, Inc. and its affiliates. - * All rights reserved. - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. 
- */ - -#include "ProcessInfo.h" - -#include -#include -#include -#include - -#include "Logger.h" - -static const std::string kChronosJobIDEnvVar = "CHRONOS_JOB_INSTANCE_ID"; - -namespace KINETO_NAMESPACE { - -// Max number of parent pids to collect, just for extra safeguarding. -constexpr int kMaxParentPids = 10; - -// Return a pair of -static std::pair parentPidAndCommand(pid_t pid) { - FILE* statfile = fopen(fmt::format("/proc/{}/stat", pid).c_str(), "r"); - if (statfile == nullptr) { - return std::make_pair(0, ""); - } - pid_t parent_pid; - char* command = nullptr; - int scanned = fscanf(statfile, "%*d (%m[^)]) %*c %d", &command, &parent_pid); - fclose(statfile); - VLOG(2) << " Current PID: " << pid << " Command: " << command - << " Parent PID: " << parent_pid; - std::pair ret; - if (scanned == 2) { - ret = std::make_pair(parent_pid, std::string(command)); - } else { - LOG(ERROR) << "Failed to parse /proc/" << pid << "/stat"; - ret = std::make_pair(0, ""); - } - - // The 'm' character in the format tells fscanf to allocate memory - // for the parsed string, which we need to free here. - free(command); - return ret; -} - -std::vector> pidCommandPairsOfAncestors() { - std::vector> pairs; - pairs.reserve(kMaxParentPids + 1); - pid_t curr_pid = getpid(); - for (int i = 0; i <= kMaxParentPids && curr_pid > 1; i++) { - std::pair ppid_and_comm = parentPidAndCommand(curr_pid); - pairs.push_back(std::make_pair(curr_pid, ppid_and_comm.second)); - curr_pid = ppid_and_comm.first; - } - return pairs; -} - -} // namespace KINETO_NAMESPACE diff --git a/libkineto/src/ProcessInfo.h b/libkineto/src/ProcessInfo.h deleted file mode 100644 index b618f841e..000000000 --- a/libkineto/src/ProcessInfo.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) Facebook, Inc. and its affiliates. - * All rights reserved. - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. 
- */ - -#pragma once - -#include -#include -#include -#include - -namespace KINETO_NAMESPACE { - -struct ProcessInfo { - pid_t pid; - const std::string name; - const std::string label; -}; - -struct ThreadInfo { - ThreadInfo(int64_t tid, const std::string name) : - tid(tid), name(name) {} - int32_t tid; - const std::string name; -}; - - -// Return a list of pids and process names for the current process -// and its parents. -std::vector> pidCommandPairsOfAncestors(); - -} // namespace KINETO_NAMESPACE diff --git a/libkineto/src/ThreadName.cpp b/libkineto/src/ThreadName.cpp deleted file mode 100644 index c6b905df2..000000000 --- a/libkineto/src/ThreadName.cpp +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) Facebook, Inc. and its affiliates. - * All rights reserved. - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. - */ - -#include -#include -#include -#include -#include - -#include "Logger.h" - -namespace KINETO_NAMESPACE { - -static constexpr size_t kMaxThreadNameLength = 16; - -bool setThreadName(const std::string& name) { - return 0 == pthread_setname_np(pthread_self(), name.c_str()); -} - -std::string getThreadName(pid_t tid) { - char buf[kMaxThreadNameLength] = "Unknown"; - std::string filename = fmt::format("/proc/{}/task/{}/comm", getpid(), tid); - FILE* comm_file = fopen(filename.c_str(), "r"); - if (comm_file) { - size_t len = fread(buf, 1, kMaxThreadNameLength, comm_file); - fclose(comm_file); - // Remove newline - if (len > 0) { - buf[len - 1] = '\0'; - } - } else { - LOG(WARNING) << "Failed to open " << filename; - } - return buf; -} - -} // namespace KINETO_NAMESPACE diff --git a/libkineto/src/ThreadName.h b/libkineto/src/ThreadName.h deleted file mode 100644 index 35f6aa259..000000000 --- a/libkineto/src/ThreadName.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright (c) Facebook, Inc. and its affiliates. - * All rights reserved. 
- * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. - */ - -#pragma once - -#include - -#include - -namespace KINETO_NAMESPACE { - -bool setThreadName(const std::string& name); -std::string getThreadName(pid_t tid); - -} // namespace KINETO_NAMESPACE diff --git a/libkineto/src/ThreadUtil.cpp b/libkineto/src/ThreadUtil.cpp new file mode 100644 index 000000000..90d869bd9 --- /dev/null +++ b/libkineto/src/ThreadUtil.cpp @@ -0,0 +1,199 @@ +#include "ThreadUtil.h" + +#ifndef _MSC_VER +#include +#include +#include +#include +#else // _MSC_VER +#include +#include +#define WIN32_LEAN_AND_MEAN +#define NOGDI +#include +#include +#undef ERROR +#endif // _MSC_VER + +#ifdef __ANDROID__ +#include +#endif + +#include +#include +#include + +namespace libkineto { + +namespace { +thread_local int32_t _pid = 0; +thread_local int32_t _tid = 0; +thread_local int32_t _sysTid = 0; +} + +int32_t processId() { + if (!_pid) { +#ifndef _MSC_VER + _pid = (int32_t)getpid(); +#else + _pid = (int32_t)GetCurrentProcessId(); +#endif + } + return _pid; +} + +int32_t systemThreadId() { + if (!_sysTid) { +#ifdef __APPLE__ + _sysTid = (int32_t)syscall(SYS_thread_selfid); +#elif defined _MSC_VER + _sysTid = (int32_t)GetCurrentThreadId(); +#else + _sysTid = (int32_t)syscall(SYS_gettid); +#endif + } + return _sysTid; +} + +int32_t threadId() { + if (!_tid) { +#ifndef _MSC_VER + pthread_t pth = pthread_self(); + int32_t* ptr = reinterpret_cast(&pth); + _tid = *ptr; +#else + _tid = (int32_t)GetCurrentThreadId(); +#endif + } + return _tid; +} + +namespace { +static constexpr size_t kMaxThreadNameLength = 16; + +static constexpr const char* basename(const char* s, int off = 0) { + return !s[off] + ? s + : s[off] == '/' ? 
basename(&s[off + 1]) : basename(s, off + 1); +} +#if defined(_MSC_VER) +void *getKernel32Func(const char* procName) { + return GetProcAddress(GetModuleHandleA("KERNEL32.DLL"), procName); +} +#endif +} + +bool setThreadName(const std::string& name) { +#ifdef __APPLE__ + return 0 == pthread_setname_np(name.c_str()); +#elif defined _MSC_VER + // Per https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-setthreaddescription + // Use runtime linking to set thread description + static auto _SetThreadDescription = reinterpret_cast(getKernel32Func("SetThreadDescription")); + if (!_SetThreadDescription) { + return false; + } + std::wstring_convert> conv; + std::wstring wname = conv.from_bytes(name); + HRESULT hr = _SetThreadDescription(GetCurrentThread(), wname.c_str()); + return SUCCEEDED(hr); +#else + return 0 == pthread_setname_np(pthread_self(), name.c_str()); +#endif +} + +std::string getThreadName() { +#ifndef _MSC_VER + char buf[kMaxThreadNameLength] = ""; + if ( +#ifndef __ANDROID__ + pthread_getname_np(pthread_self(), buf, kMaxThreadNameLength) != 0 +#else + prctl(PR_GET_NAME, buf, kMaxThreadNameLength) != 0 +#endif + ) { + return "Unknown"; + } + return buf; +#else // _MSC_VER + static auto _GetThreadDescription = reinterpret_cast(getKernel32Func("GetThreadDescription")); + if (!_GetThreadDescription) { + return "Unknown"; + } + PWSTR data; + HRESULT hr = _GetThreadDescription(GetCurrentThread(), &data); + if (!SUCCEEDED(hr)) { + return ""; + } + std::wstring_convert> conv; + std::string name = conv.to_bytes(data); + LocalFree(data); + return name; +#endif +} + +// Linux: +// Extract process name from /proc/pid/cmdline. This does not have +// the 16 character limit that /proc/pid/status and /prod/pid/comm has. 
+std::string processName(int32_t pid) { +#ifdef __linux__ + FILE* cmdfile = fopen(fmt::format("/proc/{}/cmdline", pid).c_str(), "r"); + if (cmdfile != nullptr) { + char* command = nullptr; + int scanned = fscanf(cmdfile, "%ms", &command); + fclose(cmdfile); + if (scanned > 0 && command) { + std::string ret(basename(command)); + free(command); + return ret; + } + } + std::cerr << "Failed to read process name for pid " << pid << std::endl; +#endif + return ""; +} + +// Max number of parent pids to collect, just for extra safeguarding. +constexpr int kMaxParentPids = 10; + +// Return a pair of +static std::pair parentPidAndCommand(int32_t pid) { +#ifdef __linux__ + FILE* statfile = fopen(fmt::format("/proc/{}/stat", pid).c_str(), "r"); + if (statfile == nullptr) { + return std::make_pair(0, ""); + } + int32_t parent_pid; + char* command = nullptr; + int scanned = fscanf(statfile, "%*d (%m[^)]) %*c %d", &command, &parent_pid); + fclose(statfile); + std::pair ret; + if (scanned == 2) { + ret = std::make_pair(parent_pid, std::string(command)); + } else { + std::cerr << "Failed to parse /proc/" << pid << "/stat" << std::endl; + ret = std::make_pair(0, ""); + } + + // The 'm' character in the format tells fscanf to allocate memory + // for the parsed string, which we need to free here. 
+ free(command); + return ret; +#else + return std::make_pair(0, ""); +#endif +} + +std::vector> pidCommandPairsOfAncestors() { + std::vector> pairs; + pairs.reserve(kMaxParentPids + 1); + int32_t curr_pid = processId(); + for (int i = 0; i <= kMaxParentPids && curr_pid > 1; i++) { + std::pair ppid_and_comm = parentPidAndCommand(curr_pid); + pairs.push_back(std::make_pair(curr_pid, ppid_and_comm.second)); + curr_pid = ppid_and_comm.first; + } + return pairs; +} + +} // namespace libkineto diff --git a/libkineto/src/init.cpp b/libkineto/src/init.cpp index c17d9a727..d12e45794 100644 --- a/libkineto/src/init.cpp +++ b/libkineto/src/init.cpp @@ -25,7 +25,6 @@ * mechanism), but allows the application to continue. */ -#include #include #include diff --git a/libkineto/src/libkineto_api.cpp b/libkineto/src/libkineto_api.cpp index 0a8f4394b..33c5b6e41 100644 --- a/libkineto/src/libkineto_api.cpp +++ b/libkineto/src/libkineto_api.cpp @@ -7,6 +7,8 @@ #include "libkineto.h" +#include "ThreadUtil.h" + namespace libkineto { LibkinetoApi& api() { @@ -16,12 +18,12 @@ LibkinetoApi& api() { void LibkinetoApi::initClientIfRegistered() { if (client_) { - if (clientRegisterThread_ != pthread_self()) { + if (clientRegisterThread_ != threadId()) { fprintf( stderr, "ERROR: External init callback must run in same thread as registerClient " "(%d != %d)\n", - (int)pthread_self(), + threadId(), (int)clientRegisterThread_); } else { client_->init(); @@ -37,7 +39,7 @@ void LibkinetoApi::registerClient(ClientInterface* client) { } // Assume here that the external init callback is *not* threadsafe // and only call it if it's the same thread that called registerClient - clientRegisterThread_ = pthread_self(); + clientRegisterThread_ = threadId(); } } // namespace libkineto diff --git a/libkineto/src/output_base.h b/libkineto/src/output_base.h index 14775b5b8..050a0828b 100644 --- a/libkineto/src/output_base.h +++ b/libkineto/src/output_base.h @@ -18,15 +18,14 @@ #include "CuptiActivity.h" #endif 
// HAS_CUPTI #include "ActivityBuffers.h" -#include "ClientTraceActivity.h" #include "GenericTraceActivity.h" -#include "ProcessInfo.h" +#include "ThreadUtil.h" #include "TraceSpan.h" namespace KINETO_NAMESPACE { class Config; - class RuntimeActivity; class GpuKernelActivity; + struct RuntimeActivity; } namespace libkineto { @@ -45,14 +44,8 @@ class ActivityLogger { virtual void handleTraceSpan(const TraceSpan& span) = 0; - virtual void handleIterationStart(const TraceSpan& span) = 0; - - virtual void handleCpuActivity( - const libkineto::ClientTraceActivity& activity, - const TraceSpan& span) = 0; - virtual void handleGenericActivity( - const GenericTraceActivity& activity) = 0; + const libkineto::GenericTraceActivity& activity) = 0; #ifdef HAS_CUPTI virtual void handleRuntimeActivity(const RuntimeActivity& activity) = 0; @@ -67,6 +60,13 @@ class ActivityLogger { const GpuActivity& activity) = 0; #endif // HAS_CUPTI + virtual void handleTraceStart( + const std::unordered_map& metadata) = 0; + + void handleTraceStart() { + handleTraceStart(std::unordered_map()); + } + virtual void finalizeTrace( const KINETO_NAMESPACE::Config& config, std::unique_ptr buffers, diff --git a/libkineto/src/output_csv.cpp b/libkineto/src/output_csv.cpp index a12b591fc..67868cf4b 100644 --- a/libkineto/src/output_csv.cpp +++ b/libkineto/src/output_csv.cpp @@ -11,8 +11,10 @@ #include #include -#include "Config.h" +#include +#include +#include "Config.h" #include "Logger.h" namespace KINETO_NAMESPACE { @@ -44,12 +46,11 @@ void EventCSVLogger::handleSample(int device, const Sample& sample, bool from_ne if (out_) { auto now = system_clock::now(); auto time = system_clock::to_time_t(now); - struct tm tm; for (const Stat& s : sample.stats) { if (eventNames_.find(s.name) == eventNames_.end()) { continue; } - *out_ << std::put_time(localtime_r(&time, &tm), "%F %T") << ","; + *out_ << fmt::format("{:%Y-%m-%d %H:%M:%S}", fmt::localtime(time)) << ","; *out_ << sample.deltaMsec << ","; *out_ << 
device << ","; *out_ << s.name; diff --git a/libkineto/src/output_json.cpp b/libkineto/src/output_json.cpp index 54eff581a..76f0bca0d 100644 --- a/libkineto/src/output_json.cpp +++ b/libkineto/src/output_json.cpp @@ -11,13 +11,13 @@ #include #include #include -#include #include "Config.h" #ifdef HAS_CUPTI #include "CuptiActivity.h" #include "CuptiActivity.tpp" #include "CuptiActivityInterface.h" +#include "CudaDeviceProperties.h" #endif // HAS_CUPTI #include "Demangle.h" #include "TraceSpan.h" @@ -30,32 +30,51 @@ using namespace libkineto; namespace KINETO_NAMESPACE { static constexpr int kSchemaVersion = 1; +#ifdef __linux__ +static const std::string kDefaultLogFileFmt = + "/tmp/libkineto_activities_{}.json"; +#else +static const std::string kDefaultLogFileFmt = "libkineto_activities_{}.json"; +#endif -static void writeHeader(std::ofstream& stream) { - stream << fmt::format(R"JSON( +void ChromeTraceLogger::handleTraceStart( + const std::unordered_map& metadata) { + traceOf_ << fmt::format(R"JSON( {{ - "schemaVersion": {}, - "traceEvents": [ - )JSON", kSchemaVersion); + "schemaVersion": {},)JSON", kSchemaVersion); + + for (const auto& kv : metadata) { + traceOf_ << fmt::format(R"JSON( + "{}": {},)JSON", kv.first, kv.second); + } + +#ifdef HAS_CUPTI + traceOf_ << fmt::format(R"JSON( + "deviceProperties": [{} + ],)JSON", devicePropertiesJson()); +#endif + + traceOf_ << R"JSON( + "traceEvents": [)JSON"; } -static void openTraceFile(std::string& name, std::ofstream& stream) { - stream.open(name, std::ofstream::out | std::ofstream::trunc); - if (!stream) { - PLOG(ERROR) << "Failed to open '" << name << "'"; +static std::string defaultFileName() { + return fmt::format(kDefaultLogFileFmt, processId()); +} + +void ChromeTraceLogger::openTraceFile() { + traceOf_.open(fileName_, std::ofstream::out | std::ofstream::trunc); + if (!traceOf_) { + PLOG(ERROR) << "Failed to open '" << fileName_ << "'"; } else { - LOG(INFO) << "Tracing to " << name; - writeHeader(stream); + 
LOG(INFO) << "Tracing to " << fileName_; } } -ChromeTraceLogger::ChromeTraceLogger(const std::string& traceFileName, int smCount) - : fileName_(traceFileName), pid_(getpid()) { +ChromeTraceLogger::ChromeTraceLogger(const std::string& traceFileName) { + fileName_ = traceFileName.empty() ? defaultFileName() : traceFileName; traceOf_.clear(std::ios_base::badbit); - openTraceFile(fileName_, traceOf_); -#ifdef HAS_CUPTI - smCount_ = CuptiActivityInterface::singleton().smCount(); -#endif + openTraceFile(); } static int64_t us(int64_t timestamp) { @@ -111,7 +130,7 @@ void ChromeTraceLogger::handleThreadInfo( "name": "thread {} ({})" }} }},)JSON", - time, pid_, threadInfo.tid, + time, processId(), threadInfo.tid, threadInfo.tid, threadInfo.name); // clang-format on } @@ -136,9 +155,13 @@ void ChromeTraceLogger::handleTraceSpan(const TraceSpan& span) { span.prefix, span.name, span.iteration, span.opCount); // clang-format on + + if (span.tracked) { + addIterationMarker(span); + } } -void ChromeTraceLogger::handleIterationStart(const TraceSpan& span) { +void ChromeTraceLogger::addIterationMarker(const TraceSpan& span) { if (!traceOf_) { return; } @@ -154,65 +177,72 @@ void ChromeTraceLogger::handleIterationStart(const TraceSpan& span) { // clang-format on } -static std::string traceActivityJson(const TraceActivity& activity, std::string tidPrefix) { +static std::string traceActivityJson( + const TraceActivity& activity, std::string tid) { // clang-format off return fmt::format(R"JSON( - "name": "{}", "pid": {}, "tid": "{}{}", + "name": "{}", "pid": {}, "tid": "{}", "ts": {}, "dur": {})JSON", - activity.name(), activity.deviceId(), tidPrefix, (uint32_t)activity.resourceId(), + activity.name(), activity.deviceId(), tid, activity.timestamp(), activity.duration()); // clang-format on } -void ChromeTraceLogger::handleCpuActivity( - const libkineto::ClientTraceActivity& op, - const TraceSpan& span) { +void ChromeTraceLogger::handleGenericInstantEvent( + const 
libkineto::GenericTraceActivity& op) { if (!traceOf_) { return; } - // clang-format off traceOf_ << fmt::format(R"JSON( {{ - "ph": "X", "cat": "Operator", {}, + "ph": "i", "s": "t", "name": "{}", + "pid": {}, "tid": {}, + "ts": {}, "args": {{ - "Input dims": {}, "Input type": {}, "Input names": {}, - "Output dims": {}, "Output type": {}, "Output names": {}, - "Device": {}, "External id": {}, "Extra arguments": {}, - "Call stack": "{}", - "Trace name": "{}", "Trace iteration": {} + {} }} }},)JSON", - traceActivityJson(op, ""), - // args - op.inputDims, op.inputTypes, op.inputNames, - op.outputDims, op.outputTypes, op.outputNames, - op.device, op.correlation, op.arguments, - op.callStack, - span.name, span.iteration); - // clang-format on + op.name(), op.deviceId(), op.resourceId(), + op.timestamp(), op.getMetadata()); } void ChromeTraceLogger::handleGenericActivity( - const GenericTraceActivity& op) { - if (!traceOf_) { + const libkineto::GenericTraceActivity& op) { + if (!traceOf_) { return; } - // FIXME: Make cat and tid customizable + if (op.activityType == ActivityType::CPU_INSTANT_EVENT) { + handleGenericInstantEvent(op); + return; + } + + auto op_metadata = op.getMetadata(); + std::string separator = ""; + if (op_metadata.find_first_not_of(" \t\n") != std::string::npos) { + separator = ","; + } + const std::string tid = + op.type() == ActivityType::GPU_USER_ANNOTATION ? 
+ fmt::format("stream {} annotations", op.resourceId()) : + fmt::format("{}", op.resourceId()); + // clang-format off traceOf_ << fmt::format(R"JSON( {{ - "ph": "X", "cat": "User", "name": "{}", - "pid": {}, "tid": "stream {} user", - "ts": {}, "dur": {}, + "ph": "X", "cat": "{}", {}, "args": {{ - "External id": {} + "External id": {}, + "Trace name": "{}", "Trace iteration": {}{} + {} }} }},)JSON", - op.name(), op.deviceId(), op.resourceId(), - op.timestamp(), op.duration(), - op.correlationId()); + toString(op.type()), traceActivityJson(op, tid), + // args + op.id, + op.traceSpan()->name, op.traceSpan()->iteration, separator, + op_metadata); // clang-format on } @@ -228,7 +258,7 @@ void ChromeTraceLogger::handleLinkStart(const RuntimeActivity& s) { "ph": "s", "id": {}, "pid": {}, "tid": {}, "ts": {}, "cat": "async", "name": "launch" }},)JSON", - s.correlationId(), pid_, s.resourceId(), s.timestamp()); + s.correlationId(), processId(), s.resourceId(), s.timestamp()); // clang-format on } @@ -264,7 +294,7 @@ void ChromeTraceLogger::handleRuntimeActivity( "external id": {}, "external ts": {} }} }},)JSON", - traceActivityJson(activity, ""), + traceActivityJson(activity, fmt::format("{}", activity.resourceId())), // args cbid, activity.raw().correlationId, ext.correlationId(), ext.timestamp()); @@ -283,6 +313,10 @@ void ChromeTraceLogger::handleRuntimeActivity( } } +static std::string streamName(const TraceActivity& act) { + return fmt::format("stream {}", act.resourceId()); +} + // GPU side kernel activity void ChromeTraceLogger::handleGpuActivity( const GpuActivity& activity) { @@ -292,12 +326,28 @@ void ChromeTraceLogger::handleGpuActivity( const CUpti_ActivityKernel4* kernel = &activity.raw(); const TraceActivity& ext = *activity.linkedActivity(); constexpr int threads_per_warp = 32; + float blocks_per_sm = -1.0; float warps_per_sm = -1.0; - if (smCount_) { - warps_per_sm = (kernel->gridX * kernel->gridY * kernel->gridZ) * - (kernel->blockX * kernel->blockY * 
kernel->blockZ) / - (float) threads_per_warp / smCount_; + int sm_count = smCount(kernel->deviceId); + if (sm_count) { + blocks_per_sm = + (kernel->gridX * kernel->gridY * kernel->gridZ) / (float) sm_count; + warps_per_sm = + blocks_per_sm * (kernel->blockX * kernel->blockY * kernel->blockZ) + / threads_per_warp; } + + // Calculate occupancy + float occupancy = KINETO_NAMESPACE::kernelOccupancy( + kernel->deviceId, + kernel->registersPerThread, + kernel->staticSharedMemory, + kernel->dynamicSharedMemory, + kernel->blockX, + kernel->blockY, + kernel->blockZ, + blocks_per_sm); + // clang-format off traceOf_ << fmt::format(R"JSON( {{ @@ -307,20 +357,24 @@ void ChromeTraceLogger::handleGpuActivity( "stream": {}, "correlation": {}, "external id": {}, "registers per thread": {}, "shared memory": {}, + "blocks per SM": {}, "warps per SM": {}, "grid": [{}, {}, {}], - "block": [{}, {}, {}] + "block": [{}, {}, {}], + "est. achieved occupancy %": {} }} }},)JSON", - traceActivityJson(activity, "stream "), + traceActivityJson(activity, streamName(activity)), // args us(kernel->queued), kernel->deviceId, kernel->contextId, kernel->streamId, kernel->correlationId, ext.correlationId(), kernel->registersPerThread, kernel->staticSharedMemory + kernel->dynamicSharedMemory, + blocks_per_sm, warps_per_sm, kernel->gridX, kernel->gridY, kernel->gridZ, - kernel->blockX, kernel->blockY, kernel->blockZ); + kernel->blockX, kernel->blockY, kernel->blockZ, + (int) (0.5 + occupancy * 100.0)); // clang-format on handleLinkEnd(activity); @@ -349,7 +403,7 @@ void ChromeTraceLogger::handleGpuActivity( "bytes": {}, "memory bandwidth (GB/s)": {} }} }},)JSON", - traceActivityJson(activity, "stream "), + traceActivityJson(activity, streamName(activity)), // args memcpy.deviceId, memcpy.contextId, memcpy.streamId, memcpy.correlationId, ext.correlationId(), @@ -378,7 +432,7 @@ void ChromeTraceLogger::handleGpuActivity( "bytes": {}, "memory bandwidth (GB/s)": {} }} }},)JSON", - traceActivityJson(activity, 
"stream "), + traceActivityJson(activity, streamName(activity)), // args memcpy.srcDeviceId, memcpy.deviceId, memcpy.dstDeviceId, memcpy.srcContextId, memcpy.contextId, memcpy.dstContextId, @@ -406,7 +460,7 @@ void ChromeTraceLogger::handleGpuActivity( "bytes": {}, "memory bandwidth (GB/s)": {} }} }},)JSON", - traceActivityJson(activity, "stream "), + traceActivityJson(activity, streamName(activity)), // args memset.deviceId, memset.contextId, memset.streamId, memset.correlationId, ext.correlationId(), diff --git a/libkineto/src/output_json.h b/libkineto/src/output_json.h index 84ac673c9..a0f2e6ae0 100644 --- a/libkineto/src/output_json.h +++ b/libkineto/src/output_json.h @@ -16,7 +16,7 @@ #ifdef HAS_CUPTI #include #endif -#include "ClientTraceActivity.h" +#include "GenericTraceActivity.h" #include "output_base.h" namespace libkineto { @@ -29,7 +29,7 @@ class Config; class ChromeTraceLogger : public libkineto::ActivityLogger { public: - explicit ChromeTraceLogger(const std::string& traceFileName, int smCount); + explicit ChromeTraceLogger(const std::string& traceFileName); // Note: the caller of these functions should handle concurrency // i.e., we these functions are not thread-safe @@ -41,14 +41,7 @@ class ChromeTraceLogger : public libkineto::ActivityLogger { void handleTraceSpan(const TraceSpan& span) override; - void handleIterationStart(const TraceSpan& span) override; - - void handleCpuActivity( - const libkineto::ClientTraceActivity& activity, - const TraceSpan& span) override; - - void handleGenericActivity( - const GenericTraceActivity& activity) override; + void handleGenericActivity(const GenericTraceActivity& activity) override; #ifdef HAS_CUPTI void handleRuntimeActivity( @@ -60,11 +53,18 @@ class ChromeTraceLogger : public libkineto::ActivityLogger { void handleGpuActivity(const GpuActivity& activity) override; #endif // HAS_CUPTI + void handleTraceStart( + const std::unordered_map& metadata) override; + void finalizeTrace( const Config& config, 
std::unique_ptr buffers, int64_t endTime) override; + std::string traceFileName() const { + return fileName_; + } + private: #ifdef HAS_CUPTI @@ -73,16 +73,14 @@ class ChromeTraceLogger : public libkineto::ActivityLogger { void handleLinkEnd(const TraceActivity& e); #endif // HAS_CUPTI - std::string fileName_; - std::ofstream traceOf_; + void addIterationMarker(const TraceSpan& span); - // Cache pid to avoid repeated calls to getpid() - pid_t pid_; + void openTraceFile(); -#ifdef HAS_CUPTI - // Number of SMs on current device - int smCount_{0}; -#endif + void handleGenericInstantEvent(const GenericTraceActivity& op); + + std::string fileName_; + std::ofstream traceOf_; }; } // namespace KINETO_NAMESPACE diff --git a/libkineto/src/output_membuf.h b/libkineto/src/output_membuf.h index 3e3c76e64..5c7ed36cd 100644 --- a/libkineto/src/output_membuf.h +++ b/libkineto/src/output_membuf.h @@ -17,7 +17,7 @@ #endif #include "Config.h" -#include "ClientTraceActivity.h" +#include "GenericTraceActivity.h" #ifdef HAS_CUPTI #include "CuptiActivity.h" #include "CuptiActivity.tpp" @@ -47,22 +47,10 @@ class MemoryTraceLogger : public ActivityLogger { } void handleTraceSpan(const TraceSpan& span) override { - traceSpanList_.push_back(span); + // Handled separately } - void handleIterationStart(const TraceSpan& span) override { - iterationList_.push_back(span); - } - - void handleCpuActivity( - const libkineto::ClientTraceActivity& activity, - const TraceSpan& span) override { - activities_.push_back( - std::make_unique(activity, span)); - } - - void handleGenericActivity( - const GenericTraceActivity& activity) override { + void handleGenericActivity(const GenericTraceActivity& activity) override { activities_.push_back( std::make_unique(activity)); } @@ -87,6 +75,11 @@ class MemoryTraceLogger : public ActivityLogger { } #endif // HAS_CUPTI + void handleTraceStart( + const std::unordered_map& metadata) override { + metadata_ = metadata; + } + void finalizeTrace( const Config& config, 
std::unique_ptr buffers, @@ -100,6 +93,7 @@ class MemoryTraceLogger : public ActivityLogger { } void log(ActivityLogger& logger) { + logger.handleTraceStart(metadata_); for (auto& activity : activities_) { activity->log(logger); } @@ -109,11 +103,8 @@ class MemoryTraceLogger : public ActivityLogger { for (auto& p : threadInfoList_) { logger.handleThreadInfo(p.first, p.second); } - for (auto& span : traceSpanList_) { - logger.handleTraceSpan(span); - } - for (auto& it : iterationList_) { - logger.handleIterationStart(it); + for (auto& cpu_trace_buffer : buffers_->cpu) { + logger.handleTraceSpan(cpu_trace_buffer->span); } // Hold on to the buffers logger.finalizeTrace(*config_, nullptr, endTime_); @@ -121,36 +112,13 @@ class MemoryTraceLogger : public ActivityLogger { private: - struct CpuActivityDecorator : public libkineto::TraceActivity { - CpuActivityDecorator( - const libkineto::ClientTraceActivity& activity, - const TraceSpan& span) - : wrappee_(activity), span_(span) {} - int64_t deviceId() const override {return wrappee_.deviceId();} - int64_t resourceId() const override {return wrappee_.resourceId();} - int64_t timestamp() const override {return wrappee_.timestamp();} - int64_t duration() const override {return wrappee_.duration();} - int64_t correlationId() const override {return wrappee_.correlationId();} - ActivityType type() const override {return wrappee_.type();} - const std::string name() const override {return wrappee_.name();} - const TraceActivity* linkedActivity() const override { - return wrappee_.linkedActivity(); - } - void log(ActivityLogger& logger) const override { - logger.handleCpuActivity(wrappee_, span_); - } - const libkineto::ClientTraceActivity& wrappee_; - const TraceSpan span_; - }; - std::unique_ptr config_; // Optimization: Remove unique_ptr by keeping separate vector per type std::vector> activities_; std::vector> processInfoList_; std::vector> threadInfoList_; - std::vector traceSpanList_; - std::vector iterationList_; 
std::unique_ptr buffers_; + std::unordered_map metadata_; int64_t endTime_{0}; }; diff --git a/libkineto/test/ActivityProfilerTest.cpp b/libkineto/test/ActivityProfilerTest.cpp index b99d4ef0f..053c4e610 100644 --- a/libkineto/test/ActivityProfilerTest.cpp +++ b/libkineto/test/ActivityProfilerTest.cpp @@ -5,16 +5,19 @@ * LICENSE file in the root directory of this source tree. */ -#include #include #include #include #include -#include -#include #include #include +#ifdef __linux__ +#include +#include +#include +#endif + #include "include/libkineto.h" #include "src/ActivityProfiler.h" #include "src/ActivityTrace.h" @@ -25,6 +28,7 @@ #include "src/output_membuf.h" #include "src/Logger.h" +#include "test/MockActivitySubProfiler.h" using namespace std::chrono; using namespace KINETO_NAMESPACE; @@ -32,22 +36,26 @@ using namespace KINETO_NAMESPACE; #define CUDA_LAUNCH_KERNEL CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000 #define CUDA_MEMCPY CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_v3020 +namespace { +const TraceSpan& defaultTraceSpan() { + static TraceSpan span(0, 0, "Unknown", ""); + return span; +} +} + // Provides ability to easily create a few test CPU-side ops struct MockCpuActivityBuffer : public CpuTraceBuffer { MockCpuActivityBuffer(int64_t startTime, int64_t endTime) { - span = {startTime, endTime, 0, 1, "Test trace", ""}; + span = TraceSpan(startTime, endTime,"Test trace"); gpuOpCount = 0; } void addOp(std::string name, int64_t startTime, int64_t endTime, int64_t correlation) { - ClientTraceActivity op; - op.opType = name; + GenericTraceActivity op(span, ActivityType::CPU_OP, name); op.startTime = startTime; op.endTime = endTime; - op.device = 0; - op.pthreadId = pthread_self(); - op.sysThreadId = 123; - op.correlation = correlation; + op.resource = systemThreadId(); + op.id = correlation; activities.push_back(std::move(op)); span.opCount++; } @@ -71,7 +79,7 @@ struct MockCuptiActivityBuffer { start_us, end_us, correlation); act.kind = CUPTI_ACTIVITY_KIND_RUNTIME; 
act.cbid = cbid; - act.threadId = pthread_self(); + act.threadId = threadId(); activities.push_back(reinterpret_cast(&act)); } @@ -129,7 +137,7 @@ class MockCuptiActivities : public CuptiActivityInterface { } virtual const std::pair processActivities( - std::list& /*unused*/, + CuptiActivityBufferMap&, /*unused*/ std::function handler) override { for (CUpti_Activity* act : activityBuffer->activities) { handler(act); @@ -137,11 +145,13 @@ class MockCuptiActivities : public CuptiActivityInterface { return {activityBuffer->activities.size(), 100}; } - virtual std::unique_ptr> + virtual std::unique_ptr activityBuffers() override { - auto list = std::make_unique>(); - list->emplace_back(nullptr, 100); - return list; + auto map = std::make_unique(); + auto buf = std::make_unique(100); + uint8_t* addr = buf->data(); + (*map)[addr] = std::move(buf); + return map; } void bufferRequestedOverride(uint8_t** buffer, size_t* size, size_t* maxNumRecords) { @@ -159,17 +169,16 @@ class ActivityProfilerTest : public ::testing::Test { profiler_ = std::make_unique( cuptiActivities_, /*cpu only*/ false); cfg_ = std::make_unique(); - } - - std::list createCuptiActivityBuffers() { - std::list res; - res.emplace_back(nullptr, 100); - return res; + cfg_->validate(); + loggerFactory.addProtocol("file", [](const std::string& url) { + return std::unique_ptr(new ChromeTraceLogger(url)); + }); } std::unique_ptr cfg_; MockCuptiActivities cuptiActivities_; std::unique_ptr profiler_; + ActivityLoggerFactory loggerFactory; }; @@ -194,7 +203,7 @@ TEST(ActivityProfiler, AsyncTrace) { EXPECT_TRUE(success); EXPECT_FALSE(profiler.isActive()); - auto logger = std::make_unique(cfg.activitiesLogFile(), 10); + auto logger = std::make_unique(cfg.activitiesLogFile()); auto now = system_clock::now(); profiler.configure(cfg, now); profiler.setLogger(logger.get()); @@ -222,6 +231,7 @@ TEST(ActivityProfiler, AsyncTrace) { // Assert that tracing has completed EXPECT_FALSE(profiler.isActive()); +#ifdef __linux__ // 
Check that the expected file was written and that it has some content int fd = open(filename, O_RDONLY); if (!fd) { @@ -232,6 +242,7 @@ TEST(ActivityProfiler, AsyncTrace) { struct stat buf{}; fstat(fd, &buf); EXPECT_GT(buf.st_size, 100); +#endif } @@ -253,6 +264,8 @@ TEST_F(ActivityProfilerTest, SyncTrace) { profiler.startTrace(start_time); profiler.stopTrace(start_time + microseconds(duration_us)); + profiler.recordThreadInfo(); + // Log some cpu ops auto cpuOps = std::make_unique( start_time_us, start_time_us + duration_us); @@ -279,7 +292,7 @@ TEST_F(ActivityProfilerTest, SyncTrace) { profiler_->reset(); // Wrapper that allows iterating over the activities - ActivityTrace trace(std::move(logger), cuptiActivities_); + ActivityTrace trace(std::move(logger), loggerFactory); EXPECT_EQ(trace.activities()->size(), 9); std::map activityCounts; std::map resourceIds; @@ -298,12 +311,14 @@ TEST_F(ActivityProfilerTest, SyncTrace) { EXPECT_EQ(activityCounts["kernel"], 2); EXPECT_EQ(activityCounts["Memcpy HtoD (Pinned -> Device)"], 1); - // Ops and runtime events are on thread 123 - EXPECT_EQ(resourceIds[123], 6); + auto sysTid = systemThreadId(); + // Ops and runtime events are on thread sysTid + EXPECT_EQ(resourceIds[sysTid], 6); // Kernels are on stream 1, memcpy on stream 2 EXPECT_EQ(resourceIds[1], 2); EXPECT_EQ(resourceIds[2], 1); +#ifdef __linux__ char filename[] = "/tmp/libkineto_testXXXXXX.json"; mkstemps(filename, 5); trace.save(filename); @@ -317,6 +332,7 @@ TEST_F(ActivityProfilerTest, SyncTrace) { struct stat buf{}; fstat(fd, &buf); EXPECT_GT(buf.st_size, 100); +#endif } TEST_F(ActivityProfilerTest, CorrelatedTimestampTest) { @@ -338,6 +354,8 @@ TEST_F(ActivityProfilerTest, CorrelatedTimestampTest) { // When launching kernel, the CPU event should always precede the GPU event. 
int64_t kernelLaunchTime = 120; + profiler.recordThreadInfo(); + // set up CPU event auto cpuOps = std::make_unique( start_time_us, start_time_us + duration_us); @@ -354,7 +372,7 @@ TEST_F(ActivityProfilerTest, CorrelatedTimestampTest) { auto logger = std::make_unique(*cfg_); profiler.processTrace(*logger); - ActivityTrace trace(std::move(logger), cuptiActivities_); + ActivityTrace trace(std::move(logger), loggerFactory); std::map counts; for (auto& activity : *trace.activities()) { counts[activity->name()]++; @@ -365,6 +383,78 @@ TEST_F(ActivityProfilerTest, CorrelatedTimestampTest) { EXPECT_EQ(counts["launchKernel"], 1); } +TEST_F(ActivityProfilerTest, SubActivityProfilers) { + using ::testing::Return; + using ::testing::ByMove; + + // Verbose logging is useful for debugging + std::vector log_modules( + {"ActivityProfiler.cpp"}); + SET_LOG_VERBOSITY_LEVEL(2, log_modules); + + // Setup example events to test + GenericTraceActivity ev{defaultTraceSpan(), ActivityType::GLOW_RUNTIME, ""}; + ev.device = 1; + ev.resource = 0; + + int64_t start_time_us = 100; + int64_t duration_us = 1000; + auto start_time = time_point(microseconds(start_time_us)); + + std::vector test_activities{3, ev}; + test_activities[0].startTime = start_time_us; + test_activities[0].endTime = start_time_us + 5000; + test_activities[0].activityName = "SubGraph A execution"; + test_activities[1].startTime = start_time_us; + test_activities[1].endTime = start_time_us + 2000; + test_activities[1].activityName = "Operator foo"; + test_activities[2].startTime = start_time_us + 2500; + test_activities[2].endTime = start_time_us + 2900; + test_activities[2].activityName = "Operator bar"; + + auto mock_activity_profiler = + std::make_shared(test_activities); + + MockCuptiActivities activities; + ActivityProfiler profiler(activities, /*cpu only*/ true); + profiler.addActivityProfiler(mock_activity_profiler); + + profiler.configure(*cfg_, start_time); + profiler.startTrace(start_time); + 
EXPECT_TRUE(profiler.isActive()); + + profiler.stopTrace(start_time + microseconds(duration_us)); + EXPECT_TRUE(profiler.isActive()); + + char filename[] = "/tmp/libkineto_testXXXXXX.json"; + mkstemps(filename, 5); + LOG(INFO) << "Logging to tmp file " << filename; + + // process trace + auto logger = std::make_unique(*cfg_); + profiler.processTrace(*logger); + profiler.setLogger(logger.get()); + + ActivityTrace trace(std::move(logger), loggerFactory); + trace.save(filename); + const auto& traced_activites = trace.activities(); + + // Test we have all the events + EXPECT_EQ(traced_activites->size(), test_activities.size()); + + // Check that the expected file was written and that it has some content + int fd = open(filename, O_RDONLY); + if (!fd) { + perror(filename); + } + EXPECT_TRUE(fd); + + // Should expect at least 100 bytes + struct stat buf{}; + fstat(fd, &buf); + EXPECT_GT(buf.st_size, 100); +} + TEST_F(ActivityProfilerTest, BufferSizeLimitTestWarmup) { ActivityProfiler profiler(cuptiActivities_, /*cpu only*/ false); @@ -383,9 +473,6 @@ TEST_F(ActivityProfilerTest, BufferSizeLimitTestWarmup) { size_t gpuBufferSize; size_t maxNumRecords; cuptiActivities_.bufferRequestedOverride(&buf, &gpuBufferSize, &maxNumRecords); - - // we don't actually do anything with the buf so just free it to prevent leaks in tests - free(buf); } profiler.performRunLoopStep(now, now); diff --git a/libkineto/test/ConfigTest.cpp b/libkineto/test/ConfigTest.cpp index e0c469b08..81f09cb25 100644 --- a/libkineto/test/ConfigTest.cpp +++ b/libkineto/test/ConfigTest.cpp @@ -72,12 +72,9 @@ TEST(ParseTest, Format) { TEST(ParseTest, DefaultActivityTypes) { Config cfg; cfg.validate(); + auto all_activities = activityTypes(); EXPECT_EQ(cfg.selectedActivityTypes(), - std::set({ActivityType::GPU_MEMCPY, - ActivityType::GPU_MEMSET, - ActivityType::CONCURRENT_KERNEL, - ActivityType::EXTERNAL_CORRELATION, - ActivityType::CUDA_RUNTIME})); + std::set(all_activities.begin(), all_activities.end())); } 
TEST(ParseTest, ActivityTypes) { @@ -87,14 +84,19 @@ TEST(ParseTest, ActivityTypes) { EXPECT_FALSE(cfg.parse("=ACTIVITY_TYPES=")); EXPECT_EQ(cfg.selectedActivityTypes(), - std::set({ActivityType::GPU_MEMCPY, + std::set({ActivityType::CPU_OP, + ActivityType::CPU_INSTANT_EVENT, + ActivityType::USER_ANNOTATION, + ActivityType::GPU_USER_ANNOTATION, + ActivityType::GPU_MEMCPY, ActivityType::GPU_MEMSET, ActivityType::CONCURRENT_KERNEL, ActivityType::EXTERNAL_CORRELATION, + ActivityType::GLOW_RUNTIME, ActivityType::CUDA_RUNTIME})); Config cfg2; - EXPECT_TRUE(cfg2.parse("ACTIVITY_TYPES=gpu_memcpy,gpu_MeMsEt,concurrent_kernel")); + EXPECT_TRUE(cfg2.parse("ACTIVITY_TYPES=gpu_memcpy,gpu_MeMsEt,kernel")); EXPECT_EQ(cfg2.selectedActivityTypes(), std::set({ActivityType::GPU_MEMCPY, ActivityType::GPU_MEMSET, @@ -102,21 +104,14 @@ TEST(ParseTest, ActivityTypes) { EXPECT_TRUE(cfg2.parse("ACTIVITY_TYPES = cuda_Runtime,")); EXPECT_EQ(cfg2.selectedActivityTypes(), - std::set({ActivityType::GPU_MEMCPY, - ActivityType::GPU_MEMSET, - ActivityType::CUDA_RUNTIME, - ActivityType::CONCURRENT_KERNEL})); + std::set({ActivityType::CUDA_RUNTIME})); // Should throw an exception because incorrect activity name EXPECT_FALSE(cfg2.parse("ACTIVITY_TYPES = memcopy,cuda_runtime")); - EXPECT_TRUE(cfg2.parse("ACTIVITY_TYPES = external_correlation")); + EXPECT_TRUE(cfg2.parse("ACTIVITY_TYPES = cpu_op")); EXPECT_EQ(cfg2.selectedActivityTypes(), - std::set({ActivityType::GPU_MEMCPY, - ActivityType::GPU_MEMSET, - ActivityType::CONCURRENT_KERNEL, - ActivityType::EXTERNAL_CORRELATION, - ActivityType::CUDA_RUNTIME})); + std::set({ActivityType::CPU_OP})); } TEST(ParseTest, SamplePeriod) { diff --git a/libkineto/test/EventProfilerTest.cpp b/libkineto/test/EventProfilerTest.cpp index ef3f46142..ca1f20087 100644 --- a/libkineto/test/EventProfilerTest.cpp +++ b/libkineto/test/EventProfilerTest.cpp @@ -59,7 +59,7 @@ TEST(PercentileTest, Normalize) { TEST(EventTest, SumSamples) { Event ev; ev.instanceCount = 4; - auto 
t = high_resolution_clock::now(); + auto t = system_clock::now(); ev.addSample(t, {1, 2, 3, 4}); ev.addSample(t, {10, 20, 30, 40}); ev.addSample(t, {100, 200, 300, 400}); @@ -94,7 +94,7 @@ TEST(EventTest, SumSamples) { TEST(EventTest, Percentiles) { Event ev; ev.instanceCount = 4; - auto t = high_resolution_clock::now(); + auto t = system_clock::now(); ev.addSample(t, {3, 2, 1, 4}); ev.addSample(t, {30, 20, 10, 40}); ev.addSample(t, {300, 200, 100, 400}); diff --git a/libkineto/test/MockActivitySubProfiler.cpp b/libkineto/test/MockActivitySubProfiler.cpp new file mode 100644 index 000000000..cecae5cee --- /dev/null +++ b/libkineto/test/MockActivitySubProfiler.cpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include + +#include "test/MockActivitySubProfiler.h" + +namespace libkineto { + +const std::set supported_activities {ActivityType::CPU_OP}; +const std::string profile_name{"MockProfiler"}; + +void MockProfilerSession::processTrace(ActivityLogger& logger) { + for (const auto& activity: activities()) { + activity.log(logger); + } +} + +const std::string& MockActivityProfiler::name() const { + return profile_name; +} + +const std::set& MockActivityProfiler::availableActivities() const { + return supported_activities; +} + +MockActivityProfiler::MockActivityProfiler( + std::vector& activities) : + test_activities_(activities) {}; + +std::unique_ptr MockActivityProfiler::configure( + const std::set& /*activity_types*/, + const std::string& /*config*/) { + auto session = std::make_unique(); + session->set_test_activities(std::move(test_activities_)); + return session; +}; + +std::unique_ptr MockActivityProfiler::configure( + int64_t /*ts_ms*/, + int64_t /*duration_ms*/, + const std::set& activity_types, + const std::string& config) { + return 
configure(activity_types, config); +}; + +} // namespace libkineto + diff --git a/libkineto/test/MockActivitySubProfiler.h b/libkineto/test/MockActivitySubProfiler.h new file mode 100644 index 000000000..10af468ab --- /dev/null +++ b/libkineto/test/MockActivitySubProfiler.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include + +#include "include/IActivityProfiler.h" + +namespace libkineto { + +class MockProfilerSession: public IActivityProfilerSession { + + public: + explicit MockProfilerSession() {} + + void start() override { + start_count++; + status_ = TraceStatus::RECORDING; + } + + void stop() override { + stop_count++; + status_ = TraceStatus::PROCESSING; + } + + std::vector& activities() override { + return test_activities_; + } + + std::vector errors() override { + return {}; + } + + void processTrace(ActivityLogger& logger) override; + + void set_test_activities(std::vector&& acs) { + test_activities_ = std::move(acs); + } + + int start_count = 0; + int stop_count = 0; + private: + std::vector test_activities_; +}; + + +class MockActivityProfiler: public IActivityProfiler { + + public: + explicit MockActivityProfiler(std::vector& activities); + + const std::string& name() const override; + + const std::set& availableActivities() const override; + + std::unique_ptr configure( + const std::set& activity_types, + const std::string& config = "") override; + + std::unique_ptr configure( + int64_t ts_ms, + int64_t duration_ms, + const std::set& activity_types, + const std::string& config = "") override; + + private: + std::vector test_activities_; +}; + +} // namespace libkineto diff --git a/libkineto/test/PidInfoTest.cpp b/libkineto/test/PidInfoTest.cpp index c3a04342e..324b45e40 100644 --- a/libkineto/test/PidInfoTest.cpp +++ 
b/libkineto/test/PidInfoTest.cpp @@ -5,10 +5,9 @@ * LICENSE file in the root directory of this source tree. */ -#include "src/ThreadName.h" +#include "include/ThreadUtil.h" #include -#include #include #include @@ -18,54 +17,16 @@ using namespace KINETO_NAMESPACE; TEST(ThreadNameTest, setAndGet) { setThreadName("ThreadNameTest"); - EXPECT_EQ(getThreadName(getpid()), "ThreadNameTest"); + EXPECT_EQ(getThreadName(), "ThreadNameTest"); setThreadName(""); - EXPECT_EQ(getThreadName(getpid()), ""); + EXPECT_EQ(getThreadName(), ""); // Spaces etc are ok setThreadName("Name w/ spaces"); - EXPECT_EQ(getThreadName(getpid()), "Name w/ spaces"); + EXPECT_EQ(getThreadName(), "Name w/ spaces"); // More than 16 chars is not OK setThreadName("More than 16 characters"); - EXPECT_EQ(getThreadName(getpid()), "Name w/ spaces"); + EXPECT_EQ(getThreadName(), "Name w/ spaces"); } - -TEST(ThreadNameTest, invalidThread) { - EXPECT_EQ(getThreadName(123456789), "Unknown"); -} - -TEST(ThreadNameTest, otherThread) { - std::atomic_bool stop_flag; - std::atomic_int tid = 0; - std::thread thread([&stop_flag, &tid]() { - setThreadName("New Thread"); - tid = syscall(SYS_gettid); - while (!stop_flag) {} - }); - while (!tid) {} - EXPECT_EQ(getThreadName(tid), "New Thread"); - stop_flag = true; - thread.join(); -} - -TEST(ThreadNameTest, deadThread) { - std::atomic_bool stop_flag; - std::atomic_int tid = 0; - std::thread thread([&stop_flag, &tid]() { - setThreadName("New Thread"); - tid = syscall(SYS_gettid); - while (!stop_flag) {} - }); - while (!tid) {} - stop_flag = true; - thread.join(); - // There appears to be a delay before the thread info is - // removed from proc - we can therefore expect either - // "Unknown" or "New Thread" to be returned. 
- std::string name = getThreadName(tid); - EXPECT_TRUE(name == "Unknown" || name == "New Thread") - << "Where name = " << name; -} - diff --git a/tb_plugin/README.md b/tb_plugin/README.md index 051e910c9..e0fbdb2b0 100644 --- a/tb_plugin/README.md +++ b/tb_plugin/README.md @@ -1,7 +1,7 @@ # PyTorch Profiler TensorBoard Plugin -This is a plugin that provides visualization of PyTorch profiling. -It can parse, process and visualize the PyTorch Profiler's dumped result, +This is a Tensoboard Plugin that provides visualization of PyTorch profiling. +It can parse, process and visualize the PyTorch Profiler's dumped profiling result, and give optimization recommendations. ### Quick Installation Instructions @@ -10,12 +10,6 @@ and give optimization recommendations. `pip install torch-tb-profiler` -* Verify installation is complete - - `pip list | grep torch-tb-profiler` - - Should display "torch-tb-profiler" - * Or you can install from source Clone the git repository: @@ -24,27 +18,31 @@ and give optimization recommendations. Navigate to the kineto/tb_plugin directory. - Install the profiler: + Install with cmd: `pip install .` +* Build the wheel + - `python setup.py build_fe sdist bdist_wheel` \ + **_Note_**: the build_fe step need setup yarn and nodejs + - `python setup.py sdist bdist_wheel` + ### Quick Start Instructions * Prepare profiling data - You can download [kineto/tb_plugin/samples](https://github.com/pytorch/kineto/tree/master/tb_plugin/samples) - to your local and specify it as an example. - These profiling samples are produced by + We have prepared some sample profiling data at [kineto/tb_plugin/samples](https://github.com/pytorch/kineto/tree/master/tb_plugin/samples) + You can download it directly. + Or you can generate these profiling samples yourself by running [kineto/tb_plugin/examples/resnet50_profiler_api.py](https://github.com/pytorch/kineto/blob/master/tb_plugin/examples/resnet50_profiler_api.py). 
- You can learn how to profile your model from this example code - or learn from [PyTorch Profiler](https://pytorch.org/tutorials/recipes/recipes/profiler_recipe.html). + Also you can learn how to profile your model and generate profiling data from [PyTorch Profiler](https://pytorch.org/tutorials/intermediate/tensorboard_profiler_tutorial.html?highlight=tensorboard). Note: The recommended way to produce profiling data is assigning "torch.profiler.tensorboard_trace_handler" to "on_trace_ready" on creation of "torch.profiler.schedule". * Start TensorBoard - Specify your profiling data folder to "logdir". If you use the above samples data, start TensorBoard with: + Specify the profiling data folder to "logdir" in Tensorboard. If you use the above samples data, start TensorBoard with: `tensorboard --logdir=./samples` @@ -58,12 +56,45 @@ and give optimization recommendations. * Open TensorBoard in Chrome browser Open URL `http://localhost:6006` in the browser. + If you use '--bind_all' in tensorboard start cmd, the hostname may not be 'localhost'. You may find it in the log printed after the cmd. * Navigate to PYTORCH_PROFILER tab If the files under `--logdir` are too big or too many, please wait a while and refresh the browser to check latest loaded result. - +* Also support loading profiling data stored in AWS(S3://), Azure blob(https://\.blob.core.windows.net) and Google Cloud(GS://) + * S3: install boto3. set environment variables: `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`. Optionally, `S3_ENDPOINT` can be set as well.\ + For minio, the S3 url should start with the bucket name `s3:////` instead of minio prefix `s3://minio//`. At the same time, the `S3_ENDPOINT` is needed as well. 
\ + For example, the following command can be used to create minio storage after following guides: + * Server: https://docs.min.io/docs/minio-quickstart-guide.html + * MC Client: https://docs.min.io/docs/minio-client-quickstart-guide.html + + ```bash + ./mc alias set s3 http://10.150.148.189:9000 minioadmin minioadmin + ./mc mb s3/profiler --region=us-east-1 + ./mc cp ~/notebook/version_2 s3/profiler/ --recursive + export AWS_ACCESS_KEY_ID=minioadmin + export AWS_SECRET_ACCESS_KEY=minioadmin + export AWS_REGION=us-east-1 + export S3_USE_HTTPS=0 + export S3_VERIFY_SSL=0 + export S3_ENDPOINT=http://localhost:9000 + tensorboard --logdir=s3://profiler/version_2/ --bind_all + ``` + * Azure Blob: install azure-storage-blob. Optionally, set environment variable `AZURE_STORAGE_CONNECTION_STRING` + * Google Cloud: install google-cloud-storage. + --- + > **_NOTES:_** For AWS, Google Cloud and Azure Blob, the trace files need to be put on a top level folder under bucket/container. + --- + + We prepared some sample data in blob, you can also access it using cmd + + tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/ --bind_all + + and open tensorboard in browser to see all the views described below. + + Note: for accessing data in azure blob, you need to install torch-tb-profiler with cmd: `pip install torch-tb-profiler[blob]` + ### Quick Usage Instructions We regard each running with profiler enabled as a "run". @@ -76,20 +107,25 @@ The kineto/tb_plugin/samples is an example of how the files are organized. You can select the run and worker on the left control panel. -![Alt text](https://github.com/pytorch/kineto/blob/master/tb_plugin/docs/images/control_panel.PNG) +![Alt text](https://github.com/pytorch/kineto/blob/plugin/0.2/tb_plugin/docs/images/control_panel.PNG) -Runs: Select a run. Each run is a PyTorch workload with profiling enabled. - -Worker: Select a worker. Each worker is a process. 
There could be multiple workers when DPP is used. +Runs: Select a run. Each run is one execution of a PyTorch application with profiling enabled. Views: We organize the profiling result into multiple views, from coarse-grained (overview-level) to fine-grained (kernel-level). +Workers: Select a worker. Each worker is a process. There could be multiple workers when DDP is used. + +Span: There may be multiple profiling trace files of different spans to be generated when using [torch.profiler.schedule](https://github.com/pytorch/pytorch/blob/master/torch/profiler/profiler.py#L24) as schedule of torch.profiler. +You can select them with this selection box. + Currently we have the following performance diagnosis views: - Overall View - Operator View - Kernel View - Trace View +- Memory View +- Distributed View We describe each of these views below. @@ -100,9 +136,14 @@ It shows an overview of time cost, including both host and GPU devices. You can select the current worker in the left panel's "Workers" dropdown menu. An example of overall view: -![Alt text](https://github.com/pytorch/kineto/blob/master/tb_plugin/docs/images/overall_view.PNG) +![Alt text](https://github.com/pytorch/kineto/blob/plugin/0.2/tb_plugin/docs/images/overall_view.PNG) + +The 'GPU Summary' panel shows GPU information and usage metrics of this run, include name, global memory, compute capability of this GPU. +The 'GPU Utilization', 'Est. SM Efficiency' and 'Est. Achieved Occupancy' shows GPU usage efficiency of this run at different levels. +The detailed information about these three metrics can be found at [gpu_utilization](https://github.com/pytorch/kineto/blob/plugin/0.2/tb_plugin/docs/gpu_utilization.md). + -Step Time Breakdown: This shows the performance summary. We regard each iteration (usually a mini-batch) as a step. +The 'Step Time Breakdown' panel shows the performance summary. We regard each iteration (usually a mini-batch) as a step. 
The time spent on each step is broken down into multiple categories as follows: 1. Kernel: Kernels execution time on GPU device; @@ -111,14 +152,16 @@ The time spent on each step is broken down into multiple categories as follows: 3. Memset: GPU involved memory set time; -4. Runtime: CUDA runtime execution time on host side; +4. Communication: Communication time only appear in DDP case; + +5. Runtime: CUDA runtime execution time on host side; Such as cudaLaunchKernel, cudaMemcpyAsync, cudaStreamSynchronize, ... -5. DataLoader: The data loading time spent in PyTorch DataLoader object; +6. DataLoader: The data loading time spent in PyTorch DataLoader object; -6. CPU Exec: Host compute time, including every PyTorch operator running time; +7. CPU Exec: Host compute time, including every PyTorch operator running time; -7. Other: The time not included in any of the above. +8. Other: The time not included in any of the above. Note: The summary of all the above categories is end-to-end wall-clock time. @@ -132,7 +175,7 @@ Then "CPU Exec" is counted as 2-1=1 seconds, because the [2,3] interval is hidde In this way, summarization of all the 7 categories' counted time in a step will be the same with this step's total wall clock time. -![Alt text](https://github.com/pytorch/kineto/blob/master/tb_plugin/docs/images/time_breakdown_priority.PNG) +![Alt text](https://github.com/pytorch/kineto/blob/plugin/0.2/tb_plugin/docs/images/time_breakdown_priority.PNG) Performance Recommendation: Leverage the profiling result to automatically highlight likely bottlenecks, and give users actionable optimization suggestions. @@ -141,11 +184,11 @@ and give users actionable optimization suggestions. This view displays the performance of every PyTorch operator that is executed either on the host or device. 
-![Alt text](https://github.com/pytorch/kineto/blob/master/tb_plugin/docs/images/operator_view.PNG) +![Alt text](https://github.com/pytorch/kineto/blob/plugin/0.2/tb_plugin/docs/images/operator_view.PNG) Each table row is a PyTorch operator, which is a computation operator implemented by C++, such as “aten::relu_”, “aten::convolution”. -Calls: The operator's number of calls. +Calls: How many times the operator is called in this run. Device Self Duration: The accumulated time spent on GPU, not including this operator’s child operators. @@ -155,6 +198,12 @@ Host Self Duration: The accumulated time spent on Host, not including this opera Host Total Duration: The accumulated time spent on Host, including this operator’s child operators. +CallStack: All call stacks of this operator if it has been recorded in profiling trace file. + To dump this call stack information, you should set the 'with_stack' parameter in torch.profiler API. + The TensorBoard has integrated to VSCode, if you launch TensorBoard in VSCode, clicking this CallStack will forward to corresponding line of source code as below: + + ![Alt text](https://github.com/pytorch/kineto/blob/plugin/0.2/tb_plugin/docs/images/vscode_stack.PNG) + Note: Each above duration means wall-clock time. It doesn't mean the GPU or CPU during this period is fully utilized. The top 4 pie charts are visualizations of the above 4 columns of durations. @@ -172,31 +221,38 @@ means this operator has 9 input arguments, 2nd is a tensor of size 1024\*256\*1\*1, the following 7 ones are scalar variables. -![Alt text](https://github.com/pytorch/kineto/blob/master/tb_plugin/docs/images/operator_view_group_by_inputshape.PNG) +![Alt text](https://github.com/pytorch/kineto/blob/plugin/0.2/tb_plugin/docs/images/operator_view_group_by_inputshape.PNG) * Kernel View -This view shows all kernels’ time spent on GPU. -The time is calculated by subtracting the kernel's start time from the end time. + This view shows all kernels’ time spent on GPU. 
+ The time is calculated by subtracting the kernel's start time from the end time. + + Note: This view does not include cudaMemcpy or cudaMemset. Because they are not kernels. + + ![Alt text](https://github.com/pytorch/kineto/blob/plugin/0.2/tb_plugin/docs/images/kernel_view.PNG) + + * Total Duration: The accumulated time of all calls of this kernel. -Note: This view does not include cudaMemcpy or cudaMemset. Because they are not kernels. + * Mean Duration: The average time duration of all calls. That's "Total Duration" divided by "Calls". -![Alt text](https://github.com/pytorch/kineto/blob/master/tb_plugin/docs/images/kernel_view.PNG) + * Max Duration: The maximum time duration among all calls. -Total Duration: The accumulated time of all calls of this kernel. + * Min Duration: The minimum time duration among all calls. -Mean Duration: The average time duration of all calls. That's "Total Duration" divided by "Calls". + Note: These duration only includes a kernel's elapsed time on GPU device. + It does not mean the GPU is fully busy executing instructions during this time interval. + Some of the GPU cores may be idle due to reasons such as memory access latency or insufficient parallelism. + For example, there may be insufficient number of available warps per SM for the GPU to effectively + hide memory access latencies, or some SMs may be entirely idle due to an insufficient number of blocks. + Please refer to [Nvidia's best-practices guide](https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html). + To investigate efficiency for each kernel, we calculate and show the 'Mean Blocks Per SM' and 'Mean Est. Achieved Occupancy' in the last two column. -Max Duration: The maximum time duration among all calls. + * Mean Blocks Per SM: Blocks per SM = Blocks of this kernel / SM number of this GPU. If this number is less than 1, it indicates the GPU multiprocessors are not fully utilized. 
“Mean Blocks per SM” is weighted average of all runs of this kernel name, using each run’s duration as weight. + + * Mean Est. Achieved Occupancy: The definition of Est. Achieved Occupancy can refer to [gpu_utilization](https://github.com/pytorch/kineto/blob/plugin/0.2/tb_plugin/docs/gpu_utilization.md), It is weighted average of all runs of this kernel name, using each run’s duration as weight. -Min Duration: The minimum time duration among all calls. -Note: This duration only includes a kernel's elapsed time on GPU device. -It does not mean the GPU is fully busy executing instructions during this time interval. -Some of the GPU cores may be idle due to reasons such as memory access latency or insufficient parallelism. -For example, there may be insufficient number of available warps per SM for the GPU to effectively -hide memory access latencies, or some SMs may be entirely idle due to an insufficient number of blocks. -Please refer to [Nvidia's best-practices guide](https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html). The top pie chart is a visualization of "Total Duration" column. It makes the breakdowns visible at a glance. @@ -213,13 +269,13 @@ This view shows timeline using the chrome tracing plugin. Each horizontal area r Each colored rectangle represents an operator, or a CUDA runtime, or a GPU op which executes on GPU (such as a kernel, a CUDA memory copy, a CUDA memory set, ...) -![Alt text](https://github.com/pytorch/kineto/blob/master/tb_plugin/docs/images/trace_view.PNG) +![Alt text](https://github.com/pytorch/kineto/blob/plugin/0.2/tb_plugin/docs/images/trace_view.PNG) In the above example: -The “thread 0” is the CPU thread that do “backward” of neural network. +The “thread 25772” is the CPU thread that do “backward” of neural network. -The “thread 1” is the main CPU thread, which mainly do data loading, forward of neural network, and model update. 
+The “thread 25738” is the main CPU thread, which mainly do data loading, forward of neural network, and model update. The “stream 7” is a CUDA stream, which shows all kernels of this stream. @@ -229,11 +285,76 @@ The suspended toolbar has functionalities to help view the trace line. For example, when the up-down arrow is enabled, you can zoom in by dragging the mouse up and keeping mouse's left button pushed down. -![Alt text](https://github.com/pytorch/kineto/blob/master/tb_plugin/docs/images/trace_view_one_step.PNG) +![Alt text](https://github.com/pytorch/kineto/blob/plugin/0.2/tb_plugin/docs/images/trace_view_one_step.PNG) The “Optimizer.step#SGD.step” and ”enumerate(DataLoader)#_SingleProcessDataLoaderIter.\__next\__” are high-level python side functions. When you select the top-right corner's “Flow events” to ”async”, you can see the relationship between an operator and its launched kernels. -![Alt text](https://github.com/pytorch/kineto/blob/master/tb_plugin/docs/images/trace_view_launch.PNG) +![Alt text](https://github.com/pytorch/kineto/blob/plugin/0.2/tb_plugin/docs/images/trace_view_launch.PNG) + +You can also view the gpu utilization and Est. SM Efficiency in the trace view. They are drawn alongside the timeline: + +![Alt text](https://github.com/pytorch/kineto/blob/plugin/0.2/tb_plugin/docs/images/trace_view_gpu_utilization.PNG) + +* Memory View + + Pytorch profiler records all memory allocation/release events during profiling. For each operator, the plugin aggregates all the events + inside its life span. + + ![Alt text](https://github.com/pytorch/kineto/blob/plugin/0.2/tb_plugin/docs/images/memory_view.PNG) + + The memory kind could be selected in “Device” selection box. For example, “GPU0” means the following table only shows each operator’s memory usage on GPU 0, not including CPU or other GPUs. + + Definition of each field in the table: + + + * Calls: How many times this operator is called. 
+ + * Size Increase: The memory increase size include all children operators. It sums up all allocation bytes and minus all the memory release bytes. + + * Self Size Increase: The memory increase size associated with the operator itself excluding that of its children. It sums up all allocation bytes and minus all the memory release bytes. + + * Allocation Count: The allocation count including all children operators. + + * Self Allocation Count: The allocation count belonging to the operator itself excluding its chilren. + + * Allocation Size: The allocation size including all children operators. It sums up all allocation bytes without considering the memory free. + + * Self Allocation Size: The allocation size belonging to the operator itself. It sums up all allocation bytes without considering the memory free. + + +* Distributed View + + This view will appear automatically only for DDP jobs that use nccl for communication. + There are four panels in this view: + + ![Alt text](https://github.com/pytorch/kineto/blob/plugin/0.2/tb_plugin/docs/images/distributed_view.PNG) + + * The top panel shows the information about nodes/processes/GPU hierarchy of this job. + + * The left panel in the middle is 'Computation/Communication Overview'. Definition of each legend: + * Computation: the sum of kernel time on GPU minus the overlapping time + * Overlapping: the overlapping time of computation and communication. More overlapping represents better parallelism between computation and communication. Ideally the communication could be totally overlapped with computation. + * Communication: the total communication time minus the overlapping time + * Other: step time minus computation and communication time. Maybe includes initialization, data loader, CPU computation, and so on. + + From this view, you can know computation-to-communication ratio of each worker and load balance between workers. 
For example, if the computation + overlapping time of +one worker is much larger than others, there may be a problem of loading balance or this worker may be a straggler. + + * The right panel in the middle is 'Synchronizing/Communication Overview'. Definition of each legend: + * Data Transfer Time: part in the total communication time for actual data exchanging + * Synchronizing Time: part in the total communication time for waiting and synchronizing with other workers. + + From this view, you can know the efficiency of communication (how much ratio of total communication time is really used for exchanging data and how much is just waiting for data from other workers) + + * The 'Communication Operations Stats' summarizes the detailed statistics of all communication ops in each worker. Definition of each field: + * Calls: How many times this operator is called in this run. + * Total Size (bytes): Total data size transfered in operators of this type. + * Avg Size (bytes): Average data size transfered in each operator of this type. + * Total Latency (us): Total latency of all operators of this type. + * Avg Latency (us): Average latency of each operator of this type. + * Data Transfer Time (us): Total time actually used for data transfer in operator of this type. + * Ave Data Transfer Time (us): Average time actually used for data transfer in each operator of this type. + diff --git a/tb_plugin/docs/gpu_utilization.md b/tb_plugin/docs/gpu_utilization.md new file mode 100644 index 000000000..6b9c5e780 --- /dev/null +++ b/tb_plugin/docs/gpu_utilization.md @@ -0,0 +1,17 @@ +* GPU Utilization: GPU busy time / all steps time. The bigger, the better. All steps time is the total time of all profiler steps(or called as iterations). + GPU busy time is the time during “all steps time” when is at least one GPU kernel running on this GPU. + However, this high-level utilization metric is coarse. It can’t tell how many SMs(Stream Multiprocessors) are in use. 
+ For example, a kernel with a single thread running continuously will get 100% GPU utilization. + +* Est. SM Efficiency: Estimated Stream Multiprocessor Efficiency. The bigger, the better. This metric of a kernel, SM_Eff_K = min(blocks of this kernel / SM number of this GPU, 100%). + This overall number is the sum of all kernels' SM_Eff_K weighted by kernel's execution duration, divided by “all steps time”. + It shows GPU Stream Multiprocessors’ utilization. + Although it is finer grained than above “GPU Utilization”, it still can’t tell the whole story. + For example, a kernel with only one thread per block can’t fully utilize each SM. + +* Est. Achieved Occupancy: The bigger, the better. The definition of occupancy is [here](https://docs.nvidia.com/gameworks/content/developertools/desktop/analysis/report/cudaexperiments/kernellevel/achievedoccupancy.htm). + Occupancy is the ratio of active warps on an SM to the maximum number of + active warps supported by the SM. The theoretical occupancy of a kernel is upper limit occupancy of this kernel, limited by multiple + factors such as kernel shape, kernel used resource, and the GPU compute capability. + Est. Achieved Occupancy of a kernel, OCC_K = min(threads of the kernel / SM number / max threads per SM, theoretical occupancy of the kernel). + This overall number is the weighted sum of all kernels OCC_K using kernel's execution duration as weight. It shows fine-grained low-level GPU utilization. 
diff --git a/tb_plugin/docs/images/control_panel.PNG b/tb_plugin/docs/images/control_panel.PNG index bc1a9e5ad..31bd12d9c 100644 Binary files a/tb_plugin/docs/images/control_panel.PNG and b/tb_plugin/docs/images/control_panel.PNG differ diff --git a/tb_plugin/docs/images/distributed_view.PNG b/tb_plugin/docs/images/distributed_view.PNG new file mode 100644 index 000000000..95bf38565 Binary files /dev/null and b/tb_plugin/docs/images/distributed_view.PNG differ diff --git a/tb_plugin/docs/images/kernel_view.PNG b/tb_plugin/docs/images/kernel_view.PNG index 573299158..01892ea17 100644 Binary files a/tb_plugin/docs/images/kernel_view.PNG and b/tb_plugin/docs/images/kernel_view.PNG differ diff --git a/tb_plugin/docs/images/memory_view.PNG b/tb_plugin/docs/images/memory_view.PNG new file mode 100644 index 000000000..9fd23b7c6 Binary files /dev/null and b/tb_plugin/docs/images/memory_view.PNG differ diff --git a/tb_plugin/docs/images/operator_view.PNG b/tb_plugin/docs/images/operator_view.PNG index 83307481f..2ed06b924 100644 Binary files a/tb_plugin/docs/images/operator_view.PNG and b/tb_plugin/docs/images/operator_view.PNG differ diff --git a/tb_plugin/docs/images/overall_view.PNG b/tb_plugin/docs/images/overall_view.PNG index f3a091a0d..6486e7e24 100644 Binary files a/tb_plugin/docs/images/overall_view.PNG and b/tb_plugin/docs/images/overall_view.PNG differ diff --git a/tb_plugin/docs/images/trace_view.PNG b/tb_plugin/docs/images/trace_view.PNG index 1e0b241c2..aa1ced947 100644 Binary files a/tb_plugin/docs/images/trace_view.PNG and b/tb_plugin/docs/images/trace_view.PNG differ diff --git a/tb_plugin/docs/images/trace_view_gpu_utilization.PNG b/tb_plugin/docs/images/trace_view_gpu_utilization.PNG new file mode 100644 index 000000000..4c8bbb0f5 Binary files /dev/null and b/tb_plugin/docs/images/trace_view_gpu_utilization.PNG differ diff --git a/tb_plugin/docs/images/trace_view_launch.PNG b/tb_plugin/docs/images/trace_view_launch.PNG index 3c302923f..ec37f3a84 100644 
Binary files a/tb_plugin/docs/images/trace_view_launch.PNG and b/tb_plugin/docs/images/trace_view_launch.PNG differ diff --git a/tb_plugin/docs/images/trace_view_one_step.PNG b/tb_plugin/docs/images/trace_view_one_step.PNG index a7fe98aa5..49690e3f5 100644 Binary files a/tb_plugin/docs/images/trace_view_one_step.PNG and b/tb_plugin/docs/images/trace_view_one_step.PNG differ diff --git a/tb_plugin/docs/images/vscode_stack.PNG b/tb_plugin/docs/images/vscode_stack.PNG new file mode 100644 index 000000000..afb99f069 Binary files /dev/null and b/tb_plugin/docs/images/vscode_stack.PNG differ diff --git a/tb_plugin/examples/resnet50_profiler_api.py b/tb_plugin/examples/resnet50_profiler_api.py index e91764812..3ecf00186 100644 --- a/tb_plugin/examples/resnet50_profiler_api.py +++ b/tb_plugin/examples/resnet50_profiler_api.py @@ -19,7 +19,7 @@ trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, - shuffle=True, num_workers=0) + shuffle=True, num_workers=4) criterion = nn.CrossEntropyLoss().cuda() optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9) @@ -34,8 +34,10 @@ wait=2, warmup=3, active=6), - on_trace_ready=torch.profiler.tensorboard_trace_handler('./result'), - record_shapes=True + on_trace_ready=torch.profiler.tensorboard_trace_handler('./result', worker_name='worker0'), + record_shapes=True, + profile_memory=True, + with_stack=True ) as p: for step, data in enumerate(trainloader, 0): print("step:{}".format(step)) @@ -47,6 +49,6 @@ optimizer.zero_grad() loss.backward() optimizer.step() - if step + 1 >= 11: + if step + 1 >= 22: break p.step() diff --git a/tb_plugin/fe/README.md b/tb_plugin/fe/README.md index e6cff1b35..723f5cf79 100644 --- a/tb_plugin/fe/README.md +++ b/tb_plugin/fe/README.md @@ -3,7 +3,15 @@ ### Install & Build 1. 
install [Node.js](https://nodejs.org/) + * ```bash + curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash - + sudo apt-get install -y nodejs``` 2. install [Yarn](https://yarnpkg.com/) + * ```bash + curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add - + echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list + sudo apt update && sudo apt install yarn + ``` 3. shell `yarn` 4. shell `yarn build` 5. `./dist/index.html` diff --git a/tb_plugin/fe/package.json b/tb_plugin/fe/package.json index edd31399e..16f3bef5c 100644 --- a/tb_plugin/fe/package.json +++ b/tb_plugin/fe/package.json @@ -14,6 +14,7 @@ "@babel/runtime": "^7.13.10", "@material-ui/core": "^4.11.3", "@material-ui/icons": "^4.11.2", + "antd": "^4.15.1", "clsx": "^1.1.1", "portable-fetch": "^3.0.0", "react": "^16.13.1", @@ -23,7 +24,7 @@ "@types/react": "^16.9.51", "@types/react-dom": "^16.9.8", "cross-env": "^7.0.2", - "css-loader": "^5.0.0", + "css-loader": "^5.2.4", "html-webpack-plugin": "^5.3.1", "inline-chunk-html-plugin": "^1.1.1", "prettier": "^2.1.2", @@ -35,6 +36,7 @@ "webpack-dev-server": "^3.11.2" }, "resolutions": { - "portable-fetch/**/node-fetch": "^2.6.1" + "portable-fetch/**/node-fetch": "^2.6.1", + "webpack/**/browserslist": "^4.16.5" } } diff --git a/tb_plugin/fe/prettier.json b/tb_plugin/fe/prettier.json index 7abc53cff..783a0a9c1 100644 --- a/tb_plugin/fe/prettier.json +++ b/tb_plugin/fe/prettier.json @@ -8,6 +8,6 @@ "useTabs": false, "trailingComma": "none", "proseWrap": "always", - "endOfLine": "crlf" + "endOfLine": "lf" } diff --git a/tb_plugin/fe/scripts/add_header.py b/tb_plugin/fe/scripts/add_header.py new file mode 100755 index 000000000..15d8524e1 --- /dev/null +++ b/tb_plugin/fe/scripts/add_header.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python +import glob +import os +import sys + +HEADER='''/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft 
Corporation. All rights reserved. + *--------------------------------------------------------------------------------------------*/ + +''' + +def add_header(file): + with open(file, 'r') as f: + contents = f.readlines() + + # do nothing if there is already header + if contents and contents[0].startswith("/*-"): + return + + with open(file, 'w') as out: + out.write(HEADER) + out.writelines(contents) + +if __name__ == '__main__': + dir = sys.argv[1] + if not os.path.isdir(dir): + raise ValueError("{} is not a directory".format(dir)) + + for file in glob.glob(dir + "/*.ts"): + add_header(file) diff --git a/tb_plugin/fe/src/api/README.md b/tb_plugin/fe/src/api/README.md new file mode 100644 index 000000000..a68dc9223 --- /dev/null +++ b/tb_plugin/fe/src/api/README.md @@ -0,0 +1,13 @@ +# How to generate the api.ts + +## Prerequisites +1. install java +2. run command +```bash + cd fe + wget https://repo1.maven.org/maven2/io/swagger/codegen/v3/swagger-codegen-cli/3.0.25/swagger-codegen-cli-3.0.25.jar -O swagger-codegen-cli.jar + java -jar swagger-codegen-cli.jar generate -i ./src/api/openapi.yaml -l typescript-fetch -o ./src/api/generated/ + rm ./src/api/generated/api_test.spec.ts + yarn prettier --end-of-line lf + python ./scripts/add_header.py ./src/api/generated/ +``` diff --git a/tb_plugin/fe/src/api/generated/api.ts b/tb_plugin/fe/src/api/generated/api.ts index b1695a871..67ba53017 100644 --- a/tb_plugin/fe/src/api/generated/api.ts +++ b/tb_plugin/fe/src/api/generated/api.ts @@ -20,7 +20,7 @@ import * as url from 'url' import * as portableFetch from 'portable-fetch' import { Configuration } from './configuration' -const BASE_PATH = '/data/plugin/pytorch_profiler'.replace(/\/+$/, '') +const BASE_PATH = '.'.replace(/\/+$/, '') /** * @@ -85,6 +85,111 @@ export class RequiredError extends Error { } } +/** + * + * @export + * @interface CallStackTableData + */ +export interface CallStackTableData extends Array {} +/** + * + * @export + * @interface CallStackTableDataInner 
+ */ +export interface CallStackTableDataInner { + /** + * + * @type {string} + * @memberof CallStackTableDataInner + */ + name: string + /** + * + * @type {string} + * @memberof CallStackTableDataInner + */ + inputShape?: string + /** + * + * @type {number} + * @memberof CallStackTableDataInner + */ + calls: number + /** + * + * @type {number} + * @memberof CallStackTableDataInner + */ + deviceSelfDuration?: number + /** + * + * @type {number} + * @memberof CallStackTableDataInner + */ + deviceTotalDuration?: number + /** + * + * @type {number} + * @memberof CallStackTableDataInner + */ + hostSelfDuration: number + /** + * + * @type {number} + * @memberof CallStackTableDataInner + */ + hostTotalDuration: number + /** + * + * @type {string} + * @memberof CallStackTableDataInner + */ + callStack?: string +} +/** + * + * @export + * @interface DistributedGraph + */ +export interface DistributedGraph { + /** + * + * @type {DistributedGraphMetadata} + * @memberof DistributedGraph + */ + metadata: DistributedGraphMetadata + /** + * + * @type {any} + * @memberof DistributedGraph + */ + data: any +} +/** + * + * @export + * @interface DistributedGraphMetadata + */ +export interface DistributedGraphMetadata { + /** + * + * @type {string} + * @memberof DistributedGraphMetadata + */ + title: string + /** + * + * @type {Array} + * @memberof DistributedGraphMetadata + */ + legends: Array + /** + * + * @type {string} + * @memberof DistributedGraphMetadata + */ + units: string +} /** * * @export @@ -104,6 +209,76 @@ export interface Environment { */ value: string } +/** + * + * @export + * @interface GpuInfo + */ +export interface GpuInfo { + /** + * + * @type {GpuInfoMetadata} + * @memberof GpuInfo + */ + metadata: GpuInfoMetadata + /** + * + * @type {any} + * @memberof GpuInfo + */ + data: any +} +/** + * + * @export + * @interface GpuInfoMetadata + */ +export interface GpuInfoMetadata { + /** + * + * @type {string} + * @memberof GpuInfoMetadata + */ + title: string +} +/** + 
* + * @export + * @interface GpuMetric + */ +export interface GpuMetric { + /** + * + * @type {string} + * @memberof GpuMetric + */ + title: string + /** + * + * @type {string} + * @memberof GpuMetric + */ + value: string +} +/** + * + * @export + * @interface GpuMetrics + */ +export interface GpuMetrics { + /** + * + * @type {Array} + * @memberof GpuMetrics + */ + data: Array + /** + * + * @type {string} + * @memberof GpuMetrics + */ + tooltip: string +} /** * * @export @@ -173,6 +348,25 @@ export interface GraphColumnP { */ html?: boolean } +/** + * + * @export + * @interface InlineResponse200 + */ +export interface InlineResponse200 { + /** + * + * @type {GpuInfoMetadata} + * @memberof InlineResponse200 + */ + metadata: GpuInfoMetadata + /** + * + * @type {any} + * @memberof InlineResponse200 + */ + data: any +} /** * * @export @@ -186,6 +380,117 @@ export interface KernelGraph { */ total: Graph } +/** + * + * @export + * @interface MemoryData + */ +export interface MemoryData { + /** + * + * @type {MemoryTableMetadata} + * @memberof MemoryData + */ + metadata: MemoryTableMetadata + /** + * + * @type {any} + * @memberof MemoryData + */ + data: any +} +/** + * + * @export + * @interface MemoryTableMetadata + */ +export interface MemoryTableMetadata { + /** + * + * @type {string} + * @memberof MemoryTableMetadata + */ + title: string + /** + * + * @type {string} + * @memberof MemoryTableMetadata + */ + defaultDevice: string + /** + * + * @type {string} + * @memberof MemoryTableMetadata + */ + search: string + /** + * + * @type {string} + * @memberof MemoryTableMetadata + */ + sort: string +} +/** + * + * @export + * @interface OperationTableData + */ +export interface OperationTableData extends Array {} +/** + * + * @export + * @interface OperationTableDataInner + */ +export interface OperationTableDataInner { + /** + * + * @type {string} + * @memberof OperationTableDataInner + */ + name: string + /** + * + * @type {string} + * @memberof OperationTableDataInner + 
*/ + inputShape?: string + /** + * + * @type {number} + * @memberof OperationTableDataInner + */ + calls: number + /** + * + * @type {number} + * @memberof OperationTableDataInner + */ + deviceSelfDuration?: number + /** + * + * @type {number} + * @memberof OperationTableDataInner + */ + deviceTotalDuration?: number + /** + * + * @type {number} + * @memberof OperationTableDataInner + */ + hostSelfDuration: number + /** + * + * @type {number} + * @memberof OperationTableDataInner + */ + hostTotalDuration: number + /** + * + * @type {boolean} + * @memberof OperationTableDataInner + */ + hasCallStack: boolean +} /** * * @export @@ -247,6 +552,12 @@ export interface Overview { * @memberof Overview */ recommendations: string + /** + * + * @type {GpuMetrics} + * @memberof Overview + */ + gpuMetrics?: GpuMetrics } /** * @@ -329,47 +640,38 @@ export const DefaultApiFetchParamCreator = function ( * * @param {string} run * @param {string} worker - * @param {string} view - * @param {string} groupBy Group By + * @param {string} span * @param {*} [options] Override http request option. * @throws {RequiredError} */ - kernelGet( + distributedCommopsGet( run: string, worker: string, - view: string, - groupBy: string, + span: string, options: any = {} ): FetchArgs { // verify required parameter 'run' is not null or undefined if (run === null || run === undefined) { throw new RequiredError( 'run', - 'Required parameter run was null or undefined when calling kernelGet.' + 'Required parameter run was null or undefined when calling distributedCommopsGet.' ) } // verify required parameter 'worker' is not null or undefined if (worker === null || worker === undefined) { throw new RequiredError( 'worker', - 'Required parameter worker was null or undefined when calling kernelGet.' 
- ) - } - // verify required parameter 'view' is not null or undefined - if (view === null || view === undefined) { - throw new RequiredError( - 'view', - 'Required parameter view was null or undefined when calling kernelGet.' + 'Required parameter worker was null or undefined when calling distributedCommopsGet.' ) } - // verify required parameter 'groupBy' is not null or undefined - if (groupBy === null || groupBy === undefined) { + // verify required parameter 'span' is not null or undefined + if (span === null || span === undefined) { throw new RequiredError( - 'groupBy', - 'Required parameter groupBy was null or undefined when calling kernelGet.' + 'span', + 'Required parameter span was null or undefined when calling distributedCommopsGet.' ) } - const localVarPath = `/kernel` + const localVarPath = `/distributed/commops` const localVarUrlObj = url.parse(localVarPath, true) const localVarRequestOptions = Object.assign({ method: 'GET' }, options) const localVarHeaderParameter = {} as any @@ -383,12 +685,8 @@ export const DefaultApiFetchParamCreator = function ( localVarQueryParameter['worker'] = worker } - if (view !== undefined) { - localVarQueryParameter['view'] = view - } - - if (groupBy !== undefined) { - localVarQueryParameter['group_by'] = groupBy + if (span !== undefined) { + localVarQueryParameter['span'] = span } localVarUrlObj.query = Object.assign( @@ -414,40 +712,38 @@ export const DefaultApiFetchParamCreator = function ( * * @param {string} run * @param {string} worker - * @param {string} view - * @param {string} [groupBy] Group By + * @param {string} span * @param {*} [options] Override http request option. 
* @throws {RequiredError} */ - kernelTableGet( + distributedGpuinfoGet( run: string, worker: string, - view: string, - groupBy?: string, + span: string, options: any = {} ): FetchArgs { // verify required parameter 'run' is not null or undefined if (run === null || run === undefined) { throw new RequiredError( 'run', - 'Required parameter run was null or undefined when calling kernelTableGet.' + 'Required parameter run was null or undefined when calling distributedGpuinfoGet.' ) } // verify required parameter 'worker' is not null or undefined if (worker === null || worker === undefined) { throw new RequiredError( 'worker', - 'Required parameter worker was null or undefined when calling kernelTableGet.' + 'Required parameter worker was null or undefined when calling distributedGpuinfoGet.' ) } - // verify required parameter 'view' is not null or undefined - if (view === null || view === undefined) { + // verify required parameter 'span' is not null or undefined + if (span === null || span === undefined) { throw new RequiredError( - 'view', - 'Required parameter view was null or undefined when calling kernelTableGet.' + 'span', + 'Required parameter span was null or undefined when calling distributedGpuinfoGet.' 
) } - const localVarPath = `/kernel/table` + const localVarPath = `/distributed/gpuinfo` const localVarUrlObj = url.parse(localVarPath, true) const localVarRequestOptions = Object.assign({ method: 'GET' }, options) const localVarHeaderParameter = {} as any @@ -461,12 +757,8 @@ export const DefaultApiFetchParamCreator = function ( localVarQueryParameter['worker'] = worker } - if (view !== undefined) { - localVarQueryParameter['view'] = view - } - - if (groupBy !== undefined) { - localVarQueryParameter['group_by'] = groupBy + if (span !== undefined) { + localVarQueryParameter['span'] = span } localVarUrlObj.query = Object.assign( @@ -492,47 +784,38 @@ export const DefaultApiFetchParamCreator = function ( * * @param {string} run * @param {string} worker - * @param {string} view - * @param {string} groupBy Group By + * @param {string} span * @param {*} [options] Override http request option. * @throws {RequiredError} */ - operationGet( + distributedOverlapGet( run: string, worker: string, - view: string, - groupBy: string, + span: string, options: any = {} ): FetchArgs { // verify required parameter 'run' is not null or undefined if (run === null || run === undefined) { throw new RequiredError( 'run', - 'Required parameter run was null or undefined when calling operationGet.' + 'Required parameter run was null or undefined when calling distributedOverlapGet.' ) } // verify required parameter 'worker' is not null or undefined if (worker === null || worker === undefined) { throw new RequiredError( 'worker', - 'Required parameter worker was null or undefined when calling operationGet.' - ) - } - // verify required parameter 'view' is not null or undefined - if (view === null || view === undefined) { - throw new RequiredError( - 'view', - 'Required parameter view was null or undefined when calling operationGet.' + 'Required parameter worker was null or undefined when calling distributedOverlapGet.' 
) } - // verify required parameter 'groupBy' is not null or undefined - if (groupBy === null || groupBy === undefined) { + // verify required parameter 'span' is not null or undefined + if (span === null || span === undefined) { throw new RequiredError( - 'groupBy', - 'Required parameter groupBy was null or undefined when calling operationGet.' + 'span', + 'Required parameter span was null or undefined when calling distributedOverlapGet.' ) } - const localVarPath = `/operation` + const localVarPath = `/distributed/overlap` const localVarUrlObj = url.parse(localVarPath, true) const localVarRequestOptions = Object.assign({ method: 'GET' }, options) const localVarHeaderParameter = {} as any @@ -546,12 +829,80 @@ export const DefaultApiFetchParamCreator = function ( localVarQueryParameter['worker'] = worker } - if (view !== undefined) { - localVarQueryParameter['view'] = view + if (span !== undefined) { + localVarQueryParameter['span'] = span } - if (groupBy !== undefined) { - localVarQueryParameter['group_by'] = groupBy + localVarUrlObj.query = Object.assign( + {}, + localVarUrlObj.query, + localVarQueryParameter, + options.query + ) + // fix override query string Detail: https://stackoverflow.com/a/7517673/1077943 + delete localVarUrlObj.search + localVarRequestOptions.headers = Object.assign( + {}, + localVarHeaderParameter, + options.headers + ) + + return { + url: url.format(localVarUrlObj), + options: localVarRequestOptions + } + }, + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span + * @param {*} [options] Override http request option. + * @throws {RequiredError} + */ + distributedWaittimeGet( + run: string, + worker: string, + span: string, + options: any = {} + ): FetchArgs { + // verify required parameter 'run' is not null or undefined + if (run === null || run === undefined) { + throw new RequiredError( + 'run', + 'Required parameter run was null or undefined when calling distributedWaittimeGet.' 
+ ) + } + // verify required parameter 'worker' is not null or undefined + if (worker === null || worker === undefined) { + throw new RequiredError( + 'worker', + 'Required parameter worker was null or undefined when calling distributedWaittimeGet.' + ) + } + // verify required parameter 'span' is not null or undefined + if (span === null || span === undefined) { + throw new RequiredError( + 'span', + 'Required parameter span was null or undefined when calling distributedWaittimeGet.' + ) + } + const localVarPath = `/distributed/waittime` + const localVarUrlObj = url.parse(localVarPath, true) + const localVarRequestOptions = Object.assign({ method: 'GET' }, options) + const localVarHeaderParameter = {} as any + const localVarQueryParameter = {} as any + + if (run !== undefined) { + localVarQueryParameter['run'] = run + } + + if (worker !== undefined) { + localVarQueryParameter['worker'] = worker + } + + if (span !== undefined) { + localVarQueryParameter['span'] = span } localVarUrlObj.query = Object.assign( @@ -577,15 +928,15 @@ export const DefaultApiFetchParamCreator = function ( * * @param {string} run * @param {string} worker - * @param {string} view + * @param {string} span * @param {string} groupBy Group By * @param {*} [options] Override http request option. * @throws {RequiredError} */ - operationTableGet( + kernelGet( run: string, worker: string, - view: string, + span: string, groupBy: string, options: any = {} ): FetchArgs { @@ -593,31 +944,31 @@ export const DefaultApiFetchParamCreator = function ( if (run === null || run === undefined) { throw new RequiredError( 'run', - 'Required parameter run was null or undefined when calling operationTableGet.' + 'Required parameter run was null or undefined when calling kernelGet.' ) } // verify required parameter 'worker' is not null or undefined if (worker === null || worker === undefined) { throw new RequiredError( 'worker', - 'Required parameter worker was null or undefined when calling operationTableGet.' 
+ 'Required parameter worker was null or undefined when calling kernelGet.' ) } - // verify required parameter 'view' is not null or undefined - if (view === null || view === undefined) { + // verify required parameter 'span' is not null or undefined + if (span === null || span === undefined) { throw new RequiredError( - 'view', - 'Required parameter view was null or undefined when calling operationTableGet.' + 'span', + 'Required parameter span was null or undefined when calling kernelGet.' ) } // verify required parameter 'groupBy' is not null or undefined if (groupBy === null || groupBy === undefined) { throw new RequiredError( 'groupBy', - 'Required parameter groupBy was null or undefined when calling operationTableGet.' + 'Required parameter groupBy was null or undefined when calling kernelGet.' ) } - const localVarPath = `/operation/table` + const localVarPath = `/kernel` const localVarUrlObj = url.parse(localVarPath, true) const localVarRequestOptions = Object.assign({ method: 'GET' }, options) const localVarHeaderParameter = {} as any @@ -631,8 +982,8 @@ export const DefaultApiFetchParamCreator = function ( localVarQueryParameter['worker'] = worker } - if (view !== undefined) { - localVarQueryParameter['view'] = view + if (span !== undefined) { + localVarQueryParameter['span'] = span } if (groupBy !== undefined) { @@ -662,38 +1013,40 @@ export const DefaultApiFetchParamCreator = function ( * * @param {string} run * @param {string} worker - * @param {string} view + * @param {string} span + * @param {string} [groupBy] Group By * @param {*} [options] Override http request option. * @throws {RequiredError} */ - overviewGet( + kernelTableGet( run: string, worker: string, - view: string, + span: string, + groupBy?: string, options: any = {} ): FetchArgs { // verify required parameter 'run' is not null or undefined if (run === null || run === undefined) { throw new RequiredError( 'run', - 'Required parameter run was null or undefined when calling overviewGet.' 
+ 'Required parameter run was null or undefined when calling kernelTableGet.' ) } // verify required parameter 'worker' is not null or undefined if (worker === null || worker === undefined) { throw new RequiredError( 'worker', - 'Required parameter worker was null or undefined when calling overviewGet.' + 'Required parameter worker was null or undefined when calling kernelTableGet.' ) } - // verify required parameter 'view' is not null or undefined - if (view === null || view === undefined) { + // verify required parameter 'span' is not null or undefined + if (span === null || span === undefined) { throw new RequiredError( - 'view', - 'Required parameter view was null or undefined when calling overviewGet.' + 'span', + 'Required parameter span was null or undefined when calling kernelTableGet.' ) } - const localVarPath = `/overview` + const localVarPath = `/kernel/table` const localVarUrlObj = url.parse(localVarPath, true) const localVarRequestOptions = Object.assign({ method: 'GET' }, options) const localVarHeaderParameter = {} as any @@ -707,8 +1060,12 @@ export const DefaultApiFetchParamCreator = function ( localVarQueryParameter['worker'] = worker } - if (view !== undefined) { - localVarQueryParameter['view'] = view + if (span !== undefined) { + localVarQueryParameter['span'] = span + } + + if (groupBy !== undefined) { + localVarQueryParameter['group_by'] = groupBy } localVarUrlObj.query = Object.assign( @@ -732,16 +1089,57 @@ export const DefaultApiFetchParamCreator = function ( }, /** * + * @param {string} run + * @param {string} worker + * @param {string} span * @param {*} [options] Override http request option. 
* @throws {RequiredError} */ - runsGet(options: any = {}): FetchArgs { - const localVarPath = `/runs` + memoryGet( + run: string, + worker: string, + span: string, + options: any = {} + ): FetchArgs { + // verify required parameter 'run' is not null or undefined + if (run === null || run === undefined) { + throw new RequiredError( + 'run', + 'Required parameter run was null or undefined when calling memoryGet.' + ) + } + // verify required parameter 'worker' is not null or undefined + if (worker === null || worker === undefined) { + throw new RequiredError( + 'worker', + 'Required parameter worker was null or undefined when calling memoryGet.' + ) + } + // verify required parameter 'span' is not null or undefined + if (span === null || span === undefined) { + throw new RequiredError( + 'span', + 'Required parameter span was null or undefined when calling memoryGet.' + ) + } + const localVarPath = `/memory` const localVarUrlObj = url.parse(localVarPath, true) const localVarRequestOptions = Object.assign({ method: 'GET' }, options) const localVarHeaderParameter = {} as any const localVarQueryParameter = {} as any + if (run !== undefined) { + localVarQueryParameter['run'] = run + } + + if (worker !== undefined) { + localVarQueryParameter['worker'] = worker + } + + if (span !== undefined) { + localVarQueryParameter['span'] = span + } + localVarUrlObj.query = Object.assign( {}, localVarUrlObj.query, @@ -765,38 +1163,47 @@ export const DefaultApiFetchParamCreator = function ( * * @param {string} run * @param {string} worker - * @param {string} view + * @param {string} span + * @param {string} groupBy Group By * @param {*} [options] Override http request option. 
* @throws {RequiredError} */ - traceGet( + operationGet( run: string, worker: string, - view: string, + span: string, + groupBy: string, options: any = {} ): FetchArgs { // verify required parameter 'run' is not null or undefined if (run === null || run === undefined) { throw new RequiredError( 'run', - 'Required parameter run was null or undefined when calling traceGet.' + 'Required parameter run was null or undefined when calling operationGet.' ) } // verify required parameter 'worker' is not null or undefined if (worker === null || worker === undefined) { throw new RequiredError( 'worker', - 'Required parameter worker was null or undefined when calling traceGet.' + 'Required parameter worker was null or undefined when calling operationGet.' ) } - // verify required parameter 'view' is not null or undefined - if (view === null || view === undefined) { + // verify required parameter 'span' is not null or undefined + if (span === null || span === undefined) { throw new RequiredError( - 'view', - 'Required parameter view was null or undefined when calling traceGet.' + 'span', + 'Required parameter span was null or undefined when calling operationGet.' ) } - const localVarPath = `/trace` + // verify required parameter 'groupBy' is not null or undefined + if (groupBy === null || groupBy === undefined) { + throw new RequiredError( + 'groupBy', + 'Required parameter groupBy was null or undefined when calling operationGet.' 
+ ) + } + const localVarPath = `/operation` const localVarUrlObj = url.parse(localVarPath, true) const localVarRequestOptions = Object.assign({ method: 'GET' }, options) const localVarHeaderParameter = {} as any @@ -810,8 +1217,12 @@ export const DefaultApiFetchParamCreator = function ( localVarQueryParameter['worker'] = worker } - if (view !== undefined) { - localVarQueryParameter['view'] = view + if (span !== undefined) { + localVarQueryParameter['span'] = span + } + + if (groupBy !== undefined) { + localVarQueryParameter['group_by'] = groupBy } localVarUrlObj.query = Object.assign( @@ -836,18 +1247,59 @@ export const DefaultApiFetchParamCreator = function ( /** * * @param {string} run + * @param {string} worker + * @param {string} span + * @param {string} groupBy Group By + * @param {string} opName + * @param {string} [inputShape] * @param {*} [options] Override http request option. * @throws {RequiredError} */ - viewsGet(run: string, options: any = {}): FetchArgs { + operationStackGet( + run: string, + worker: string, + span: string, + groupBy: string, + opName: string, + inputShape?: string, + options: any = {} + ): FetchArgs { // verify required parameter 'run' is not null or undefined if (run === null || run === undefined) { throw new RequiredError( 'run', - 'Required parameter run was null or undefined when calling viewsGet.' + 'Required parameter run was null or undefined when calling operationStackGet.' ) } - const localVarPath = `/views` + // verify required parameter 'worker' is not null or undefined + if (worker === null || worker === undefined) { + throw new RequiredError( + 'worker', + 'Required parameter worker was null or undefined when calling operationStackGet.' + ) + } + // verify required parameter 'span' is not null or undefined + if (span === null || span === undefined) { + throw new RequiredError( + 'span', + 'Required parameter span was null or undefined when calling operationStackGet.' 
+ ) + } + // verify required parameter 'groupBy' is not null or undefined + if (groupBy === null || groupBy === undefined) { + throw new RequiredError( + 'groupBy', + 'Required parameter groupBy was null or undefined when calling operationStackGet.' + ) + } + // verify required parameter 'opName' is not null or undefined + if (opName === null || opName === undefined) { + throw new RequiredError( + 'opName', + 'Required parameter opName was null or undefined when calling operationStackGet.' + ) + } + const localVarPath = `/operation/stack` const localVarUrlObj = url.parse(localVarPath, true) const localVarRequestOptions = Object.assign({ method: 'GET' }, options) const localVarHeaderParameter = {} as any @@ -857,6 +1309,26 @@ export const DefaultApiFetchParamCreator = function ( localVarQueryParameter['run'] = run } + if (worker !== undefined) { + localVarQueryParameter['worker'] = worker + } + + if (span !== undefined) { + localVarQueryParameter['span'] = span + } + + if (groupBy !== undefined) { + localVarQueryParameter['group_by'] = groupBy + } + + if (opName !== undefined) { + localVarQueryParameter['op_name'] = opName + } + + if (inputShape !== undefined) { + localVarQueryParameter['input_shape'] = inputShape + } + localVarUrlObj.query = Object.assign( {}, localVarUrlObj.query, @@ -879,18 +1351,48 @@ export const DefaultApiFetchParamCreator = function ( /** * * @param {string} run + * @param {string} worker + * @param {string} span + * @param {string} groupBy Group By * @param {*} [options] Override http request option. * @throws {RequiredError} */ - workersGet(run: string, options: any = {}): FetchArgs { + operationTableGet( + run: string, + worker: string, + span: string, + groupBy: string, + options: any = {} + ): FetchArgs { // verify required parameter 'run' is not null or undefined if (run === null || run === undefined) { throw new RequiredError( 'run', - 'Required parameter run was null or undefined when calling workersGet.' 
+ 'Required parameter run was null or undefined when calling operationTableGet.' ) } - const localVarPath = `/workers` + // verify required parameter 'worker' is not null or undefined + if (worker === null || worker === undefined) { + throw new RequiredError( + 'worker', + 'Required parameter worker was null or undefined when calling operationTableGet.' + ) + } + // verify required parameter 'span' is not null or undefined + if (span === null || span === undefined) { + throw new RequiredError( + 'span', + 'Required parameter span was null or undefined when calling operationTableGet.' + ) + } + // verify required parameter 'groupBy' is not null or undefined + if (groupBy === null || groupBy === undefined) { + throw new RequiredError( + 'groupBy', + 'Required parameter groupBy was null or undefined when calling operationTableGet.' + ) + } + const localVarPath = `/operation/table` const localVarUrlObj = url.parse(localVarPath, true) const localVarRequestOptions = Object.assign({ method: 'GET' }, options) const localVarHeaderParameter = {} as any @@ -900,6 +1402,18 @@ export const DefaultApiFetchParamCreator = function ( localVarQueryParameter['run'] = run } + if (worker !== undefined) { + localVarQueryParameter['worker'] = worker + } + + if (span !== undefined) { + localVarQueryParameter['span'] = span + } + + if (groupBy !== undefined) { + localVarQueryParameter['group_by'] = groupBy + } + localVarUrlObj.query = Object.assign( {}, localVarUrlObj.query, @@ -918,35 +1432,495 @@ export const DefaultApiFetchParamCreator = function ( url: url.format(localVarUrlObj), options: localVarRequestOptions } - } - } -} - -/** - * DefaultApi - functional programming interface - * @export - */ -export const DefaultApiFp = function (configuration?: Configuration) { - return { + }, /** * * @param {string} run * @param {string} worker - * @param {string} view - * @param {string} groupBy Group By + * @param {string} span + * @param {*} [options] Override http request option. 
+ * @throws {RequiredError} + */ + overviewGet( + run: string, + worker: string, + span: string, + options: any = {} + ): FetchArgs { + // verify required parameter 'run' is not null or undefined + if (run === null || run === undefined) { + throw new RequiredError( + 'run', + 'Required parameter run was null or undefined when calling overviewGet.' + ) + } + // verify required parameter 'worker' is not null or undefined + if (worker === null || worker === undefined) { + throw new RequiredError( + 'worker', + 'Required parameter worker was null or undefined when calling overviewGet.' + ) + } + // verify required parameter 'span' is not null or undefined + if (span === null || span === undefined) { + throw new RequiredError( + 'span', + 'Required parameter span was null or undefined when calling overviewGet.' + ) + } + const localVarPath = `/overview` + const localVarUrlObj = url.parse(localVarPath, true) + const localVarRequestOptions = Object.assign({ method: 'GET' }, options) + const localVarHeaderParameter = {} as any + const localVarQueryParameter = {} as any + + if (run !== undefined) { + localVarQueryParameter['run'] = run + } + + if (worker !== undefined) { + localVarQueryParameter['worker'] = worker + } + + if (span !== undefined) { + localVarQueryParameter['span'] = span + } + + localVarUrlObj.query = Object.assign( + {}, + localVarUrlObj.query, + localVarQueryParameter, + options.query + ) + // fix override query string Detail: https://stackoverflow.com/a/7517673/1077943 + delete localVarUrlObj.search + localVarRequestOptions.headers = Object.assign( + {}, + localVarHeaderParameter, + options.headers + ) + + return { + url: url.format(localVarUrlObj), + options: localVarRequestOptions + } + }, + /** + * + * @param {*} [options] Override http request option. 
+ * @throws {RequiredError} + */ + runsGet(options: any = {}): FetchArgs { + const localVarPath = `/runs` + const localVarUrlObj = url.parse(localVarPath, true) + const localVarRequestOptions = Object.assign({ method: 'GET' }, options) + const localVarHeaderParameter = {} as any + const localVarQueryParameter = {} as any + + localVarUrlObj.query = Object.assign( + {}, + localVarUrlObj.query, + localVarQueryParameter, + options.query + ) + // fix override query string Detail: https://stackoverflow.com/a/7517673/1077943 + delete localVarUrlObj.search + localVarRequestOptions.headers = Object.assign( + {}, + localVarHeaderParameter, + options.headers + ) + + return { + url: url.format(localVarUrlObj), + options: localVarRequestOptions + } + }, + /** + * + * @param {string} run + * @param {string} worker + * @param {*} [options] Override http request option. + * @throws {RequiredError} + */ + spansGet(run: string, worker: string, options: any = {}): FetchArgs { + // verify required parameter 'run' is not null or undefined + if (run === null || run === undefined) { + throw new RequiredError( + 'run', + 'Required parameter run was null or undefined when calling spansGet.' + ) + } + // verify required parameter 'worker' is not null or undefined + if (worker === null || worker === undefined) { + throw new RequiredError( + 'worker', + 'Required parameter worker was null or undefined when calling spansGet.' 
+ ) + } + const localVarPath = `/spans` + const localVarUrlObj = url.parse(localVarPath, true) + const localVarRequestOptions = Object.assign({ method: 'GET' }, options) + const localVarHeaderParameter = {} as any + const localVarQueryParameter = {} as any + + if (run !== undefined) { + localVarQueryParameter['run'] = run + } + + if (worker !== undefined) { + localVarQueryParameter['worker'] = worker + } + + localVarUrlObj.query = Object.assign( + {}, + localVarUrlObj.query, + localVarQueryParameter, + options.query + ) + // fix override query string Detail: https://stackoverflow.com/a/7517673/1077943 + delete localVarUrlObj.search + localVarRequestOptions.headers = Object.assign( + {}, + localVarHeaderParameter, + options.headers + ) + + return { + url: url.format(localVarUrlObj), + options: localVarRequestOptions + } + }, + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span + * @param {*} [options] Override http request option. + * @throws {RequiredError} + */ + traceGet( + run: string, + worker: string, + span: string, + options: any = {} + ): FetchArgs { + // verify required parameter 'run' is not null or undefined + if (run === null || run === undefined) { + throw new RequiredError( + 'run', + 'Required parameter run was null or undefined when calling traceGet.' + ) + } + // verify required parameter 'worker' is not null or undefined + if (worker === null || worker === undefined) { + throw new RequiredError( + 'worker', + 'Required parameter worker was null or undefined when calling traceGet.' + ) + } + // verify required parameter 'span' is not null or undefined + if (span === null || span === undefined) { + throw new RequiredError( + 'span', + 'Required parameter span was null or undefined when calling traceGet.' 
+ ) + } + const localVarPath = `/trace` + const localVarUrlObj = url.parse(localVarPath, true) + const localVarRequestOptions = Object.assign({ method: 'GET' }, options) + const localVarHeaderParameter = {} as any + const localVarQueryParameter = {} as any + + if (run !== undefined) { + localVarQueryParameter['run'] = run + } + + if (worker !== undefined) { + localVarQueryParameter['worker'] = worker + } + + if (span !== undefined) { + localVarQueryParameter['span'] = span + } + + localVarUrlObj.query = Object.assign( + {}, + localVarUrlObj.query, + localVarQueryParameter, + options.query + ) + // fix override query string Detail: https://stackoverflow.com/a/7517673/1077943 + delete localVarUrlObj.search + localVarRequestOptions.headers = Object.assign( + {}, + localVarHeaderParameter, + options.headers + ) + + return { + url: url.format(localVarUrlObj), + options: localVarRequestOptions + } + }, + /** + * + * @param {string} run + * @param {*} [options] Override http request option. + * @throws {RequiredError} + */ + viewsGet(run: string, options: any = {}): FetchArgs { + // verify required parameter 'run' is not null or undefined + if (run === null || run === undefined) { + throw new RequiredError( + 'run', + 'Required parameter run was null or undefined when calling viewsGet.' 
+ ) + } + const localVarPath = `/views` + const localVarUrlObj = url.parse(localVarPath, true) + const localVarRequestOptions = Object.assign({ method: 'GET' }, options) + const localVarHeaderParameter = {} as any + const localVarQueryParameter = {} as any + + if (run !== undefined) { + localVarQueryParameter['run'] = run + } + + localVarUrlObj.query = Object.assign( + {}, + localVarUrlObj.query, + localVarQueryParameter, + options.query + ) + // fix override query string Detail: https://stackoverflow.com/a/7517673/1077943 + delete localVarUrlObj.search + localVarRequestOptions.headers = Object.assign( + {}, + localVarHeaderParameter, + options.headers + ) + + return { + url: url.format(localVarUrlObj), + options: localVarRequestOptions + } + }, + /** + * + * @param {string} run + * @param {string} view + * @param {*} [options] Override http request option. + * @throws {RequiredError} + */ + workersGet(run: string, view: string, options: any = {}): FetchArgs { + // verify required parameter 'run' is not null or undefined + if (run === null || run === undefined) { + throw new RequiredError( + 'run', + 'Required parameter run was null or undefined when calling workersGet.' + ) + } + // verify required parameter 'view' is not null or undefined + if (view === null || view === undefined) { + throw new RequiredError( + 'view', + 'Required parameter view was null or undefined when calling workersGet.' 
+ ) + } + const localVarPath = `/workers` + const localVarUrlObj = url.parse(localVarPath, true) + const localVarRequestOptions = Object.assign({ method: 'GET' }, options) + const localVarHeaderParameter = {} as any + const localVarQueryParameter = {} as any + + if (run !== undefined) { + localVarQueryParameter['run'] = run + } + + if (view !== undefined) { + localVarQueryParameter['view'] = view + } + + localVarUrlObj.query = Object.assign( + {}, + localVarUrlObj.query, + localVarQueryParameter, + options.query + ) + // fix override query string Detail: https://stackoverflow.com/a/7517673/1077943 + delete localVarUrlObj.search + localVarRequestOptions.headers = Object.assign( + {}, + localVarHeaderParameter, + options.headers + ) + + return { + url: url.format(localVarUrlObj), + options: localVarRequestOptions + } + } + } +} + +/** + * DefaultApi - functional programming interface + * @export + */ +export const DefaultApiFp = function (configuration?: Configuration) { + return { + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span + * @param {*} [options] Override http request option. + * @throws {RequiredError} + */ + distributedCommopsGet( + run: string, + worker: string, + span: string, + options?: any + ): (fetch?: FetchAPI, basePath?: string) => Promise { + const localVarFetchArgs = DefaultApiFetchParamCreator( + configuration + ).distributedCommopsGet(run, worker, span, options) + return ( + fetch: FetchAPI = portableFetch, + basePath: string = BASE_PATH + ) => { + return fetch( + basePath + localVarFetchArgs.url, + localVarFetchArgs.options + ).then((response) => { + if (response.status >= 200 && response.status < 300) { + return response.json() + } else { + throw response + } + }) + } + }, + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span + * @param {*} [options] Override http request option. 
+ * @throws {RequiredError} + */ + distributedGpuinfoGet( + run: string, + worker: string, + span: string, + options?: any + ): (fetch?: FetchAPI, basePath?: string) => Promise { + const localVarFetchArgs = DefaultApiFetchParamCreator( + configuration + ).distributedGpuinfoGet(run, worker, span, options) + return ( + fetch: FetchAPI = portableFetch, + basePath: string = BASE_PATH + ) => { + return fetch( + basePath + localVarFetchArgs.url, + localVarFetchArgs.options + ).then((response) => { + if (response.status >= 200 && response.status < 300) { + return response.json() + } else { + throw response + } + }) + } + }, + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span + * @param {*} [options] Override http request option. + * @throws {RequiredError} + */ + distributedOverlapGet( + run: string, + worker: string, + span: string, + options?: any + ): (fetch?: FetchAPI, basePath?: string) => Promise { + const localVarFetchArgs = DefaultApiFetchParamCreator( + configuration + ).distributedOverlapGet(run, worker, span, options) + return ( + fetch: FetchAPI = portableFetch, + basePath: string = BASE_PATH + ) => { + return fetch( + basePath + localVarFetchArgs.url, + localVarFetchArgs.options + ).then((response) => { + if (response.status >= 200 && response.status < 300) { + return response.json() + } else { + throw response + } + }) + } + }, + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span + * @param {*} [options] Override http request option. 
+ * @throws {RequiredError} + */ + distributedWaittimeGet( + run: string, + worker: string, + span: string, + options?: any + ): (fetch?: FetchAPI, basePath?: string) => Promise { + const localVarFetchArgs = DefaultApiFetchParamCreator( + configuration + ).distributedWaittimeGet(run, worker, span, options) + return ( + fetch: FetchAPI = portableFetch, + basePath: string = BASE_PATH + ) => { + return fetch( + basePath + localVarFetchArgs.url, + localVarFetchArgs.options + ).then((response) => { + if (response.status >= 200 && response.status < 300) { + return response.json() + } else { + throw response + } + }) + } + }, + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span + * @param {string} groupBy Group By * @param {*} [options] Override http request option. * @throws {RequiredError} */ kernelGet( run: string, worker: string, - view: string, + span: string, groupBy: string, options?: any ): (fetch?: FetchAPI, basePath?: string) => Promise { const localVarFetchArgs = DefaultApiFetchParamCreator( configuration - ).kernelGet(run, worker, view, groupBy, options) + ).kernelGet(run, worker, span, groupBy, options) return ( fetch: FetchAPI = portableFetch, basePath: string = BASE_PATH @@ -967,7 +1941,7 @@ export const DefaultApiFp = function (configuration?: Configuration) { * * @param {string} run * @param {string} worker - * @param {string} view + * @param {string} span * @param {string} [groupBy] Group By * @param {*} [options] Override http request option. 
* @throws {RequiredError} @@ -975,13 +1949,13 @@ export const DefaultApiFp = function (configuration?: Configuration) { kernelTableGet( run: string, worker: string, - view: string, + span: string, groupBy?: string, options?: any ): (fetch?: FetchAPI, basePath?: string) => Promise { const localVarFetchArgs = DefaultApiFetchParamCreator( configuration - ).kernelTableGet(run, worker, view, groupBy, options) + ).kernelTableGet(run, worker, span, groupBy, options) return ( fetch: FetchAPI = portableFetch, basePath: string = BASE_PATH @@ -1002,7 +1976,40 @@ export const DefaultApiFp = function (configuration?: Configuration) { * * @param {string} run * @param {string} worker - * @param {string} view + * @param {string} span + * @param {*} [options] Override http request option. + * @throws {RequiredError} + */ + memoryGet( + run: string, + worker: string, + span: string, + options?: any + ): (fetch?: FetchAPI, basePath?: string) => Promise { + const localVarFetchArgs = DefaultApiFetchParamCreator( + configuration + ).memoryGet(run, worker, span, options) + return ( + fetch: FetchAPI = portableFetch, + basePath: string = BASE_PATH + ) => { + return fetch( + basePath + localVarFetchArgs.url, + localVarFetchArgs.options + ).then((response) => { + if (response.status >= 200 && response.status < 300) { + return response.json() + } else { + throw response + } + }) + } + }, + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span * @param {string} groupBy Group By * @param {*} [options] Override http request option. 
* @throws {RequiredError} @@ -1010,13 +2017,13 @@ export const DefaultApiFp = function (configuration?: Configuration) { operationGet( run: string, worker: string, - view: string, + span: string, groupBy: string, options?: any ): (fetch?: FetchAPI, basePath?: string) => Promise { const localVarFetchArgs = DefaultApiFetchParamCreator( configuration - ).operationGet(run, worker, view, groupBy, options) + ).operationGet(run, worker, span, groupBy, options) return ( fetch: FetchAPI = portableFetch, basePath: string = BASE_PATH @@ -1037,7 +2044,54 @@ export const DefaultApiFp = function (configuration?: Configuration) { * * @param {string} run * @param {string} worker - * @param {string} view + * @param {string} span + * @param {string} groupBy Group By + * @param {string} opName + * @param {string} [inputShape] + * @param {*} [options] Override http request option. + * @throws {RequiredError} + */ + operationStackGet( + run: string, + worker: string, + span: string, + groupBy: string, + opName: string, + inputShape?: string, + options?: any + ): (fetch?: FetchAPI, basePath?: string) => Promise { + const localVarFetchArgs = DefaultApiFetchParamCreator( + configuration + ).operationStackGet( + run, + worker, + span, + groupBy, + opName, + inputShape, + options + ) + return ( + fetch: FetchAPI = portableFetch, + basePath: string = BASE_PATH + ) => { + return fetch( + basePath + localVarFetchArgs.url, + localVarFetchArgs.options + ).then((response) => { + if (response.status >= 200 && response.status < 300) { + return response.json() + } else { + throw response + } + }) + } + }, + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span * @param {string} groupBy Group By * @param {*} [options] Override http request option. 
* @throws {RequiredError} @@ -1045,13 +2099,13 @@ export const DefaultApiFp = function (configuration?: Configuration) { operationTableGet( run: string, worker: string, - view: string, + span: string, groupBy: string, options?: any - ): (fetch?: FetchAPI, basePath?: string) => Promise { + ): (fetch?: FetchAPI, basePath?: string) => Promise { const localVarFetchArgs = DefaultApiFetchParamCreator( configuration - ).operationTableGet(run, worker, view, groupBy, options) + ).operationTableGet(run, worker, span, groupBy, options) return ( fetch: FetchAPI = portableFetch, basePath: string = BASE_PATH @@ -1072,19 +2126,19 @@ export const DefaultApiFp = function (configuration?: Configuration) { * * @param {string} run * @param {string} worker - * @param {string} view + * @param {string} span * @param {*} [options] Override http request option. * @throws {RequiredError} */ overviewGet( run: string, worker: string, - view: string, + span: string, options?: any ): (fetch?: FetchAPI, basePath?: string) => Promise { const localVarFetchArgs = DefaultApiFetchParamCreator( configuration - ).overviewGet(run, worker, view, options) + ).overviewGet(run, worker, span, options) return ( fetch: FetchAPI = portableFetch, basePath: string = BASE_PATH @@ -1132,19 +2186,50 @@ export const DefaultApiFp = function (configuration?: Configuration) { * * @param {string} run * @param {string} worker - * @param {string} view + * @param {*} [options] Override http request option. 
+ * @throws {RequiredError} + */ + spansGet( + run: string, + worker: string, + options?: any + ): (fetch?: FetchAPI, basePath?: string) => Promise> { + const localVarFetchArgs = DefaultApiFetchParamCreator( + configuration + ).spansGet(run, worker, options) + return ( + fetch: FetchAPI = portableFetch, + basePath: string = BASE_PATH + ) => { + return fetch( + basePath + localVarFetchArgs.url, + localVarFetchArgs.options + ).then((response) => { + if (response.status >= 200 && response.status < 300) { + return response.json() + } else { + throw response + } + }) + } + }, + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span * @param {*} [options] Override http request option. * @throws {RequiredError} */ traceGet( run: string, worker: string, - view: string, + span: string, options?: any ): (fetch?: FetchAPI, basePath?: string) => Promise { const localVarFetchArgs = DefaultApiFetchParamCreator( configuration - ).traceGet(run, worker, view, options) + ).traceGet(run, worker, span, options) return ( fetch: FetchAPI = portableFetch, basePath: string = BASE_PATH @@ -1193,50 +2278,136 @@ export const DefaultApiFp = function (configuration?: Configuration) { /** * * @param {string} run + * @param {string} view + * @param {*} [options] Override http request option. 
+ * @throws {RequiredError} + */ + workersGet( + run: string, + view: string, + options?: any + ): (fetch?: FetchAPI, basePath?: string) => Promise> { + const localVarFetchArgs = DefaultApiFetchParamCreator( + configuration + ).workersGet(run, view, options) + return ( + fetch: FetchAPI = portableFetch, + basePath: string = BASE_PATH + ) => { + return fetch( + basePath + localVarFetchArgs.url, + localVarFetchArgs.options + ).then((response) => { + if (response.status >= 200 && response.status < 300) { + return response.json() + } else { + throw response + } + }) + } + } + } +} + +/** + * DefaultApi - factory interface + * @export + */ +export const DefaultApiFactory = function ( + configuration?: Configuration, + fetch?: FetchAPI, + basePath?: string +) { + return { + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span + * @param {*} [options] Override http request option. + * @throws {RequiredError} + */ + distributedCommopsGet( + run: string, + worker: string, + span: string, + options?: any + ) { + return DefaultApiFp(configuration).distributedCommopsGet( + run, + worker, + span, + options + )(fetch, basePath) + }, + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span + * @param {*} [options] Override http request option. + * @throws {RequiredError} + */ + distributedGpuinfoGet( + run: string, + worker: string, + span: string, + options?: any + ) { + return DefaultApiFp(configuration).distributedGpuinfoGet( + run, + worker, + span, + options + )(fetch, basePath) + }, + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span + * @param {*} [options] Override http request option. 
+ * @throws {RequiredError} + */ + distributedOverlapGet( + run: string, + worker: string, + span: string, + options?: any + ) { + return DefaultApiFp(configuration).distributedOverlapGet( + run, + worker, + span, + options + )(fetch, basePath) + }, + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span * @param {*} [options] Override http request option. * @throws {RequiredError} */ - workersGet( + distributedWaittimeGet( run: string, + worker: string, + span: string, options?: any - ): (fetch?: FetchAPI, basePath?: string) => Promise> { - const localVarFetchArgs = DefaultApiFetchParamCreator( - configuration - ).workersGet(run, options) - return ( - fetch: FetchAPI = portableFetch, - basePath: string = BASE_PATH - ) => { - return fetch( - basePath + localVarFetchArgs.url, - localVarFetchArgs.options - ).then((response) => { - if (response.status >= 200 && response.status < 300) { - return response.json() - } else { - throw response - } - }) - } - } - } -} - -/** - * DefaultApi - factory interface - * @export - */ -export const DefaultApiFactory = function ( - configuration?: Configuration, - fetch?: FetchAPI, - basePath?: string -) { - return { + ) { + return DefaultApiFp(configuration).distributedWaittimeGet( + run, + worker, + span, + options + )(fetch, basePath) + }, /** * * @param {string} run * @param {string} worker - * @param {string} view + * @param {string} span * @param {string} groupBy Group By * @param {*} [options] Override http request option. 
* @throws {RequiredError} @@ -1244,14 +2415,14 @@ export const DefaultApiFactory = function ( kernelGet( run: string, worker: string, - view: string, + span: string, groupBy: string, options?: any ) { return DefaultApiFp(configuration).kernelGet( run, worker, - view, + span, groupBy, options )(fetch, basePath) @@ -1260,7 +2431,7 @@ export const DefaultApiFactory = function ( * * @param {string} run * @param {string} worker - * @param {string} view + * @param {string} span * @param {string} [groupBy] Group By * @param {*} [options] Override http request option. * @throws {RequiredError} @@ -1268,14 +2439,14 @@ export const DefaultApiFactory = function ( kernelTableGet( run: string, worker: string, - view: string, + span: string, groupBy?: string, options?: any ) { return DefaultApiFp(configuration).kernelTableGet( run, worker, - view, + span, groupBy, options )(fetch, basePath) @@ -1284,7 +2455,23 @@ export const DefaultApiFactory = function ( * * @param {string} run * @param {string} worker - * @param {string} view + * @param {string} span + * @param {*} [options] Override http request option. + * @throws {RequiredError} + */ + memoryGet(run: string, worker: string, span: string, options?: any) { + return DefaultApiFp(configuration).memoryGet( + run, + worker, + span, + options + )(fetch, basePath) + }, + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span * @param {string} groupBy Group By * @param {*} [options] Override http request option. 
* @throws {RequiredError} @@ -1292,14 +2479,14 @@ export const DefaultApiFactory = function ( operationGet( run: string, worker: string, - view: string, + span: string, groupBy: string, options?: any ) { return DefaultApiFp(configuration).operationGet( run, worker, - view, + span, groupBy, options )(fetch, basePath) @@ -1308,7 +2495,37 @@ export const DefaultApiFactory = function ( * * @param {string} run * @param {string} worker - * @param {string} view + * @param {string} span + * @param {string} groupBy Group By + * @param {string} opName + * @param {string} [inputShape] + * @param {*} [options] Override http request option. + * @throws {RequiredError} + */ + operationStackGet( + run: string, + worker: string, + span: string, + groupBy: string, + opName: string, + inputShape?: string, + options?: any + ) { + return DefaultApiFp(configuration).operationStackGet( + run, + worker, + span, + groupBy, + opName, + inputShape, + options + )(fetch, basePath) + }, + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span * @param {string} groupBy Group By * @param {*} [options] Override http request option. * @throws {RequiredError} @@ -1316,14 +2533,14 @@ export const DefaultApiFactory = function ( operationTableGet( run: string, worker: string, - view: string, + span: string, groupBy: string, options?: any ) { return DefaultApiFp(configuration).operationTableGet( run, worker, - view, + span, groupBy, options )(fetch, basePath) @@ -1332,15 +2549,15 @@ export const DefaultApiFactory = function ( * * @param {string} run * @param {string} worker - * @param {string} view + * @param {string} span * @param {*} [options] Override http request option. 
* @throws {RequiredError} */ - overviewGet(run: string, worker: string, view: string, options?: any) { + overviewGet(run: string, worker: string, span: string, options?: any) { return DefaultApiFp(configuration).overviewGet( run, worker, - view, + span, options )(fetch, basePath) }, @@ -1356,15 +2573,29 @@ export const DefaultApiFactory = function ( * * @param {string} run * @param {string} worker - * @param {string} view * @param {*} [options] Override http request option. * @throws {RequiredError} */ - traceGet(run: string, worker: string, view: string, options?: any) { + spansGet(run: string, worker: string, options?: any) { + return DefaultApiFp(configuration).spansGet( + run, + worker, + options + )(fetch, basePath) + }, + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span + * @param {*} [options] Override http request option. + * @throws {RequiredError} + */ + traceGet(run: string, worker: string, span: string, options?: any) { return DefaultApiFp(configuration).traceGet( run, worker, - view, + span, options )(fetch, basePath) }, @@ -1380,14 +2611,16 @@ export const DefaultApiFactory = function ( /** * * @param {string} run + * @param {string} view * @param {*} [options] Override http request option. * @throws {RequiredError} */ - workersGet(run: string, options?: any) { - return DefaultApiFp(configuration).workersGet(run, options)( - fetch, - basePath - ) + workersGet(run: string, view: string, options?: any) { + return DefaultApiFp(configuration).workersGet( + run, + view, + options + )(fetch, basePath) } } } @@ -1403,7 +2636,99 @@ export class DefaultApi extends BaseAPI { * * @param {string} run * @param {string} worker - * @param {string} view + * @param {string} span + * @param {*} [options] Override http request option. 
+ * @throws {RequiredError} + * @memberof DefaultApi + */ + public distributedCommopsGet( + run: string, + worker: string, + span: string, + options?: any + ) { + return DefaultApiFp(this.configuration).distributedCommopsGet( + run, + worker, + span, + options + )(this.fetch, this.basePath) + } + + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span + * @param {*} [options] Override http request option. + * @throws {RequiredError} + * @memberof DefaultApi + */ + public distributedGpuinfoGet( + run: string, + worker: string, + span: string, + options?: any + ) { + return DefaultApiFp(this.configuration).distributedGpuinfoGet( + run, + worker, + span, + options + )(this.fetch, this.basePath) + } + + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span + * @param {*} [options] Override http request option. + * @throws {RequiredError} + * @memberof DefaultApi + */ + public distributedOverlapGet( + run: string, + worker: string, + span: string, + options?: any + ) { + return DefaultApiFp(this.configuration).distributedOverlapGet( + run, + worker, + span, + options + )(this.fetch, this.basePath) + } + + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span + * @param {*} [options] Override http request option. + * @throws {RequiredError} + * @memberof DefaultApi + */ + public distributedWaittimeGet( + run: string, + worker: string, + span: string, + options?: any + ) { + return DefaultApiFp(this.configuration).distributedWaittimeGet( + run, + worker, + span, + options + )(this.fetch, this.basePath) + } + + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span * @param {string} groupBy Group By * @param {*} [options] Override http request option. 
* @throws {RequiredError} @@ -1412,14 +2737,14 @@ export class DefaultApi extends BaseAPI { public kernelGet( run: string, worker: string, - view: string, + span: string, groupBy: string, options?: any ) { return DefaultApiFp(this.configuration).kernelGet( run, worker, - view, + span, groupBy, options )(this.fetch, this.basePath) @@ -1429,7 +2754,7 @@ export class DefaultApi extends BaseAPI { * * @param {string} run * @param {string} worker - * @param {string} view + * @param {string} span * @param {string} [groupBy] Group By * @param {*} [options] Override http request option. * @throws {RequiredError} @@ -1438,14 +2763,14 @@ export class DefaultApi extends BaseAPI { public kernelTableGet( run: string, worker: string, - view: string, + span: string, groupBy?: string, options?: any ) { return DefaultApiFp(this.configuration).kernelTableGet( run, worker, - view, + span, groupBy, options )(this.fetch, this.basePath) @@ -1455,7 +2780,25 @@ export class DefaultApi extends BaseAPI { * * @param {string} run * @param {string} worker - * @param {string} view + * @param {string} span + * @param {*} [options] Override http request option. + * @throws {RequiredError} + * @memberof DefaultApi + */ + public memoryGet(run: string, worker: string, span: string, options?: any) { + return DefaultApiFp(this.configuration).memoryGet( + run, + worker, + span, + options + )(this.fetch, this.basePath) + } + + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span * @param {string} groupBy Group By * @param {*} [options] Override http request option. 
* @throws {RequiredError} @@ -1464,14 +2807,14 @@ export class DefaultApi extends BaseAPI { public operationGet( run: string, worker: string, - view: string, + span: string, groupBy: string, options?: any ) { return DefaultApiFp(this.configuration).operationGet( run, worker, - view, + span, groupBy, options )(this.fetch, this.basePath) @@ -1481,7 +2824,39 @@ export class DefaultApi extends BaseAPI { * * @param {string} run * @param {string} worker - * @param {string} view + * @param {string} span + * @param {string} groupBy Group By + * @param {string} opName + * @param {string} [inputShape] + * @param {*} [options] Override http request option. + * @throws {RequiredError} + * @memberof DefaultApi + */ + public operationStackGet( + run: string, + worker: string, + span: string, + groupBy: string, + opName: string, + inputShape?: string, + options?: any + ) { + return DefaultApiFp(this.configuration).operationStackGet( + run, + worker, + span, + groupBy, + opName, + inputShape, + options + )(this.fetch, this.basePath) + } + + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span * @param {string} groupBy Group By * @param {*} [options] Override http request option. * @throws {RequiredError} @@ -1490,14 +2865,14 @@ export class DefaultApi extends BaseAPI { public operationTableGet( run: string, worker: string, - view: string, + span: string, groupBy: string, options?: any ) { return DefaultApiFp(this.configuration).operationTableGet( run, worker, - view, + span, groupBy, options )(this.fetch, this.basePath) @@ -1507,16 +2882,16 @@ export class DefaultApi extends BaseAPI { * * @param {string} run * @param {string} worker - * @param {string} view + * @param {string} span * @param {*} [options] Override http request option. 
* @throws {RequiredError} * @memberof DefaultApi */ - public overviewGet(run: string, worker: string, view: string, options?: any) { + public overviewGet(run: string, worker: string, span: string, options?: any) { return DefaultApiFp(this.configuration).overviewGet( run, worker, - view, + span, options )(this.fetch, this.basePath) } @@ -1538,16 +2913,32 @@ export class DefaultApi extends BaseAPI { * * @param {string} run * @param {string} worker - * @param {string} view * @param {*} [options] Override http request option. * @throws {RequiredError} * @memberof DefaultApi */ - public traceGet(run: string, worker: string, view: string, options?: any) { + public spansGet(run: string, worker: string, options?: any) { + return DefaultApiFp(this.configuration).spansGet( + run, + worker, + options + )(this.fetch, this.basePath) + } + + /** + * + * @param {string} run + * @param {string} worker + * @param {string} span + * @param {*} [options] Override http request option. + * @throws {RequiredError} + * @memberof DefaultApi + */ + public traceGet(run: string, worker: string, span: string, options?: any) { return DefaultApiFp(this.configuration).traceGet( run, worker, - view, + span, options )(this.fetch, this.basePath) } @@ -1569,14 +2960,16 @@ export class DefaultApi extends BaseAPI { /** * * @param {string} run + * @param {string} view * @param {*} [options] Override http request option. 
* @throws {RequiredError} * @memberof DefaultApi */ - public workersGet(run: string, options?: any) { - return DefaultApiFp(this.configuration).workersGet(run, options)( - this.fetch, - this.basePath - ) + public workersGet(run: string, view: string, options?: any) { + return DefaultApiFp(this.configuration).workersGet( + run, + view, + options + )(this.fetch, this.basePath) } } diff --git a/tb_plugin/fe/src/api/openapi.json b/tb_plugin/fe/src/api/openapi.json deleted file mode 100644 index 625256715..000000000 --- a/tb_plugin/fe/src/api/openapi.json +++ /dev/null @@ -1,611 +0,0 @@ -{ - "openapi": "3.0.1", - "info": { - "title": "Pytorch profile API", - "version": "1.0.0" - }, - "servers": [ - { - "url": "/data/plugin/pytorch_profiler" - } - ], - "paths": { - "/runs": { - "get": { - "responses": { - "200": { - "description": "successful operation", - "content": { - "*/*": { - "schema": { - "type": "array", - "items": { - "type": "string" - } - } - } - } - } - } - } - }, - "/workers": { - "get": { - "parameters": [ - { - "in": "query", - "name": "run", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "successful workers", - "content": { - "*/*": { - "schema": { - "type": "array", - "items": { - "type": "string" - } - } - } - } - } - } - } - }, - "/views": { - "get": { - "parameters": [ - { - "in": "query", - "name": "run", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "successful views", - "content": { - "*/*": { - "schema": { - "type": "array", - "items": { - "type": "string", - "enum": [ - "Overview", - "Operator", - "Kernel" - ] - } - } - } - } - } - } - } - }, - "/overview": { - "get": { - "parameters": [ - { - "in": "query", - "name": "run", - "required": true, - "schema": { - "type": "string" - } - }, - { - "in": "query", - "name": "worker", - "required": true, - "schema": { - "type": "string" - } - }, - { - "in": "query", - "name": 
"view", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "successful operation", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/Overview" - } - } - } - } - } - } - }, - "/operation": { - "get": { - "parameters": [ - { - "in": "query", - "name": "run", - "required": true, - "schema": { - "type": "string" - } - }, - { - "in": "query", - "name": "worker", - "required": true, - "schema": { - "type": "string" - } - }, - { - "in": "query", - "name": "view", - "required": true, - "schema": { - "type": "string" - } - }, - { - "in": "query", - "name": "group_by", - "required": true, - "schema": { - "type": "string", - "enum": [ - "Operation", - "OperationAndInputShape" - ] - }, - "description": "Group By" - } - ], - "responses": { - "200": { - "description": "successful operation", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/OperatorGraph" - } - } - } - } - } - } - }, - "/operation/table": { - "get": { - "parameters": [ - { - "in": "query", - "name": "run", - "required": true, - "schema": { - "type": "string" - } - }, - { - "in": "query", - "name": "worker", - "required": true, - "schema": { - "type": "string" - } - }, - { - "in": "query", - "name": "view", - "required": true, - "schema": { - "type": "string" - } - }, - { - "in": "query", - "name": "group_by", - "required": true, - "schema": { - "type": "string", - "enum": [ - "Operation", - "OperationAndInputShape" - ] - }, - "description": "Group By" - } - ], - "responses": { - "200": { - "description": "successful operation", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/TableData" - } - } - } - } - } - } - }, - "/kernel": { - "get": { - "parameters": [ - { - "in": "query", - "name": "run", - "required": true, - "schema": { - "type": "string" - } - }, - { - "in": "query", - "name": "worker", - "required": true, - "schema": { - "type": "string" - } - }, - { - "in": "query", - "name": 
"view", - "required": true, - "schema": { - "type": "string" - } - }, - { - "in": "query", - "name": "group_by", - "required": true, - "schema": { - "type": "string", - "enum": [ - "Kernel", - "KernelNameAndOpName" - ] - }, - "description": "Group By" - } - ], - "responses": { - "200": { - "description": "successful operation", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/KernelGraph" - } - } - } - } - } - } - }, - "/kernel/table": { - "get": { - "parameters": [ - { - "in": "query", - "name": "run", - "required": true, - "schema": { - "type": "string" - } - }, - { - "in": "query", - "name": "worker", - "required": true, - "schema": { - "type": "string" - } - }, - { - "in": "query", - "name": "view", - "required": true, - "schema": { - "type": "string" - } - }, - { - "in": "query", - "name": "group_by", - "required": false, - "schema": { - "type": "string", - "enum": [ - "Kernel", - "KernelNameAndOpName" - ] - }, - "description": "Group By" - } - ], - "responses": { - "200": { - "description": "successful kernel", - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/TableData" - } - } - } - } - } - } - }, - "/trace": { - "get": { - "parameters": [ - { - "in": "query", - "name": "run", - "required": true, - "schema": { - "type": "string" - } - }, - { - "in": "query", - "name": "worker", - "required": true, - "schema": { - "type": "string" - } - }, - { - "in": "query", - "name": "view", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "successful trace data", - "content": { - "*/*": { - "schema": { - "type": "object" - } - } - } - } - } - } - } - }, - "components": { - "schemas": { - "Performance": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "value": { - "type": "string" - }, - "extra": { - "type": "string" - }, - "children": { - "type": "array", - "items": 
{ - "$ref": "#/components/schemas/Performance" - } - } - } - }, - "Environment": { - "type": "object", - "required": [ - "title", - "value" - ], - "properties": { - "title": { - "type": "string" - }, - "value": { - "type": "string" - } - } - }, - "GraphColumn": { - "type": "object", - "required": [ - "type", - "name" - ], - "properties": { - "type": { - "type": "string" - }, - "name": { - "type": "string" - }, - "role": { - "type": "string" - }, - "p": { - "type": "object", - "properties": { - "html": { - "type": "boolean" - } - } - } - } - }, - "ValueAndFormat": { - "type": "object", - "required": [ - "v", - "f" - ], - "properties": { - "v": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "number" - }, - { - "type": "boolean" - } - ] - }, - "f": { - "type": "string" - } - } - }, - "Graph": { - "type": "object", - "required": [ - "columns", - "rows" - ], - "properties": { - "title": { - "type": "string" - }, - "columns": { - "type": "array", - "items": { - "$ref": "#/components/schemas/GraphColumn" - } - }, - "rows": { - "type": "array", - "items": { - "type": "array", - "items": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "$ref": "#/components/schemas/ValueAndFormat" - } - ] - } - } - } - } - }, - "Overview": { - "type": "object", - "required": [ - "performance", - "environments", - "steps", - "recommendations" - ], - "properties": { - "performance": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Performance" - } - }, - "environments": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Environment" - } - }, - "steps": { - "$ref": "#/components/schemas/Graph" - }, - "recommendations": { - "type": "string" - } - } - }, - "OperatorGraph": { - "type": "object", - "required": [ - "device_total_timeA", - "device_self_time", - "host_total_time", - "host_self_time" - ], - "properties": { - "device_total_time": { - "$ref": "#/components/schemas/Graph" - }, - 
"device_self_time": { - "$ref": "#/components/schemas/Graph" - }, - "host_total_time": { - "$ref": "#/components/schemas/Graph" - }, - "host_self_time": { - "$ref": "#/components/schemas/Graph" - } - } - }, - "TableData": { - "type": "object", - "required": [ - "data" - ], - "properties": { - "data": { - "$ref": "#/components/schemas/Graph" - } - } - }, - "KernelGraph": { - "type": "object", - "required": [ - "total" - ], - "properties": { - "total": { - "$ref": "#/components/schemas/Graph" - } - } - } - } - } -} - diff --git a/tb_plugin/fe/src/api/openapi.yaml b/tb_plugin/fe/src/api/openapi.yaml new file mode 100644 index 000000000..44f0c9694 --- /dev/null +++ b/tb_plugin/fe/src/api/openapi.yaml @@ -0,0 +1,726 @@ +openapi: 3.0.1 +info: + title: Pytorch profile API + version: 1.0.0 +servers: + - url: . +paths: + /runs: + get: + responses: + '200': + description: successful operation + content: + '*/*': + schema: + type: array + items: + type: string + /views: + get: + parameters: + - in: query + name: run + required: true + schema: + type: string + responses: + '200': + description: successful views + content: + '*/*': + schema: + type: array + items: + type: string + enum: + - Overview + - Operator + - Kernel + - Trace + - Distributed + - Memory + /workers: + get: + parameters: + - in: query + name: run + required: true + schema: + type: string + - in: query + name: view + required: true + schema: + type: string + responses: + '200': + description: successful workers + content: + '*/*': + schema: + type: array + items: + type: string + /spans: + get: + parameters: + - in: query + name: run + required: true + schema: + type: string + - in: query + name: worker + required: true + schema: + type: string + responses: + '200': + description: successful spans + content: + '*/*': + schema: + type: array + items: + type: string + /overview: + get: + parameters: + - in: query + name: run + required: true + schema: + type: string + - in: query + name: worker + required: 
true + schema: + type: string + - in: query + name: span + required: true + schema: + type: string + responses: + '200': + description: successful operation + content: + '*/*': + schema: + $ref: '#/components/schemas/Overview' + /operation: + get: + parameters: + - in: query + name: run + required: true + schema: + type: string + - in: query + name: worker + required: true + schema: + type: string + - in: query + name: span + required: true + schema: + type: string + - in: query + name: group_by + required: true + schema: + type: string + enum: + - Operation + - OperationAndInputShape + description: Group By + responses: + '200': + description: successful operation + content: + '*/*': + schema: + $ref: '#/components/schemas/OperatorGraph' + /operation/table: + get: + parameters: + - in: query + name: run + required: true + schema: + type: string + - in: query + name: worker + required: true + schema: + type: string + - in: query + name: span + required: true + schema: + type: string + - in: query + name: group_by + required: true + schema: + type: string + enum: + - Operation + - OperationAndInputShape + description: Group By + responses: + '200': + description: successful operation + content: + '*/*': + schema: + $ref: '#/components/schemas/OperationTableData' + /operation/stack: + get: + parameters: + - in: query + name: run + required: true + schema: + type: string + - in: query + name: worker + required: true + schema: + type: string + - in: query + name: span + required: true + schema: + type: string + - in: query + name: group_by + required: true + schema: + type: string + enum: + - Operation + - OperationAndInputShape + description: Group By + - in: query + name: op_name + required: true + schema: + type: string + - in: query + name: input_shape + schema: + type: string + responses: + '200': + description: successful operation + content: + '*/*': + schema: + $ref: '#/components/schemas/CallStackTableData' + /distributed/overlap: + get: + parameters: + - in: 
query + name: run + required: true + schema: + type: string + - in: query + name: worker + required: true + schema: + type: string + - in: query + name: span + required: true + schema: + type: string + responses: + '200': + description: successful operation + content: + '*/*': + schema: + $ref: '#/components/schemas/DistributedGraph' + /distributed/waittime: + get: + parameters: + - in: query + name: run + required: true + schema: + type: string + - in: query + name: worker + required: true + schema: + type: string + - in: query + name: span + required: true + schema: + type: string + responses: + '200': + description: successful operation + content: + '*/*': + schema: + $ref: '#/components/schemas/DistributedGraph' + /distributed/commops: + get: + parameters: + - in: query + name: run + required: true + schema: + type: string + - in: query + name: worker + required: true + schema: + type: string + - in: query + name: span + required: true + schema: + type: string + responses: + '200': + description: successful operation + content: + '*/*': + schema: + type: object + required: + - metadata + - data + properties: + metadata: + type: object + required: + - title + properties: + title: + type: string + data: + type: object + /distributed/gpuinfo: + get: + parameters: + - in: query + name: run + required: true + schema: + type: string + - in: query + name: worker + required: true + schema: + type: string + - in: query + name: span + required: true + schema: + type: string + responses: + '200': + description: successful operation + content: + '*/*': + schema: + $ref: '#/components/schemas/GpuInfo' + /memory: + get: + parameters: + - in: query + name: run + required: true + schema: + type: string + - in: query + name: worker + required: true + schema: + type: string + - in: query + name: span + required: true + schema: + type: string + responses: + '200': + description: successful operation + content: + '*/*': + schema: + $ref: '#/components/schemas/MemoryData' + 
/kernel: + get: + parameters: + - in: query + name: run + required: true + schema: + type: string + - in: query + name: worker + required: true + schema: + type: string + - in: query + name: span + required: true + schema: + type: string + - in: query + name: group_by + required: true + schema: + type: string + enum: + - Kernel + - KernelNameAndOpName + description: Group By + responses: + '200': + description: successful operation + content: + '*/*': + schema: + $ref: '#/components/schemas/KernelGraph' + /kernel/table: + get: + parameters: + - in: query + name: run + required: true + schema: + type: string + - in: query + name: worker + required: true + schema: + type: string + - in: query + name: span + required: true + schema: + type: string + - in: query + name: group_by + required: false + schema: + type: string + enum: + - Kernel + - KernelNameAndOpName + description: Group By + responses: + '200': + description: successful kernel + content: + '*/*': + schema: + $ref: '#/components/schemas/TableData' + /trace: + get: + parameters: + - in: query + name: run + required: true + schema: + type: string + - in: query + name: worker + required: true + schema: + type: string + - in: query + name: span + required: true + schema: + type: string + responses: + '200': + description: successful trace data + content: + '*/*': + schema: + type: object +components: + schemas: + Performance: + type: object + required: + - name + properties: + name: + type: string + description: + type: string + value: + type: string + extra: + type: string + children: + type: array + items: + $ref: '#/components/schemas/Performance' + Environment: + type: object + required: + - title + - value + properties: + title: + type: string + value: + type: string + GraphColumn: + type: object + required: + - type + - name + properties: + type: + type: string + name: + type: string + role: + type: string + p: + type: object + properties: + html: + type: boolean + ValueAndFormat: + type: object + 
required: + - v + - f + properties: + v: + oneOf: + - type: string + - type: number + - type: boolean + f: + type: string + Graph: + type: object + required: + - columns + - rows + properties: + title: + type: string + columns: + type: array + items: + $ref: '#/components/schemas/GraphColumn' + rows: + type: array + items: + type: array + items: + oneOf: + - type: string + - type: number + - type: boolean + - $ref: '#/components/schemas/ValueAndFormat' + Overview: + type: object + required: + - performance + - environments + - steps + - recommendations + properties: + performance: + type: array + items: + $ref: '#/components/schemas/Performance' + environments: + type: array + items: + $ref: '#/components/schemas/Environment' + steps: + $ref: '#/components/schemas/Graph' + recommendations: + type: string + gpu_metrics: + $ref: '#/components/schemas/GpuMetrics' + OperatorGraph: + type: object + required: + - device_total_timeA + - device_self_time + - host_total_time + - host_self_time + properties: + device_total_time: + $ref: '#/components/schemas/Graph' + device_self_time: + $ref: '#/components/schemas/Graph' + host_total_time: + $ref: '#/components/schemas/Graph' + host_self_time: + $ref: '#/components/schemas/Graph' + TableData: + type: object + required: + - data + properties: + data: + $ref: '#/components/schemas/Graph' + KernelGraph: + type: object + required: + - total + properties: + total: + $ref: '#/components/schemas/Graph' + OperationTableData: + type: array + items: + type: object + required: + - name + - calls + - host_self_duration + - host_total_duration + - has_call_stack + properties: + name: + type: string + input_shape: + type: string + calls: + type: number + device_self_duration: + type: number + device_total_duration: + type: number + host_self_duration: + type: number + host_total_duration: + type: number + has_call_stack: + type: boolean + CallStackTableData: + type: array + items: + type: object + required: + - name + - calls + - 
host_self_duration + - host_total_duration + properties: + name: + type: string + input_shape: + type: string + calls: + type: number + device_self_duration: + type: number + device_total_duration: + type: number + host_self_duration: + type: number + host_total_duration: + type: number + call_stack: + type: string + DistributedGraph: + type: object + required: + - metadata + - data + properties: + metadata: + type: object + required: + - title + - legends + - units + properties: + title: + type: string + legends: + type: array + items: + type: string + units: + type: string + data: + type: object + GpuInfo: + type: object + required: + - metadata + - data + properties: + metadata: + type: object + required: + - title + properties: + title: + type: string + data: + type: object + GpuMetrics: + type: object + required: + - data + - tooltip + properties: + data: + type: array + items: + $ref: '#/components/schemas/GpuMetric' + tooltip: + type: string + GpuMetric: + type: object + required: + - title + - value + properties: + title: + type: string + value: + type: string + MemoryData: + type: object + required: + - metadata + - data + properties: + metadata: + $ref: '#/components/schemas/MemoryTableMetadata' + data: + type: object + MemoryTableMetadata: + type: object + required: + - title + - default_device + - search + - sort + - value + properties: + title: + type: string + default_device: + type: string + search: + type: string + sort: + type: string diff --git a/tb_plugin/fe/src/apifix.d.ts b/tb_plugin/fe/src/apifix.d.ts index f641db896..949c82488 100644 --- a/tb_plugin/fe/src/apifix.d.ts +++ b/tb_plugin/fe/src/apifix.d.ts @@ -2,7 +2,7 @@ * Copyright (c) Microsoft Corporation. All rights reserved. 
*--------------------------------------------------------------------------------------------*/ -import { OperatorGraph } from './api/generated/api' +import { OperatorGraph, OperationTableDataInner } from './api/generated/api' declare module './api/generated/api' { export interface OperatorGraph { @@ -11,4 +11,30 @@ declare module './api/generated/api' { host_total_time: OperatorGraph['hostTotalTime'] host_self_time: OperatorGraph['hostSelfTime'] } + + export interface OperationTableDataInner { + input_shape: OperationTableDataInner['inputShape'] + device_self_duration: OperationTableDataInner['deviceSelfDuration'] + device_total_duration: OperationTableDataInner['deviceTotalDuration'] + host_self_duration: OperationTableDataInner['hostSelfDuration'] + host_total_duration: OperationTableDataInner['hostTotalDuration'] + has_call_stack: OperationTableDataInner['hasCallStack'] + } + + export interface CallStackTableDataInner { + input_shape: CallStackTableDataInner['inputShape'] + device_self_duration: CallStackTableDataInner['deviceSelfDuration'] + device_total_duration: CallStackTableDataInner['deviceTotalDuration'] + host_self_duration: CallStackTableDataInner['hostSelfDuration'] + host_total_duration: CallStackTableDataInner['hostTotalDuration'] + call_stack: CallStackTableDataInner['callStack'] + } + + export interface Overview { + gpu_metrics: Overview['gpuMetrics'] + } + + export interface MemoryTableMetadata { + default_device: MemoryTableMetadata['defaultDevice'] + } } diff --git a/tb_plugin/fe/src/app.tsx b/tb_plugin/fe/src/app.tsx index 52bd8ed77..2028138db 100644 --- a/tb_plugin/fe/src/app.tsx +++ b/tb_plugin/fe/src/app.tsx @@ -2,43 +2,52 @@ * Copyright (c) Microsoft Corporation. All rights reserved. 
*--------------------------------------------------------------------------------------------*/ +import ClickAwayListener from '@material-ui/core/ClickAwayListener' import CssBaseline from '@material-ui/core/CssBaseline' +import Divider from '@material-ui/core/Divider' import Drawer from '@material-ui/core/Drawer' +import Fab from '@material-ui/core/Fab' import FormControl from '@material-ui/core/FormControl' import IconButton from '@material-ui/core/IconButton' import ListSubheader from '@material-ui/core/ListSubheader' -import { makeStyles } from '@material-ui/core/styles' import MenuItem from '@material-ui/core/MenuItem' import Select, { SelectProps } from '@material-ui/core/Select' -import { Overview } from './components/Overview' -import Divider from '@material-ui/core/Divider' -import Fab from '@material-ui/core/Fab' -import ClickAwayListener from '@material-ui/core/ClickAwayListener'; -import * as React from 'react' +import { makeStyles } from '@material-ui/core/styles' +import ChevronLeftIcon from '@material-ui/icons/ChevronLeft' +import ChevronRightIcon from '@material-ui/icons/ChevronRight' +import 'antd/es/button/style/css' +import 'antd/es/list/style/css' +import 'antd/es/table/style/css' import clsx from 'clsx' -import { Operator } from './components/Operator' -import { Kernel } from './components/Kernel' +import * as React from 'react' import * as api from './api' -import { firstOrUndefined } from './utils' +import { DistributedView } from './components/DistributedView' +import { FullCircularProgress } from './components/FullCircularProgress' +import { Kernel } from './components/Kernel' +import { MemoryView } from './components/MemoryView' +import { Operator } from './components/Operator' +import { Overview } from './components/Overview' +import { TraceView } from './components/TraceView' import { setup } from './setup' import './styles.css' -import { TraceView } from './components/TraceView' -import { FullCircularProgress } from 
'./components/FullCircularProgress' -import ChevronRightIcon from '@material-ui/icons/ChevronRight' -import ChevronLeftIcon from '@material-ui/icons/ChevronLeft' +import { firstOrUndefined, sleep } from './utils' export enum Views { Overview = 'Overview', Operator = 'Operator', Kernel = 'Kernel', - Trace = 'Trace' + Trace = 'Trace', + Distributed = 'Distributed', + Memory = 'Memory' } const ViewNames = { [Views.Overview]: Views.Overview, [Views.Operator]: Views.Operator, [Views.Kernel]: 'GPU Kernel', - [Views.Trace]: Views.Trace + [Views.Trace]: Views.Trace, + [Views.Distributed]: Views.Distributed, + [Views.Memory]: Views.Memory } const drawerWidth = 340 @@ -125,6 +134,9 @@ export const App = () => { const [workers, setWorkers] = React.useState([]) const [worker, setWorker] = React.useState('') + const [spans, setSpans] = React.useState([]) + const [span, setSpan] = React.useState('') + const [views, setViews] = React.useState([]) const [view, setView] = React.useState('') const [loaded, setLoaded] = React.useState(false) @@ -136,55 +148,87 @@ export const App = () => { }) }, []) + const continuouslyFetchRuns = async () => { + while (true) { + try { + const runs = await api.defaultApi.runsGet() + setRuns(runs) + } catch (e) { + console.info('Cannot fetch runs: ', e) + } + await sleep(5000) + } + } + React.useEffect(() => { - api.defaultApi.runsGet().then((runs) => { - setRuns(runs) - }) + continuouslyFetchRuns() }, []) React.useEffect(() => { - setRun(firstOrUndefined(runs) ?? '') + if (!run || !runs.includes(run)) { + setRun(firstOrUndefined(runs) ?? '') + } }, [runs]) React.useEffect(() => { if (run) { - api.defaultApi.workersGet(run).then((workers) => { - setWorkers(workers) + api.defaultApi.viewsGet(run).then((rawViews) => { + const views = rawViews + .map((v) => Views[Views[v as Views]]) + .filter(Boolean) + setViews(views) }) } }, [run]) + React.useEffect(() => { + setView(firstOrUndefined(views) ?? 
'') + }, [views]) + + React.useEffect(() => { + if (run && view) { + api.defaultApi.workersGet(run, view).then((workers) => { + setWorkers(workers) + }) + } + }, [run, view]) + React.useEffect(() => { setWorker(firstOrUndefined(workers) ?? '') }, [workers]) React.useEffect(() => { - if (run) { - api.defaultApi.viewsGet(run).then((rawViews) => { - const views = rawViews - .map((v) => Views[Views[v as Views]]) - .filter(Boolean) - setViews(views) + if (run && worker) { + api.defaultApi.spansGet(run, worker).then((spans) => { + setSpans(spans) }) } - }, [run]) + }, [run, worker]) React.useEffect(() => { - setView(firstOrUndefined(views) ?? '') - }, [views]) + setSpan(firstOrUndefined(spans) ?? '') + }, [spans]) const handleRunChange: SelectProps['onChange'] = (event) => { setRun(event.target.value as string) - setWorker('') setView('') + setWorker('') + setSpan('') + } + + const handleViewChange: SelectProps['onChange'] = (event) => { + setView(event.target.value as Views) + setWorker('') + setSpan('') } const handleWorkerChange: SelectProps['onChange'] = (event) => { setWorker(event.target.value as string) + setSpan('') } - const handleViewChange: SelectProps['onChange'] = (event) => { - setView(event.target.value as Views) + const handleSpanChange: SelectProps['onChange'] = (event) => { + setSpan(event.target.value as string) } const [open, setOpen] = React.useState(true) @@ -204,19 +248,53 @@ export const App = () => { } const renderContent = () => { - if (!loaded || !run || !worker || !view) { + if (!loaded || !run || !worker || !view || !span) { return } switch (view) { case Views.Overview: - return + return case Views.Operator: - return + return case Views.Kernel: - return + return case Views.Trace: - return + return ( + + ) + case Views.Distributed: + return + case Views.Memory: + return + } + } + + const spanComponent = () => { + const spanFragment = ( + + Spans + + + + + + + ) + + if (!spans || spans.length <= 1) { + return
{spanFragment}
+ } else { + return spanFragment } } @@ -257,26 +335,27 @@ export const App = () => { - Workers + Views - + {views.map((view) => ( + {ViewNames[view]} ))} - Views + Workers - + {workers.map((worker) => ( + {worker} ))} - + + {spanComponent()} {!open && ( ({ + root: { + flexGrow: 1 + }, + verticalInput: { + display: 'flex', + alignItems: 'center' + }, + inputWidth: { + width: '4em' + }, + inputWidthOverflow: { + minWidth: '15em', + whiteSpace: 'nowrap' + }, + description: { + marginLeft: theme.spacing(1) + } +})) + +export const DistributedView: React.FC = (props) => { + const tooltipCommonClasses = useTooltipCommonStyles() + const chartHeaderRenderer = React.useMemo( + () => makeChartHeaderRenderer(tooltipCommonClasses), + [tooltipCommonClasses] + ) + + let { run, worker, span } = props + const classes = useStyles() + + const [overlapGraph, setOverlapGraph] = React.useState< + DistributedGraph | undefined + >(undefined) + const [waittimeGraph, setWaittimeGraph] = React.useState< + DistributedGraph | undefined + >(undefined) + const [commopsTableData, setCommopsTableData] = React.useState< + any | undefined + >(undefined) + const [gpuInfo, setGpuInfo] = React.useState(undefined) + const [commopsTableTitle, setCommopsTableTitle] = React.useState('') + const [commopsWorkers, setCommopsWorkers] = React.useState([]) + const [overlapSteps, setOverlapSteps] = React.useState([]) + const [waittimeSteps, setWaittimeSteps] = React.useState([]) + const [overlapStep, setOverlapStep] = React.useState('') + const [waittimeStep, setWaittimeStep] = React.useState('') + const [commopsWorker, setCommopsWorker] = React.useState('') + + React.useEffect(() => { + if (waittimeSteps.includes('all')) { + setWaittimeStep('all') + } else { + setWaittimeStep(firstOrUndefined(waittimeSteps) ?? '') + } + }, [waittimeSteps]) + + React.useEffect(() => { + if (overlapSteps.includes('all')) { + setOverlapStep('all') + } else { + setOverlapStep(firstOrUndefined(overlapSteps) ?? 
'') + } + }, [overlapSteps]) + + React.useEffect(() => { + setCommopsWorker(firstOrUndefined(commopsWorkers) ?? '') + }, [commopsWorkers]) + + React.useEffect(() => { + api.defaultApi.distributedOverlapGet(run, 'All', span).then((resp) => { + setOverlapGraph(resp) + setOverlapSteps(Object.keys(resp.data)) + }) + api.defaultApi.distributedWaittimeGet(run, 'All', span).then((resp) => { + setWaittimeGraph(resp) + setWaittimeSteps(Object.keys(resp.data)) + }) + api.defaultApi.distributedCommopsGet(run, 'All', span).then((resp) => { + setCommopsTableData(resp.data) + setCommopsWorkers(Object.keys(resp.data)) + setCommopsTableTitle(resp.metadata.title) + }) + api.defaultApi.distributedGpuinfoGet(run, 'All', span).then((resp) => { + setGpuInfo(resp) + }) + }, [run, worker, span]) + + const onCommopsWorkerChanged: SelectProps['onChange'] = (event) => { + setCommopsWorker(event.target.value as string) + } + + const onOverlapStepChanged: SelectProps['onChange'] = (event) => { + setOverlapStep(event.target.value as string) + } + + const onWaittimeStepChanged: SelectProps['onChange'] = (event) => { + setWaittimeStep(event.target.value as string) + } + + const getColumnChartData = ( + distributedGraph?: DistributedGraph, + step?: string + ) => { + if (!distributedGraph || !step) return undefined + const barLabels = Object.keys(distributedGraph.data[step]) + return { + legends: distributedGraph.metadata.legends, + barLabels, + barHeights: barLabels.map((label) => distributedGraph.data[step][label]) + } + } + const overlapData = React.useMemo( + () => getColumnChartData(overlapGraph, overlapStep), + [overlapGraph, overlapStep] + ) + const waittimeData = React.useMemo( + () => getColumnChartData(waittimeGraph, waittimeStep), + [waittimeGraph, waittimeStep] + ) + + const getTableData = (tableData?: any, worker?: string) => { + if (!tableData || !worker) return undefined + return tableData[worker] as Graph + } + const commopsTable = getTableData(commopsTableData, commopsWorker) + + 
return ( +
+ + + + + {gpuInfo && ( + + + + + + + + + )} + + + {(chartData) => ( + + + + + Step + + + + + + + {overlapGraph?.metadata?.title && ( + + )} + + + )} + + + + + + {(chartData) => ( + + + + + Step + + + + + + + {waittimeGraph?.metadata?.title && ( + + )} + + + )} + + + + + + + + + + Worker + + + + + + + + + + {(graph) => } + + + + + + + +
+ ) +} diff --git a/tb_plugin/fe/src/components/FullCircularProgress.tsx b/tb_plugin/fe/src/components/FullCircularProgress.tsx index f0c75dc88..5212bd74b 100644 --- a/tb_plugin/fe/src/components/FullCircularProgress.tsx +++ b/tb_plugin/fe/src/components/FullCircularProgress.tsx @@ -1,10 +1,9 @@ /*--------------------------------------------------------------------------------------------- * Copyright (c) Microsoft Corporation. All rights reserved. *--------------------------------------------------------------------------------------------*/ - -import * as React from 'react' import CircularProgress from '@material-ui/core/CircularProgress' import { makeStyles } from '@material-ui/core/styles' +import * as React from 'react' const useStyles = makeStyles(() => ({ root: { diff --git a/tb_plugin/fe/src/components/GpuInfoTable.tsx b/tb_plugin/fe/src/components/GpuInfoTable.tsx new file mode 100644 index 000000000..4c624db05 --- /dev/null +++ b/tb_plugin/fe/src/components/GpuInfoTable.tsx @@ -0,0 +1,134 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. 
+ *--------------------------------------------------------------------------------------------*/ + +import { makeStyles } from '@material-ui/core/styles' +import * as React from 'react' + +export interface IProps { + gpuInfo: any +} + +const useStyles = makeStyles((theme) => ({ + root: { + border: '1px solid #E0E0E0', + borderCollapse: 'collapse', + width: '100%' + }, + td: { + borderTop: '1px solid #E0E0E0', + borderBottom: '1px solid #E0E0E0', + borderCollapse: 'collapse', + paddingLeft: 10, + paddingRight: 10 + }, + nodeTd: { + fontWeight: 'bold' + }, + pidTd: { + fontWeight: 'normal' + }, + gpuTd: { + fontWeight: 'normal' + }, + keyTd: { + fontWeight: 'normal', + textAlign: 'right' + }, + valueTd: { + fontWeight: 'bold' + } +})) + +interface TableCellInfo { + content: string + rowspan: number + cellType: 'node' | 'pid' | 'gpu' | 'key' | 'value' + last?: boolean +} + +function makeTableCellInfo(gpuInfo: any): TableCellInfo[][] { + const rows: TableCellInfo[][] = [] + let curr_row: TableCellInfo[] = [] + rows.push(curr_row) + Object.keys(gpuInfo.data).forEach(function (node_name) { + const node_cell = { + content: node_name, + rowspan: 0, + cellType: 'node' as const + } + const i = rows.length + curr_row.push(node_cell) + Object.keys(gpuInfo.data[node_name]).forEach(function (pid) { + const pid_cell = { content: pid, rowspan: 0, cellType: 'pid' as const } + const i = rows.length + curr_row.push(pid_cell) + Object.keys(gpuInfo.data[node_name][pid]).forEach(function (gpu) { + const gpu_cell = { content: gpu, rowspan: 0, cellType: 'gpu' as const } + const i = rows.length + curr_row.push(gpu_cell) + Object.keys(gpuInfo.data[node_name][pid][gpu]).forEach(function ( + key_name + ) { + curr_row.push({ + content: key_name, + rowspan: 1, + cellType: 'key' as const + }) + const value: string = gpuInfo.data[node_name][pid][gpu][key_name] + curr_row.push({ + content: value, + rowspan: 1, + cellType: 'value' as const + }) + curr_row = [] + rows.push(curr_row) + }) + 
gpu_cell.rowspan = rows.length - i + }) + pid_cell.rowspan = rows.length - i + }) + node_cell.rowspan = rows.length - i + }) + rows.pop() + return rows +} + +export const GpuInfoTable: React.FC = (props) => { + const classes = useStyles() + interface TableCellInfo { + content: string + rowspan: number + cellType: 'node' | 'pid' | 'gpu' | 'key' | 'value' + } + + const rows = React.useMemo(() => makeTableCellInfo(props.gpuInfo), [ + props.gpuInfo + ]) + + const cellToClass = { + node: classes.nodeTd, + pid: classes.pidTd, + gpu: classes.gpuTd, + key: classes.keyTd, + value: classes.valueTd + } + + const renderCell = function (info: TableCellInfo) { + let cellClass = cellToClass[info.cellType] + let content = info.cellType == 'key' ? info.content + ':' : info.content + return ( + + {content} + + ) + } + + return ( + + {rows.map((row) => ( + {row.map(renderCell)} + ))} +
+ ) +} diff --git a/tb_plugin/fe/src/components/Kernel.tsx b/tb_plugin/fe/src/components/Kernel.tsx index 127f70289..04d8759a5 100644 --- a/tb_plugin/fe/src/components/Kernel.tsx +++ b/tb_plugin/fe/src/components/Kernel.tsx @@ -3,40 +3,36 @@ *--------------------------------------------------------------------------------------------*/ import Card from '@material-ui/core/Card' -import Grid from '@material-ui/core/Grid' -import TextField, { - TextFieldProps, - StandardTextFieldProps -} from '@material-ui/core/TextField' -import CardHeader from '@material-ui/core/CardHeader' import CardContent from '@material-ui/core/CardContent' -import { makeStyles } from '@material-ui/core/styles' -import MenuItem from '@material-ui/core/MenuItem' +import CardHeader from '@material-ui/core/CardHeader' +import FormControlLabel from '@material-ui/core/FormControlLabel' +import Grid from '@material-ui/core/Grid' import InputLabel from '@material-ui/core/InputLabel' +import MenuItem from '@material-ui/core/MenuItem' +import Radio from '@material-ui/core/Radio' +import RadioGroup, { RadioGroupProps } from '@material-ui/core/RadioGroup' import Select, { SelectProps } from '@material-ui/core/Select' +import { makeStyles } from '@material-ui/core/styles' +import TextField, { + StandardTextFieldProps, + TextFieldProps +} from '@material-ui/core/TextField' import * as React from 'react' -import { PieChart } from './charts/PieChart' -import { TableChart } from './charts/TableChart' import * as api from '../api' import { Graph } from '../api' -import { DataLoading } from './DataLoading' -import { UseTop, useTopN } from '../utils/top' -import RadioGroup, { RadioGroupProps } from '@material-ui/core/RadioGroup' -import Radio from '@material-ui/core/Radio' -import FormControlLabel from '@material-ui/core/FormControlLabel' +import { KernelGroupBy } from '../constants/groupBy' import { useSearch } from '../utils/search' -import { useTooltipCommonStyles, makeChartHeaderRenderer } from './helpers' 
+import { topIsValid, UseTop, useTopN } from '../utils/top' +import { AntTableChart } from './charts/AntTableChart' +import { PieChart } from './charts/PieChart' +import { DataLoading } from './DataLoading' +import { makeChartHeaderRenderer, useTooltipCommonStyles } from './helpers' import { GPUKernelTotalTimeTooltip } from './TooltipDescriptions' export interface IProps { run: string worker: string - view: string -} - -enum GroupBy { - Kernel = 'Kernel', - KernelNameAndOpName = 'KernelNameAndOpName' + span: string } const useStyles = makeStyles((theme) => ({ @@ -60,7 +56,7 @@ const useStyles = makeStyles((theme) => ({ })) export const Kernel: React.FC = (props) => { - const { run, worker, view } = props + const { run, worker, span } = props const classes = useStyles() const tooltipCommonClasses = useTooltipCommonStyles() const chartHeaderRenderer = React.useMemo( @@ -74,12 +70,12 @@ export const Kernel: React.FC = (props) => { const [kernelTable, setKernelTable] = React.useState( undefined ) - const [groupBy, setGroupBy] = React.useState(GroupBy.Kernel) + const [groupBy, setGroupBy] = React.useState(KernelGroupBy.Kernel) const [searchKernelName, setSearchKernelName] = React.useState('') const [searchOpName, setSearchOpName] = React.useState('') const [sortColumn, setSortColumn] = React.useState(2) - const [top, actualTop, useTop, setTop, setUseTop] = useTopN({ + const [topText, actualTop, useTop, setTopText, setUseTop] = useTopN({ defaultUseTop: UseTop.Use, defaultTop: 10 }) @@ -90,21 +86,23 @@ export const Kernel: React.FC = (props) => { React.useEffect(() => { if (kernelGraph) { - setTop(Math.min(kernelGraph.rows?.length, 10)) + setTopText(String(Math.min(kernelGraph.rows?.length, 10))) } }, [kernelGraph]) React.useEffect(() => { - api.defaultApi.kernelTableGet(run, worker, view, groupBy).then((resp) => { + api.defaultApi.kernelTableGet(run, worker, span, groupBy).then((resp) => { setKernelTable(resp.data) }) - }, [run, worker, view, groupBy]) + }, [run, worker, 
span, groupBy]) React.useEffect(() => { - api.defaultApi.kernelGet(run, worker, view, GroupBy.Kernel).then((resp) => { - setKernelGraph(resp.total) - }) - }, [run, worker, view]) + api.defaultApi + .kernelGet(run, worker, span, KernelGroupBy.Kernel) + .then((resp) => { + setKernelGraph(resp.total) + }) + }, [run, worker, span]) const [searchedKernelTable] = useSearch(searchKernelName, 'name', kernelTable) const [searchedOpTable] = useSearch( @@ -114,8 +112,8 @@ export const Kernel: React.FC = (props) => { ) const onGroupByChanged: SelectProps['onChange'] = (event) => { - setGroupBy(event.target.value as GroupBy) - setSortColumn(event.target.value == GroupBy.Kernel ? 2 : 3) + setGroupBy(event.target.value as KernelGroupBy) + setSortColumn(event.target.value == KernelGroupBy.Kernel ? 2 : 3) } const onSearchKernelChanged: TextFieldProps['onChange'] = (event) => { @@ -131,7 +129,7 @@ export const Kernel: React.FC = (props) => { } const onTopChanged = (event: React.ChangeEvent) => { - setTop(Number(event.target.value)) + setTopText(event.target.value) } const inputProps: StandardTextFieldProps['inputProps'] = { @@ -170,8 +168,9 @@ export const Kernel: React.FC = (props) => { classes={{ root: classes.inputWidth }} inputProps={inputProps} type="number" - value={top} + value={topText} onChange={onTopChanged} + error={!topIsValid(topText)} /> )} @@ -190,55 +189,55 @@ export const Kernel: React.FC = (props) => { )} - - - - - - Group By - - + + + + + Group By + + + + + + - + {groupBy === KernelGroupBy.KernelNameAndOpName && ( - {groupBy === GroupBy.KernelNameAndOpName && ( - - - - )} - - - - - {(graph) => ( - - )} - + )} + + + {(graph) => ( + + )} + + diff --git a/tb_plugin/fe/src/components/MemoryView.tsx b/tb_plugin/fe/src/components/MemoryView.tsx new file mode 100644 index 000000000..58427a99d --- /dev/null +++ b/tb_plugin/fe/src/components/MemoryView.tsx @@ -0,0 +1,152 @@ +/*--------------------------------------------------------------------------------------------- + * 
Copyright (c) Microsoft Corporation. All rights reserved. + *--------------------------------------------------------------------------------------------*/ + +import Card from '@material-ui/core/Card' +import CardContent from '@material-ui/core/CardContent' +import CardHeader from '@material-ui/core/CardHeader' +import Grid from '@material-ui/core/Grid' +import InputLabel from '@material-ui/core/InputLabel' +import MenuItem from '@material-ui/core/MenuItem' +import Select, { SelectProps } from '@material-ui/core/Select' +import { makeStyles } from '@material-ui/core/styles' +import TextField, { TextFieldProps } from '@material-ui/core/TextField' +import * as React from 'react' +import * as api from '../api' +import { MemoryData } from '../api' +import { useSearchDirectly } from '../utils/search' +import { DataLoading } from './DataLoading' +import { MemoryTable } from './tables/MemoryTable' + +const useStyles = makeStyles((theme) => ({ + root: { + flexGrow: 1 + }, + verticalInput: { + display: 'flex', + alignItems: 'center' + }, + inputWidth: { + width: '4em' + }, + inputWidthOverflow: { + minWidth: '15em', + whiteSpace: 'nowrap' + }, + full: { + width: '100%' + }, + description: { + marginLeft: theme.spacing(1) + } +})) + +export interface IProps { + run: string + worker: string + span: string +} + +export const MemoryView: React.FC = (props) => { + const { run, worker, span } = props + const classes = useStyles() + + const [memoryData, setMemoryData] = React.useState( + undefined + ) + const [devices, setDevices] = React.useState([]) + const [device, setDevice] = React.useState('') + const [searchOperatorName, setSearchOperatorName] = React.useState('') + + const tableData = memoryData ? 
memoryData.data[device] : undefined + + const getSearchIndex = function () { + if (!tableData || !memoryData) { + return -1 + } + for (let i = 0; i < tableData.columns.length; i++) { + if (tableData.columns[i].name == memoryData.metadata.search) { + return i + } + } + return -1 + } + + const searchIndex = getSearchIndex() + const getName = React.useCallback((row: any) => row[searchIndex], [ + searchIndex + ]) + const [searchedTableDataRows] = useSearchDirectly( + searchOperatorName, + getName, + tableData?.rows + ) + + const onSearchOperatorChanged: TextFieldProps['onChange'] = (event) => { + setSearchOperatorName(event.target.value as string) + } + + React.useEffect(() => { + api.defaultApi.memoryGet(run, worker, span).then((resp) => { + setMemoryData(resp) + setDevices(Object.keys(resp.data)) + setDevice(resp.metadata.default_device) + }) + }, [run, worker, span]) + + const onDeviceChanged: SelectProps['onChange'] = (event) => { + setDevice(event.target.value as string) + } + + return ( +
+ + + + + + + + + Device + + + + + + + + + + {(data) => ( + + )} + + + + + + +
+ ) +} diff --git a/tb_plugin/fe/src/components/Operator.tsx b/tb_plugin/fe/src/components/Operator.tsx index 9124f1d87..5cf3b4bc8 100644 --- a/tb_plugin/fe/src/components/Operator.tsx +++ b/tb_plugin/fe/src/components/Operator.tsx @@ -3,38 +3,42 @@ *--------------------------------------------------------------------------------------------*/ import Card from '@material-ui/core/Card' +import CardContent from '@material-ui/core/CardContent' +import CardHeader from '@material-ui/core/CardHeader' +import FormControlLabel from '@material-ui/core/FormControlLabel' import Grid from '@material-ui/core/Grid' +import GridList from '@material-ui/core/GridList' +import GridListTile from '@material-ui/core/GridListTile' +import InputLabel from '@material-ui/core/InputLabel' +import MenuItem from '@material-ui/core/MenuItem' +import Radio from '@material-ui/core/Radio' +import RadioGroup, { RadioGroupProps } from '@material-ui/core/RadioGroup' +import Select, { SelectProps } from '@material-ui/core/Select' +import { makeStyles } from '@material-ui/core/styles' import TextField, { StandardTextFieldProps, TextFieldProps } from '@material-ui/core/TextField' -import CardHeader from '@material-ui/core/CardHeader' -import CardContent from '@material-ui/core/CardContent' -import { makeStyles } from '@material-ui/core/styles' -import MenuItem from '@material-ui/core/MenuItem' -import InputLabel from '@material-ui/core/InputLabel' -import GridList from '@material-ui/core/GridList' -import GridListTile from '@material-ui/core/GridListTile' -import Select, { SelectProps } from '@material-ui/core/Select' - import * as React from 'react' -import { PieChart } from './charts/PieChart' -import { TableChart } from './charts/TableChart' import * as api from '../api' -import { Graph, OperatorGraph } from '../api' +import { + OperationTableData, + OperationTableDataInner, + OperatorGraph +} from '../api' +import { OperationGroupBy } from '../constants/groupBy' +import { useSearchDirectly } from 
'../utils/search' +import { topIsValid, UseTop, useTopN } from '../utils/top' +import { PieChart } from './charts/PieChart' import { DataLoading } from './DataLoading' -import RadioGroup, { RadioGroupProps } from '@material-ui/core/RadioGroup' -import Radio from '@material-ui/core/Radio' -import FormControlLabel from '@material-ui/core/FormControlLabel' -import { UseTop, useTopN } from '../utils/top' -import { useSearch } from '../utils/search' +import { makeChartHeaderRenderer, useTooltipCommonStyles } from './helpers' +import { OperationTable } from './tables/OperationTable' import { DeviceSelfTimeTooltip, DeviceTotalTimeTooltip, HostSelfTimeTooltip, HostTotalTimeTooltip } from './TooltipDescriptions' -import { useTooltipCommonStyles, makeChartHeaderRenderer } from './helpers' const useStyles = makeStyles((theme) => ({ root: { @@ -62,16 +66,11 @@ const useStyles = makeStyles((theme) => ({ export interface IProps { run: string worker: string - view: string -} - -enum GroupBy { - Operation = 'Operation', - OperationAndInputShape = 'OperationAndInputShape' + span: string } export const Operator: React.FC = (props) => { - const { run, worker, view } = props + const { run, worker, span } = props const classes = useStyles() const tooltipCommonClasses = useTooltipCommonStyles() const chartHeaderRenderer = React.useMemo( @@ -82,16 +81,29 @@ export const Operator: React.FC = (props) => { const [operatorGraph, setOperatorGraph] = React.useState< OperatorGraph | undefined >(undefined) - const [operatorTable, setOperatorTable] = React.useState( - undefined - ) - const [groupBy, setGroupBy] = React.useState(GroupBy.Operation) + const [operatorTable, setOperatorTable] = React.useState< + OperationTableData | undefined + >(undefined) + const [groupBy, setGroupBy] = React.useState(OperationGroupBy.Operation) const [searchOperatorName, setSearchOperatorName] = React.useState('') - const [top, actualTop, useTop, setTop, setUseTop] = useTopN({ + const [topText, actualTop, useTop, 
setTopText, setUseTop] = useTopN({ defaultUseTop: UseTop.Use, defaultTop: 10 }) - const [sortColumn, setSortColumn] = React.useState(2) + + const getName = React.useCallback( + (row: OperationTableDataInner) => row.name, + [] + ) + const [searchedOperatorTable] = useSearchDirectly( + searchOperatorName, + getName, + operatorTable + ) + + const onSearchOperatorChanged: TextFieldProps['onChange'] = (event) => { + setSearchOperatorName(event.target.value as string) + } React.useEffect(() => { if (operatorGraph) { @@ -101,39 +113,28 @@ export const Operator: React.FC = (props) => { operatorGraph.host_self_time.rows?.length ?? 0, operatorGraph.host_total_time.rows?.length ?? 0 ] - setTop(Math.min(Math.max(...counts), 10)) + setTopText(String(Math.min(Math.max(...counts), 10))) } }, [operatorGraph]) React.useEffect(() => { api.defaultApi - .operationTableGet(run, worker, view, groupBy) + .operationTableGet(run, worker, span, groupBy) .then((resp) => { - setOperatorTable(resp.data) + setOperatorTable(resp) }) - }, [run, worker, view, groupBy]) + }, [run, worker, span, groupBy]) React.useEffect(() => { api.defaultApi - .operationGet(run, worker, view, GroupBy.Operation) + .operationGet(run, worker, span, OperationGroupBy.Operation) .then((resp) => { setOperatorGraph(resp) }) - }, [run, worker, view]) - - const [searchedOperatorTable] = useSearch( - searchOperatorName, - 'name', - operatorTable - ) - - const onSearchOperatorChanged: TextFieldProps['onChange'] = (event) => { - setSearchOperatorName(event.target.value as string) - } + }, [run, worker, span]) const onGroupByChanged: SelectProps['onChange'] = (event) => { - setGroupBy(event.target.value as GroupBy) - setSortColumn(event.target.value == GroupBy.Operation ? 
2 : 3) + setGroupBy(event.target.value as OperationGroupBy) } const onUseTopChanged: RadioGroupProps['onChange'] = (event) => { @@ -141,7 +142,7 @@ export const Operator: React.FC = (props) => { } const onTopChanged = (event: React.ChangeEvent) => { - setTop(Number(event.target.value)) + setTopText(event.target.value) } const inputProps: StandardTextFieldProps['inputProps'] = { @@ -238,8 +239,9 @@ export const Operator: React.FC = (props) => { classes={{ root: classes.inputWidth }} inputProps={inputProps} type="number" - value={top} + value={topText} onChange={onTopChanged} + error={!topIsValid(topText)} />
)} @@ -257,10 +259,12 @@ export const Operator: React.FC = (props) => { value={groupBy} onChange={onGroupByChanged} > - + Operator + Input Shape - Operator + + Operator +
@@ -276,8 +280,14 @@ export const Operator: React.FC = (props) => { - {(graph) => ( - + {(table) => ( + )} diff --git a/tb_plugin/fe/src/components/Overview.tsx b/tb_plugin/fe/src/components/Overview.tsx index a5704f24f..c9d16bf95 100644 --- a/tb_plugin/fe/src/components/Overview.tsx +++ b/tb_plugin/fe/src/components/Overview.tsx @@ -2,24 +2,24 @@ * Copyright (c) Microsoft Corporation. All rights reserved. *--------------------------------------------------------------------------------------------*/ -import * as React from 'react' -import { makeStyles } from '@material-ui/core/styles' -import Grid from '@material-ui/core/Grid' import Card from '@material-ui/core/Card' -import CardHeader from '@material-ui/core/CardHeader' import CardContent from '@material-ui/core/CardContent' -import { TextListItem } from './TextListItem' +import CardHeader from '@material-ui/core/CardHeader' +import Grid from '@material-ui/core/Grid' +import { makeStyles } from '@material-ui/core/styles' +import * as React from 'react' import * as api from '../api' -import { DataLoading } from './DataLoading' +import { PieChart } from './charts/PieChart' import { SteppedAreaChart } from './charts/SteppedAreaChart' -import { - transformPerformanceIntoTable, - transformPerformanceIntoPie -} from './transform' import { TableChart } from './charts/TableChart' -import { PieChart } from './charts/PieChart' +import { DataLoading } from './DataLoading' +import { makeChartHeaderRenderer, useTooltipCommonStyles } from './helpers' +import { TextListItem } from './TextListItem' import { StepTimeBreakDownTooltip } from './TooltipDescriptions' -import { useTooltipCommonStyles, makeChartHeaderRenderer } from './helpers' +import { + transformPerformanceIntoPie, + transformPerformanceIntoTable +} from './transform' const topGraphHeight = 230 @@ -64,15 +64,18 @@ const highlightNoTopLevel = ( export interface IProps { run: string worker: string - view: string + span: string } export const Overview: React.FC = 
(props) => { - const { run, worker, view } = props + const { run, worker, span } = props const [steps, setSteps] = React.useState(undefined) const [performances, setPerformances] = React.useState([]) const [environments, setEnvironments] = React.useState([]) + const [gpuMetrics, setGpuMetrics] = React.useState< + api.GpuMetrics | undefined + >(undefined) const [recommendations, setRecommendations] = React.useState('') const synthesizedTableGraph = React.useMemo(() => { @@ -84,13 +87,15 @@ export const Overview: React.FC = (props) => { }, [performances]) React.useEffect(() => { - api.defaultApi.overviewGet(run, worker, view).then((resp) => { + api.defaultApi.overviewGet(run, worker, span).then((resp) => { setPerformances(resp.performance) setEnvironments(resp.environments) setSteps(resp.steps) setRecommendations(resp.recommendations) + setGpuMetrics(resp.gpu_metrics) + console.log(resp.gpu_metrics) }) - }, [run, worker, view]) + }, [run, worker, span]) const classes = useStyles() const tooltipCommonClasses = useTooltipCommonStyles() @@ -104,11 +109,15 @@ export const Overview: React.FC = (props) => { [tooltipCommonClasses, chartHeaderRenderer] ) + const cardSizes = gpuMetrics + ? ([2, 3, 7] as const) + : ([4, undefined, 8] as const) + return (
- + {React.useMemo( () => ( @@ -126,7 +135,28 @@ export const Overview: React.FC = (props) => { [environments] )} - + {gpuMetrics && ( + + + + + {gpuMetrics.data.map((metric) => ( + + ))} + + + + )} + diff --git a/tb_plugin/fe/src/components/TextListItem.tsx b/tb_plugin/fe/src/components/TextListItem.tsx index a4d74ec6b..c5e4eee52 100644 --- a/tb_plugin/fe/src/components/TextListItem.tsx +++ b/tb_plugin/fe/src/components/TextListItem.tsx @@ -2,10 +2,9 @@ * Copyright (c) Microsoft Corporation. All rights reserved. *--------------------------------------------------------------------------------------------*/ -import * as React from 'react' - import Grid from '@material-ui/core/Grid' import { makeStyles } from '@material-ui/core/styles' +import * as React from 'react' export interface IStylesProps { root?: string @@ -18,6 +17,7 @@ export interface IProps { description?: string extra?: string classes?: IStylesProps + dangerouslyAllowHtml?: boolean } const useStyles = makeStyles((theme) => ({ @@ -35,35 +35,53 @@ const useStyles = makeStyles((theme) => ({ export const TextListItem: React.FC = (props) => { const classes = useStyles() - const sizes = - props.value && props.extra - ? ([4, 4, 4] as const) - : props.value - ? 
([8, 4, undefined] as const) - : ([12, undefined, undefined] as const) + const getSizes = function () { + if (props.value && props.extra) { + return [4, 4, 4] as const + } + if (props.value) { + if (props.value.length > props.name.length) { + return [4, 8, undefined] as const + } + return [8, 4, undefined] as const + } + return [12, undefined, undefined] as const + } + + const sizes = getSizes() + + const renderSpan = function (content: string, className?: string) { + if (props.dangerouslyAllowHtml) { + return ( + + ) + } + return {content} + } return ( - {props.name} + {renderSpan(props.name, props.classes?.name)} {props.description && ( - - {props.description} - + {renderSpan(props.description)} )} {props.value && ( - {props.value} + {renderSpan(props.value)} )} {props.extra && ( - {props.extra} + {renderSpan(props.extra)} )} diff --git a/tb_plugin/fe/src/components/TooltipDescriptions.ts b/tb_plugin/fe/src/components/TooltipDescriptions.ts index c99c86ff2..7724403db 100644 --- a/tb_plugin/fe/src/components/TooltipDescriptions.ts +++ b/tb_plugin/fe/src/components/TooltipDescriptions.ts @@ -1,3 +1,7 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. 
+ *--------------------------------------------------------------------------------------------*/ + export const StepTimeBreakDownTooltip = `The time spent on each step is broken down into multiple categories as follows: Kernel: Kernels execution time on GPU device; Memcpy: GPU involved memory copy time (either D2D, D2H or H2D); @@ -16,3 +20,11 @@ export const HostSelfTimeTooltip = `The accumulated time spent on Host, not incl export const HostTotalTimeTooltip = `The accumulated time spent on Host, including this operator’s child operators.` export const GPUKernelTotalTimeTooltip = `The accumulated time of all calls of this kernel.` + +export const DistributedGpuInfoTableTooltip = `Information about GPU hardware used during the run.` + +export const DistributedOverlapGraphTooltip = `The time spent on computation vs communication.` + +export const DistributedWaittimeGraphTooltip = `The time spent waiting vs communicating between devices.` + +export const DistributedCommopsTableTooltip = `Statistics for operations managing communications between nodes.` diff --git a/tb_plugin/fe/src/components/TraceView.tsx b/tb_plugin/fe/src/components/TraceView.tsx index 14b8536c7..8f1f36843 100644 --- a/tb_plugin/fe/src/components/TraceView.tsx +++ b/tb_plugin/fe/src/components/TraceView.tsx @@ -2,15 +2,15 @@ * Copyright (c) Microsoft Corporation. All rights reserved. 
*--------------------------------------------------------------------------------------------*/ +import ClickAwayListener from '@material-ui/core/ClickAwayListener' import { makeStyles } from '@material-ui/core/styles' import * as React from 'react' import * as api from '../api' -import ClickAwayListener from '@material-ui/core/ClickAwayListener'; export interface IProps { run: string worker: string - view: string + span: string iframeRef: React.RefObject } @@ -26,7 +26,7 @@ const useStyles = makeStyles(() => ({ })) export const TraceView: React.FC = (props) => { - const { run, worker, view, iframeRef } = props + const { run, worker, span, iframeRef } = props const classes = useStyles() const [traceData, setTraceData] = React.useState | null>(null) @@ -34,11 +34,11 @@ export const TraceView: React.FC = (props) => { React.useEffect(() => { setTraceData( - api.defaultApi.traceGet(run, worker, view).then((resp) => { + api.defaultApi.traceGet(run, worker, span).then((resp) => { return JSON.stringify(resp) }) ) - }, [run, worker, view]) + }, [run, worker, span]) React.useEffect(() => { function callback(event: MessageEvent) { @@ -75,7 +75,7 @@ export const TraceView: React.FC = (props) => { ), diff --git a/tb_plugin/fe/src/components/charts/AntTableChart.tsx b/tb_plugin/fe/src/components/charts/AntTableChart.tsx new file mode 100644 index 000000000..d2b32cda4 --- /dev/null +++ b/tb_plugin/fe/src/components/charts/AntTableChart.tsx @@ -0,0 +1,90 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. 
+ *--------------------------------------------------------------------------------------------*/ + +import { makeStyles } from '@material-ui/core/styles' +import { Table, TablePaginationConfig } from 'antd' +import * as React from 'react' +import { Graph } from '../../api' + +interface IProps { + graph: Graph + sortColumn?: number +} + +const useStyles = makeStyles((theme) => ({ + tooltip: { + whiteSpace: 'pre-wrap' + }, + row: { + wordBreak: 'break-word' + } +})) + +const getTableColumns = function ( + columns: any, + sort: string | undefined, + tooltipClass: string +) { + let i = 0 + return columns.map(function (col: any) { + const key = 'col' + i++ + const stringCompare = (a: any, b: any) => a[key].localeCompare(b[key]) + const numberCompare = (a: any, b: any) => (a[key] || 0) - (b[key] || 0) + return { + dataIndex: key, + key: key, + title: col.name, + sorter: col.type == 'string' ? stringCompare : numberCompare, + defaultSortOrder: sort == col.name ? ('descend' as const) : undefined, + showSorterTooltip: col.tooltip + ? { title: col.tooltip, overlayClassName: tooltipClass } + : true + } + }) +} + +const getTableRows = function (rows: any) { + return rows.map(function (row: any) { + let i = 0 + const res: any = {} + row.forEach(function (entry: any) { + res['col' + i++] = entry + }) + return res + }) +} + +export const AntTableChart: React.FC = (props) => { + const { graph, sortColumn } = props + const classes = useStyles(props) + const sort = + sortColumn === undefined ? 
undefined : graph.columns[sortColumn].name + + const rows = React.useMemo(() => getTableRows(graph.rows), [graph.rows]) + + const columns = React.useMemo( + () => getTableColumns(graph.columns, sort, classes.tooltip), + [graph.columns, sort, classes.tooltip] + ) + + const [pageSize, setPageSize] = React.useState(30) + const onShowSizeChange = (current: number, size: number) => { + setPageSize(size) + } + + return ( + + ) +} diff --git a/tb_plugin/fe/src/components/charts/AreaChart.tsx b/tb_plugin/fe/src/components/charts/AreaChart.tsx index d8712d793..6a0f5b484 100644 --- a/tb_plugin/fe/src/components/charts/AreaChart.tsx +++ b/tb_plugin/fe/src/components/charts/AreaChart.tsx @@ -5,6 +5,7 @@ import { makeStyles } from '@material-ui/core/styles' import * as React from 'react' import { Graph } from '../../api' +import { useResizeEventDependency } from '../../utils/resize' interface IProps { graph: Graph @@ -22,6 +23,7 @@ export const AreaChart: React.FC = (props) => { const { graph, height = 400, hAxisTitle } = props const classes = useStyles({ height }) const graphRef = React.useRef(null) + const [resizeEventDependency] = useResizeEventDependency() React.useLayoutEffect(() => { const element = graphRef.current @@ -58,7 +60,7 @@ export const AreaChart: React.FC = (props) => { return () => { chart.clearChart() } - }, [graph, height]) + }, [graph, height, resizeEventDependency]) return (
diff --git a/tb_plugin/fe/src/components/charts/ColumnChart.tsx b/tb_plugin/fe/src/components/charts/ColumnChart.tsx new file mode 100644 index 000000000..40d1d1b9b --- /dev/null +++ b/tb_plugin/fe/src/components/charts/ColumnChart.tsx @@ -0,0 +1,87 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. + *--------------------------------------------------------------------------------------------*/ + +import { makeStyles } from '@material-ui/core/styles' +import * as React from 'react' +import { Graph } from '../../api' +import { useResizeEventDependency } from '../../utils/resize' + +interface IProps { + title?: string + units?: string + colors?: Array + chartData: ColumnChartData +} + +const useStyles = makeStyles(() => ({ + root: { + height: 500 + } +})) + +export interface ColumnChartData { + legends: Array + barLabels: Array + barHeights: Array> +} + +export const ColumnChart: React.FC = (props) => { + const { title, units, colors, chartData } = props + const { legends, barLabels, barHeights } = chartData + const classes = useStyles() + const graphRef = React.useRef(null) + const [resizeEventDependency] = useResizeEventDependency() + + React.useLayoutEffect(() => { + const element = graphRef.current + if (!element) return + + const data = new google.visualization.DataTable() + data.addColumn({ + type: 'string', + label: 'Worker' + }) + legends.forEach((label) => { + data.addColumn({ + type: 'number', + label + }) + }) + const rows = barHeights.map((heights, i) => + [barLabels[i] as string | number].concat(heights) + ) + data.addRows(rows) + + const options = { + height: 500, + title, + isStacked: true, + legend: { position: 'bottom' }, + vAxis: { + title: units + }, + tooltip: { isHtml: true }, + chartArea: { + left: '15%', + width: '80%', + top: title ? 
'10%' : '5%' + }, + colors + } + + const chart = new google.visualization.ColumnChart(element) + + chart.draw(data, options) + + return () => { + chart.clearChart() + } + }, [title, chartData, resizeEventDependency]) + + return ( +
+
+
+ ) +} diff --git a/tb_plugin/fe/src/components/charts/PieChart.tsx b/tb_plugin/fe/src/components/charts/PieChart.tsx index 71e0bbfb5..aa5706746 100644 --- a/tb_plugin/fe/src/components/charts/PieChart.tsx +++ b/tb_plugin/fe/src/components/charts/PieChart.tsx @@ -6,6 +6,7 @@ import { makeStyles } from '@material-ui/core/styles' import * as React from 'react' import { Graph } from '../../api' import { value } from '../../utils' +import { useResizeEventDependency } from '../../utils/resize' interface IProps { graph: Graph @@ -30,6 +31,8 @@ export const PieChart: React.FC = (props) => { const classes = useStyles(props) const graphRef = React.useRef(null) + const [resizeEventDependency] = useResizeEventDependency() + React.useLayoutEffect(() => { const element = graphRef.current if (!element) return @@ -65,15 +68,16 @@ export const PieChart: React.FC = (props) => { const chart = new google.visualization.PieChart(element) - google.visualization.events.addListener(chart, 'onmouseover', function ( - entry: any - ) { - chart.setSelection([{ row: entry.row }]) - }) + google.visualization.events.addListener( + chart, + 'onmouseover', + function (entry: any) { + chart.setSelection([{ row: entry.row }]) + } + ) - google.visualization.events.addListener(chart, 'onmouseout', function ( - ) { - chart.setSelection([]) + google.visualization.events.addListener(chart, 'onmouseout', function () { + chart.setSelection([]) }) chart.draw(data, options) @@ -81,7 +85,7 @@ export const PieChart: React.FC = (props) => { return () => { chart.clearChart() } - }, [graph, height, top]) + }, [graph, height, top, resizeEventDependency]) return (
diff --git a/tb_plugin/fe/src/components/charts/SteppedAreaChart.tsx b/tb_plugin/fe/src/components/charts/SteppedAreaChart.tsx index 6127381a5..6d2647878 100644 --- a/tb_plugin/fe/src/components/charts/SteppedAreaChart.tsx +++ b/tb_plugin/fe/src/components/charts/SteppedAreaChart.tsx @@ -5,6 +5,7 @@ import { makeStyles } from '@material-ui/core/styles' import * as React from 'react' import { Graph } from '../../api' +import { useResizeEventDependency } from '../../utils/resize' interface IProps { graph: Graph @@ -23,6 +24,7 @@ export const SteppedAreaChart: React.FC = (props) => { const { graph, height = 400, hAxisTitle, vAxisTitle } = props const classes = useStyles({ height }) const graphRef = React.useRef(null) + const [resizeEventDependency] = useResizeEventDependency() React.useLayoutEffect(() => { const element = graphRef.current @@ -63,7 +65,7 @@ export const SteppedAreaChart: React.FC = (props) => { return () => { chart.clearChart() } - }, [graph, height]) + }, [graph, height, resizeEventDependency]) return (
diff --git a/tb_plugin/fe/src/components/charts/TableChart.tsx b/tb_plugin/fe/src/components/charts/TableChart.tsx index 69273dc3a..267624c85 100644 --- a/tb_plugin/fe/src/components/charts/TableChart.tsx +++ b/tb_plugin/fe/src/components/charts/TableChart.tsx @@ -5,6 +5,7 @@ import { makeStyles } from '@material-ui/core/styles' import * as React from 'react' import { Graph } from '../../api' +import { useResizeEventDependency } from '../../utils/resize' interface IProps { graph: Graph @@ -28,6 +29,7 @@ export const TableChart: React.FC = (props) => { const { graph, sortColumn, setCellProperty, allowHtml } = props const classes = useStyles(props) const graphRef = React.useRef(null) + const [resizeEventDependency] = useResizeEventDependency() React.useLayoutEffect(() => { const element = graphRef.current @@ -67,8 +69,15 @@ export const TableChart: React.FC = (props) => { const chart = new google.visualization.Table(element) + /* `chart.draw()` removes the contents of `element` and rebuilds it. This can cause a jump in the scroll position + * if the height/width change to 0. Since we can't change the code of Google Charts, we temporarily lock the dims + * of the parent container. */ + if (element.offsetHeight > 0) { + element.parentElement!.style.height = element.offsetHeight + 'px' + } chart.draw(data, options) - }, [graph]) + element.parentElement!.style.height = '' + }, [graph, resizeEventDependency]) return (
diff --git a/tb_plugin/fe/src/components/helpers.tsx b/tb_plugin/fe/src/components/helpers.tsx index 395099980..b787a5e91 100644 --- a/tb_plugin/fe/src/components/helpers.tsx +++ b/tb_plugin/fe/src/components/helpers.tsx @@ -1,8 +1,12 @@ -import * as React from 'react' -import HelpOutline from '@material-ui/icons/HelpOutline' -import Tooltip from '@material-ui/core/Tooltip' +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. + *--------------------------------------------------------------------------------------------*/ + import { makeStyles } from '@material-ui/core/styles' +import Tooltip from '@material-ui/core/Tooltip' +import HelpOutline from '@material-ui/icons/HelpOutline' import clsx from 'clsx' +import * as React from 'react' export const useTooltipCommonStyles = makeStyles((theme) => ({ tooltip: { diff --git a/tb_plugin/fe/src/components/tables/CallFrameList.tsx b/tb_plugin/fe/src/components/tables/CallFrameList.tsx new file mode 100644 index 000000000..1e2a385bb --- /dev/null +++ b/tb_plugin/fe/src/components/tables/CallFrameList.tsx @@ -0,0 +1,42 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. 
+ *--------------------------------------------------------------------------------------------*/ + +import * as React from 'react' +import { CallStackFrame } from './transform' +import { List } from 'antd' +import { NavToCodeButton } from './NavToCodeButton' +import { makeStyles } from '@material-ui/core/styles' + +interface IProps { + callFrames: CallStackFrame[] +} + +const useStyles = makeStyles(() => ({ + item: { + paddingTop: '1px !important', + paddingBottom: '1px !important' + } +})) + +export const CallFrameList = (props: IProps) => { + const classes = useStyles() + + const renderItem = React.useCallback( + (item: CallStackFrame) => ( + + + + ), + [classes.item] + ) + + return ( + + ) +} diff --git a/tb_plugin/fe/src/components/tables/CallStackTable.tsx b/tb_plugin/fe/src/components/tables/CallStackTable.tsx new file mode 100644 index 000000000..a1373c231 --- /dev/null +++ b/tb_plugin/fe/src/components/tables/CallStackTable.tsx @@ -0,0 +1,83 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. 
+ *--------------------------------------------------------------------------------------------*/ + +import * as React from 'react' +import { CallStackTableData, OperationTableDataInner } from '../../api' +import { Table, TableProps } from 'antd' + +import * as api from '../../api' +import { transformTableData, TransformedCallStackDataInner } from './transform' +import { attachId, getCommonOperationColumns } from './common' +import { OperationGroupBy } from '../../constants/groupBy' +import { makeExpandIcon } from './ExpandIcon' +import { CallFrameList } from './CallFrameList' + +export interface IProps { + data: OperationTableDataInner + run: string + worker: string + span: string + groupBy: OperationGroupBy +} + +const expandIcon = makeExpandIcon( + 'View call frames', + (record) => !record.callStackFrames.length +) + +const rowExpandable = (record: TransformedCallStackDataInner) => + !!record.callStackFrames.length +const expandedRowRender = (record: TransformedCallStackDataInner) => ( + +) + +export const CallStackTable = (props: IProps) => { + const { data, run, worker, span, groupBy } = props + const { name, input_shape } = data + + const [stackData, setStackData] = React.useState< + CallStackTableData | undefined + >(undefined) + + React.useEffect(() => { + api.defaultApi + .operationStackGet(run, worker, span, groupBy, name, input_shape) + .then((resp) => { + setStackData(resp) + }) + }, [name, input_shape, run, worker, span, groupBy]) + + const transformedData = React.useMemo( + () => stackData && transformTableData(attachId(stackData)), + [stackData] + ) + + const columns = React.useMemo( + () => transformedData && getCommonOperationColumns(transformedData), + [transformedData] + ) + + const expandIconColumnIndex = columns?.length + + const expandable: TableProps['expandable'] = React.useMemo( + () => ({ + expandIconColumnIndex, + expandIcon, + expandedRowRender, + rowExpandable + }), + [expandIconColumnIndex] + ) + + return ( +
+ ) +} diff --git a/tb_plugin/fe/src/components/tables/ExpandIcon.tsx b/tb_plugin/fe/src/components/tables/ExpandIcon.tsx new file mode 100644 index 000000000..68ff48282 --- /dev/null +++ b/tb_plugin/fe/src/components/tables/ExpandIcon.tsx @@ -0,0 +1,34 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. + *--------------------------------------------------------------------------------------------*/ + +import * as React from 'react' +import { Button, TableProps } from 'antd' +import { OperationTableDataInner, CallStackTableDataInner } from '../../api' +import { Arguments } from '../../utils/type' + +type Types = NonNullable['expandable']>['expandIcon'] +type BasePropType = Arguments>>[0] +type PropType = BasePropType & { text: string; disabled?: boolean } + +export function ExpandIcon< + T extends OperationTableDataInner | CallStackTableDataInner +>(props: PropType) { + const onClick = (e: React.MouseEvent) => { + props.onExpand(props.record, e) + } + + return ( + + ) +} + +export function makeExpandIcon< + T extends OperationTableDataInner | CallStackTableDataInner +>(text: string, disabled?: (v: T) => boolean) { + return (props: BasePropType) => ( + + ) +} diff --git a/tb_plugin/fe/src/components/tables/MemoryTable.tsx b/tb_plugin/fe/src/components/tables/MemoryTable.tsx new file mode 100644 index 000000000..340f249fb --- /dev/null +++ b/tb_plugin/fe/src/components/tables/MemoryTable.tsx @@ -0,0 +1,92 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. 
+ *--------------------------------------------------------------------------------------------*/ + +import * as React from 'react' +import { + MemoryData, + OperationTableData, + OperationTableDataInner +} from '../../api' +import { OperationGroupBy } from '../../constants/groupBy' +import { attachId, getCommonOperationColumns } from './common' +import { Table, TablePaginationConfig, TableProps } from 'antd' +import { makeExpandIcon } from './ExpandIcon' +import { CallStackTable } from './CallStackTable' +import { makeStyles } from '@material-ui/core' + +export interface IProps { + data: any + sort: string +} + +const useStyles = makeStyles((theme) => ({ + tooltip: { + whiteSpace: 'pre-wrap' + } +})) + +const getMemoryTableColumns = function ( + columns: any, + sort: string, + tooltipClass: string +) { + let i = 0 + return columns.map(function (col: any) { + const key = 'col' + i++ + const stringCompare = (a: any, b: any) => a[key].localeCompare(b[key]) + const numberCompare = (a: any, b: any) => (a[key] || 0) - (b[key] || 0) + return { + dataIndex: key, + key: key, + title: col.name, + sorter: col.type == 'string' ? stringCompare : numberCompare, + defaultSortOrder: sort == col.name ? ('descend' as const) : undefined, + showSorterTooltip: col.tooltip + ? 
{ title: col.tooltip, overlayClassName: tooltipClass } + : true + } + }) +} + +const getMemoryTableRows = function (rows: any) { + return rows.map(function (row: any) { + let i = 0 + const res: any = {} + row.forEach(function (entry: any) { + res['col' + i++] = entry + }) + return res + }) +} + +export const MemoryTable = (props: IProps) => { + const { data, sort } = props + const classes = useStyles() + + const rows = React.useMemo(() => getMemoryTableRows(data.rows), [data.rows]) + + const columns = React.useMemo( + () => getMemoryTableColumns(data.columns, sort, classes.tooltip), + [data.columns, sort, classes.tooltip] + ) + + const [pageSize, setPageSize] = React.useState(30) + const onShowSizeChange = (current: number, size: number) => { + setPageSize(size) + } + + return ( +
+ ) +} diff --git a/tb_plugin/fe/src/components/tables/NavToCodeButton.tsx b/tb_plugin/fe/src/components/tables/NavToCodeButton.tsx new file mode 100644 index 000000000..fb40e7f38 --- /dev/null +++ b/tb_plugin/fe/src/components/tables/NavToCodeButton.tsx @@ -0,0 +1,29 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. + *--------------------------------------------------------------------------------------------*/ + +import * as React from 'react' +import { CallStackFrame } from './transform' +import { Button } from 'antd' +import { navToCode } from '../../utils/vscode' + +interface IProps { + frame: CallStackFrame +} + +export const NavToCodeButton = (props: IProps) => { + const { raw, line, file } = props.frame + const couldNavToFile = line && file + + const onClick = () => { + if (line && file) { + navToCode(file, line - 1) + } + } + + return ( + + ) +} diff --git a/tb_plugin/fe/src/components/tables/OperationTable.tsx b/tb_plugin/fe/src/components/tables/OperationTable.tsx new file mode 100644 index 000000000..04b418542 --- /dev/null +++ b/tb_plugin/fe/src/components/tables/OperationTable.tsx @@ -0,0 +1,75 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. 
+ *--------------------------------------------------------------------------------------------*/ + +import * as React from 'react' +import { OperationTableData, OperationTableDataInner } from '../../api' +import { OperationGroupBy } from '../../constants/groupBy' +import { attachId, getCommonOperationColumns } from './common' +import { Table, TablePaginationConfig, TableProps } from 'antd' +import { makeExpandIcon } from './ExpandIcon' +import { CallStackTable } from './CallStackTable' + +export interface IProps { + data: OperationTableData + run: string + worker: string + span: string + groupBy: OperationGroupBy +} +const rowExpandable = (record: OperationTableDataInner) => record.has_call_stack +const expandIcon = makeExpandIcon( + 'View CallStack', + (record) => !record.has_call_stack +) +export const OperationTable = (props: IProps) => { + const { data, run, worker, span, groupBy } = props + + const rows = React.useMemo(() => attachId(data), [data]) + + const columns = React.useMemo(() => getCommonOperationColumns(rows), [rows]) + + const [pageSize, setPageSize] = React.useState(30) + const onShowSizeChange = (current: number, size: number) => { + setPageSize(size) + } + + const expandIconColumnIndex = columns.length + const expandedRowRender = React.useCallback( + (record: OperationTableDataInner) => ( + + ), + [run, worker, span, groupBy] + ) + + const expandable: TableProps['expandable'] = React.useMemo( + () => ({ + expandIconColumnIndex, + expandIcon, + expandedRowRender, + rowExpandable + }), + [expandIconColumnIndex, expandedRowRender] + ) + + return ( +
+ ) +} diff --git a/tb_plugin/fe/src/components/tables/common.tsx b/tb_plugin/fe/src/components/tables/common.tsx new file mode 100644 index 000000000..d589abde5 --- /dev/null +++ b/tb_plugin/fe/src/components/tables/common.tsx @@ -0,0 +1,91 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. + *--------------------------------------------------------------------------------------------*/ + +import { firstOrUndefined, isDef } from '../../utils/def' +import { CallStackTableDataInner, OperationTableDataInner } from '../../api' +import type { ColumnsType } from 'antd/es/table' + +export function getCommonOperationColumns< + T extends OperationTableDataInner | CallStackTableDataInner +>(data: T[] | undefined): ColumnsType { + const firstData = firstOrUndefined(data) + + const hasInputShape = !firstData || isDef(firstData.input_shape) + const hasDeviceSelfDuration = + !firstData || isDef(firstData.device_self_duration) + const hasDeviceTotalDuration = + !firstData || isDef(firstData.device_total_duration) + + const nameCompare = (a: T, b: T) => a.name.localeCompare(b.name) + const callsCompare = (a: T, b: T) => a.calls - b.calls + const deviceSelfDurationCompare = (a: T, b: T) => + (a.device_self_duration || 0) - (b.device_self_duration || 0) + const deviceTotalDurationCompare = (a: T, b: T) => + (a.device_total_duration || 0) - (b.device_total_duration || 0) + const hostSelfDurationCompare = (a: T, b: T) => + (a.host_self_duration || 0) - (b.host_self_duration || 0) + const hostTotalDurationCompare = (a: T, b: T) => + (a.host_total_duration || 0) - (b.host_total_duration || 0) + + return [ + { + dataIndex: 'name', + key: 'name', + title: 'Name', + sorter: nameCompare + }, + hasInputShape + ? 
{ + dataIndex: 'input_shape', + key: 'input_shape', + title: 'Input Shape' + } + : undefined, + { + dataIndex: 'calls', + sorter: callsCompare, + key: 'calls', + title: 'Calls' + }, + hasDeviceSelfDuration + ? { + dataIndex: 'device_self_duration', + key: 'device_self_duration', + title: 'Device Self Duration (us)', + sorter: deviceSelfDurationCompare, + defaultSortOrder: 'descend' as const + } + : undefined, + hasDeviceTotalDuration + ? { + dataIndex: 'device_total_duration', + key: 'device_total_duration', + title: 'Device Total Duration (us)', + sorter: deviceTotalDurationCompare + } + : undefined, + { + dataIndex: 'host_self_duration', + key: 'host_self_duration', + title: 'Host Self Duration (us)', + sorter: hostSelfDurationCompare + }, + { + dataIndex: 'host_total_duration', + key: 'host_total_duration', + title: 'Host Total Duration (us)', + sorter: hostTotalDurationCompare + } + ].filter(isDef) +} + +let uid = 1 +export function attachId< + T extends CallStackTableDataInner | OperationTableDataInner +>(data: T[]): T[] { + return data.map((d) => ({ + ...d, + key: uid++ + })) +} diff --git a/tb_plugin/fe/src/components/tables/transform.ts b/tb_plugin/fe/src/components/tables/transform.ts new file mode 100644 index 000000000..bd051fd42 --- /dev/null +++ b/tb_plugin/fe/src/components/tables/transform.ts @@ -0,0 +1,63 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. 
+ *--------------------------------------------------------------------------------------------*/ + +import { CallStackTableData, CallStackTableDataInner } from '../../api' + +export interface CallStackFrame { + file?: string + line?: number + raw: string +} + +export interface TransformedCallStackDataInner extends CallStackTableDataInner { + callStackFrames: CallStackFrame[] +} + +const lineRegex = /\([0-9]+\)$/ + +function parseCallStackLine(raw: string): CallStackFrame { + raw = raw.trim() + const results = raw.split(':') + const location = results.slice(0, results.length - 1).join(':') + + const result = lineRegex.exec(location) + if (!result) { + return { raw } + } + + const lineWithParens = result[0].trim() + const file = raw.slice(0, result.index).trim() + const line = Number( + lineWithParens.substr(1, lineWithParens.length - 2).trim() + ) + + return { + raw, + file, + line + } +} + +function parseCallStack(callStack: string | undefined): CallStackFrame[] { + const lines = (callStack ?? '') + .trim() + .split(';') + .map((x) => x.trim()) + return lines.map(parseCallStackLine) +} + +function transformCallStackData( + data: CallStackTableDataInner +): TransformedCallStackDataInner { + return { + ...data, + callStackFrames: parseCallStack(data.call_stack) + } +} + +export function transformTableData( + data: CallStackTableData +): TransformedCallStackDataInner[] { + return data.map(transformCallStackData) +} diff --git a/tb_plugin/fe/src/constants/groupBy.ts b/tb_plugin/fe/src/constants/groupBy.ts new file mode 100644 index 000000000..2b96c6b8d --- /dev/null +++ b/tb_plugin/fe/src/constants/groupBy.ts @@ -0,0 +1,13 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. 
+ *--------------------------------------------------------------------------------------------*/ + +export enum OperationGroupBy { + Operation = 'Operation', + OperationAndInputShape = 'OperationAndInputShape' +} + +export enum KernelGroupBy { + Kernel = 'Kernel', + KernelNameAndOpName = 'KernelNameAndOpName' +} diff --git a/tb_plugin/fe/src/utils/debounce.ts b/tb_plugin/fe/src/utils/debounce.ts new file mode 100644 index 000000000..fcd6368e6 --- /dev/null +++ b/tb_plugin/fe/src/utils/debounce.ts @@ -0,0 +1,21 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. + *--------------------------------------------------------------------------------------------*/ + +import * as React from 'react' + +export function useDebounce(value: T, delay: number): T { + const [debouncedValue, setDebouncedValue] = React.useState(value) + + React.useEffect(() => { + const handler = setTimeout(() => { + setDebouncedValue(value) + }, delay) + + return () => { + clearTimeout(handler) + } + }, [value, delay]) + + return debouncedValue +} diff --git a/tb_plugin/fe/src/utils/def.ts b/tb_plugin/fe/src/utils/def.ts index fe8d83df7..c024293a5 100644 --- a/tb_plugin/fe/src/utils/def.ts +++ b/tb_plugin/fe/src/utils/def.ts @@ -11,3 +11,8 @@ export function assertDef(v: T | undefined | null): asserts v is T { throw new Error('Must be defined') } } + +export function firstOrUndefined(v: T[] | undefined): T | undefined { + if (!v || !v.length) return undefined + return v[0] +} diff --git a/tb_plugin/fe/src/utils/index.ts b/tb_plugin/fe/src/utils/index.ts index 532020c5f..1c7074b4c 100644 --- a/tb_plugin/fe/src/utils/index.ts +++ b/tb_plugin/fe/src/utils/index.ts @@ -9,6 +9,10 @@ export function firstOrUndefined(v: T[] | undefined | null): T | undefined { return v[0] } +export function sleep(delay: number) { + return new Promise((resolve) => setTimeout(resolve, delay)) +} + export function 
isValueAndFormat(v: any): v is ValueAndFormat { return 'f' in v && 'v' in v } diff --git a/tb_plugin/fe/src/utils/resize.ts b/tb_plugin/fe/src/utils/resize.ts new file mode 100644 index 000000000..57ab39404 --- /dev/null +++ b/tb_plugin/fe/src/utils/resize.ts @@ -0,0 +1,27 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. + *--------------------------------------------------------------------------------------------*/ + +import * as React from 'react' +import debounce from '@material-ui/core/utils/debounce' + +export function useResizeEventDependency() { + const [version, setVersion] = React.useState(0) + + const increaseVersion = React.useCallback( + debounce(() => { + setVersion((prev) => prev + 1) + }, 100), + [] + ) + + React.useEffect(() => { + window.addEventListener('resize', increaseVersion) + + return () => { + window.removeEventListener('resize', increaseVersion) + } + }, []) + + return [version] as const +} diff --git a/tb_plugin/fe/src/utils/search.ts b/tb_plugin/fe/src/utils/search.ts index 8927c8e82..2835c4324 100644 --- a/tb_plugin/fe/src/utils/search.ts +++ b/tb_plugin/fe/src/utils/search.ts @@ -2,28 +2,17 @@ * Copyright (c) Microsoft Corporation. All rights reserved. *--------------------------------------------------------------------------------------------*/ -import debounce from '@material-ui/core/utils/debounce' import * as React from 'react' import { value } from '.' 
import * as api from '../api' +import { useDebounce } from './debounce' export function useSearch( searchName: string, columnName: string, table: api.Graph | undefined ): [api.Graph | undefined] { - const [searchNameDebounce, setSearchNameDebounce] = React.useState(searchName) - - const onSearchOperatorNameChanged = React.useCallback( - debounce((value: string) => { - setSearchNameDebounce(value.trim()) - }, 500), - [] - ) - - React.useEffect(() => { - onSearchOperatorNameChanged(searchName) - }, [searchName]) + const searchNameDebounce = useDebounce(searchName.trim(), 500) const searchedTable: api.Graph | undefined = React.useMemo(() => { if (!searchNameDebounce) { @@ -52,3 +41,27 @@ export function useSearch( }, [table, searchNameDebounce]) return [searchedTable] } + +export function useSearchDirectly( + searchName: string, + field: (v: T) => string, + table: T[] | undefined +): [T[] | undefined] { + const searchNameDebounce = useDebounce(searchName.trim(), 500) + + const result = React.useMemo(() => { + if (!searchNameDebounce) { + return table + } + + if (!table) { + return undefined + } + + return table.filter((row) => { + return field(row).toLowerCase().includes(searchNameDebounce.toLowerCase()) + }) + }, [table, field, searchNameDebounce]) + console.log(result) + return [result] +} diff --git a/tb_plugin/fe/src/utils/top.ts b/tb_plugin/fe/src/utils/top.ts index 9a2a86d97..87bd3c1b8 100644 --- a/tb_plugin/fe/src/utils/top.ts +++ b/tb_plugin/fe/src/utils/top.ts @@ -20,8 +20,10 @@ interface IOptions { export function useTopN(options?: IOptions) { options ??= {} - const [top, setTop] = React.useState(options.defaultTop ?? 15) - const [actualTop, setActualTop] = React.useState(top) + const [topText, setTopText] = React.useState(String(options.defaultTop ?? 15)) + const [actualTop, setActualTop] = React.useState( + Number(topText) + ) const [useTop, setUseTop] = React.useState( options.defaultUseTop ?? 
UseTop.NotUse ) @@ -30,8 +32,19 @@ export function useTopN(options?: IOptions) { ? React.useCallback(debounce(setActualTop, options.wait ?? 500), []) : setActualTop React.useEffect(() => { - setActualDebounce(useTop === UseTop.Use && top > 0 ? top : undefined) - }, [top, useTop]) + if (useTop !== UseTop.Use) { + setActualDebounce(undefined) + } else if (topIsValid(topText)) { + setActualDebounce(Number(topText)) + } else { + setActualDebounce(actualTop) + } + }, [topText, useTop]) + + return [topText, actualTop, useTop, setTopText, setUseTop] as const +} - return [top, actualTop, useTop, setTop, setUseTop] as const +export function topIsValid(topText: string) { + const top = Number(topText) + return !Number.isNaN(top) && top > 0 && Number.isInteger(top) } diff --git a/tb_plugin/fe/src/utils/vscode.ts b/tb_plugin/fe/src/utils/vscode.ts new file mode 100644 index 000000000..62f1a9080 --- /dev/null +++ b/tb_plugin/fe/src/utils/vscode.ts @@ -0,0 +1,13 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. 
+ *--------------------------------------------------------------------------------------------*/ + +export function navToCode(filename: string, line: number) { + window.parent.parent.postMessage( + { + filename, + line + }, + '*' + ) +} diff --git a/tb_plugin/fe/tsconfig.json b/tb_plugin/fe/tsconfig.json index 90ded5ecd..029ba4db6 100644 --- a/tb_plugin/fe/tsconfig.json +++ b/tb_plugin/fe/tsconfig.json @@ -4,6 +4,7 @@ "strictPropertyInitialization": false, "module": "esnext", "moduleResolution": "node", + "forceConsistentCasingInFileNames": true, "rootDir": "src", "outDir": "dist", "jsx": "react" diff --git a/tb_plugin/fe/yarn.lock b/tb_plugin/fe/yarn.lock index 64c0bbd9b..f01c098a4 100644 --- a/tb_plugin/fe/yarn.lock +++ b/tb_plugin/fe/yarn.lock @@ -2,13 +2,52 @@ # yarn lockfile v1 -"@babel/runtime@^7.13.10", "@babel/runtime@^7.3.1", "@babel/runtime@^7.4.4", "@babel/runtime@^7.5.5", "@babel/runtime@^7.8.3", "@babel/runtime@^7.8.7": +"@ant-design/colors@^6.0.0": + version "6.0.0" + resolved "https://registry.yarnpkg.com/@ant-design/colors/-/colors-6.0.0.tgz#9b9366257cffcc47db42b9d0203bb592c13c0298" + integrity sha512-qAZRvPzfdWHtfameEGP2Qvuf838NhergR35o+EuVyB5XvSA98xod5r4utvi4TJ3ywmevm290g9nsCG5MryrdWQ== + dependencies: + "@ctrl/tinycolor" "^3.4.0" + +"@ant-design/icons-svg@^4.0.0": + version "4.1.0" + resolved "https://registry.yarnpkg.com/@ant-design/icons-svg/-/icons-svg-4.1.0.tgz#480b025f4b20ef7fe8f47d4a4846e4fee84ea06c" + integrity sha512-Fi03PfuUqRs76aI3UWYpP864lkrfPo0hluwGqh7NJdLhvH4iRDc3jbJqZIvRDLHKbXrvAfPPV3+zjUccfFvWOQ== + +"@ant-design/icons@^4.6.2": + version "4.6.2" + resolved "https://registry.yarnpkg.com/@ant-design/icons/-/icons-4.6.2.tgz#290f2e8cde505ab081fda63e511e82d3c48be982" + integrity sha512-QsBG2BxBYU/rxr2eb8b2cZ4rPKAPBpzAR+0v6rrZLp/lnyvflLH3tw1vregK+M7aJauGWjIGNdFmUfpAOtw25A== + dependencies: + "@ant-design/colors" "^6.0.0" + "@ant-design/icons-svg" "^4.0.0" + "@babel/runtime" "^7.11.2" + classnames "^2.2.6" + rc-util "^5.9.4" + 
+"@ant-design/react-slick@~0.28.1": + version "0.28.3" + resolved "https://registry.yarnpkg.com/@ant-design/react-slick/-/react-slick-0.28.3.tgz#ad5cf1cf50363c1a3842874d69d0ce1f26696e71" + integrity sha512-u3onF2VevGRbkGbgpldVX/nzd7LFtLeZJE0x2xIFT2qYHKkJZ6QT/jQ7KqYK4UpeTndoyrbMqLN4DiJza4BVBg== + dependencies: + "@babel/runtime" "^7.10.4" + classnames "^2.2.5" + json2mq "^0.2.0" + lodash "^4.17.21" + resize-observer-polyfill "^1.5.0" + +"@babel/runtime@^7.10.1", "@babel/runtime@^7.10.2", "@babel/runtime@^7.10.4", "@babel/runtime@^7.11.1", "@babel/runtime@^7.11.2", "@babel/runtime@^7.12.5", "@babel/runtime@^7.13.10", "@babel/runtime@^7.3.1", "@babel/runtime@^7.4.4", "@babel/runtime@^7.5.5", "@babel/runtime@^7.8.3", "@babel/runtime@^7.8.4", "@babel/runtime@^7.8.7": version "7.13.10" resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.13.10.tgz#47d42a57b6095f4468da440388fdbad8bebf0d7d" integrity sha512-4QPkjJq6Ns3V/RgpEahRk+AGfL0eO6RHHtTWoNNr5mO49G6B5+X6d6THgWEAvTrznU5xYpbAlVKRYcsCgh/Akw== dependencies: regenerator-runtime "^0.13.4" +"@ctrl/tinycolor@^3.4.0": + version "3.4.0" + resolved "https://registry.yarnpkg.com/@ctrl/tinycolor/-/tinycolor-3.4.0.tgz#c3c5ae543c897caa9c2a68630bed355be5f9990f" + integrity sha512-JZButFdZ1+/xAfpguQHoabIXkcqRRKpMrWKBkpEZZyxfY9C1DpADFB8PEqGSTeFr135SaTRfKqGKx5xSCLI7ZQ== + "@discoveryjs/json-ext@^0.5.0": version "0.5.2" resolved "https://registry.yarnpkg.com/@discoveryjs/json-ext/-/json-ext-0.5.2.tgz#8f03a22a04de437254e8ce8cc84ba39689288752" @@ -401,6 +440,54 @@ ansi-styles@^4.1.0: dependencies: color-convert "^2.0.1" +antd@^4.15.1: + version "4.15.1" + resolved "https://registry.yarnpkg.com/antd/-/antd-4.15.1.tgz#1ba1e0108866e1be03d3e8fb351582ade17d88db" + integrity sha512-zTZz8GY9yERNjSnH6xWU3Rw5sC3RtHEs/LOTKcSMTtU3Q5jHXIbAHKd1C6bYLQT6Ru75p+/UyKvJoNip/ax/WQ== + dependencies: + "@ant-design/colors" "^6.0.0" + "@ant-design/icons" "^4.6.2" + "@ant-design/react-slick" "~0.28.1" + "@babel/runtime" "^7.12.5" + array-tree-filter 
"^2.1.0" + classnames "^2.2.6" + copy-to-clipboard "^3.2.0" + lodash "^4.17.21" + moment "^2.25.3" + rc-cascader "~1.4.0" + rc-checkbox "~2.3.0" + rc-collapse "~3.1.0" + rc-dialog "~8.5.1" + rc-drawer "~4.3.0" + rc-dropdown "~3.2.0" + rc-field-form "~1.20.0" + rc-image "~5.2.4" + rc-input-number "~7.0.1" + rc-mentions "~1.5.0" + rc-menu "~8.10.0" + rc-motion "^2.4.0" + rc-notification "~4.5.2" + rc-pagination "~3.1.6" + rc-picker "~2.5.10" + rc-progress "~3.1.0" + rc-rate "~2.9.0" + rc-resize-observer "^1.0.0" + rc-select "~12.1.6" + rc-slider "~9.7.1" + rc-steps "~4.1.0" + rc-switch "~3.2.0" + rc-table "~7.13.0" + rc-tabs "~11.7.0" + rc-textarea "~0.3.0" + rc-tooltip "~5.1.0" + rc-tree "~4.1.0" + rc-tree-select "~4.3.0" + rc-trigger "^5.2.1" + rc-upload "~4.2.0-alpha.0" + rc-util "^5.9.4" + scroll-into-view-if-needed "^2.2.25" + warning "^4.0.3" + anymatch@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-2.0.0.tgz#bcb24b4f37934d9aa7ac17b4adaf89e7c76ef2eb" @@ -434,6 +521,11 @@ array-flatten@^2.1.0: resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-2.1.2.tgz#24ef80a28c1a893617e2149b0c6d0d788293b099" integrity sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ== +array-tree-filter@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/array-tree-filter/-/array-tree-filter-2.1.0.tgz#873ac00fec83749f255ac8dd083814b4f6329190" + integrity sha512-4ROwICNlNw/Hqa9v+rk5h22KjmzB1JGTMVKP2AKJBOCgb0yL0ASf0+YvCcLNNwquOHNX48jkeZIJ3a+oOQqKcw== + array-union@^1.0.1: version "1.0.2" resolved "https://registry.yarnpkg.com/array-union/-/array-union-1.0.2.tgz#9a34410e4f4e3da23dea375be5be70f24778ec39" @@ -466,6 +558,11 @@ async-limiter@~1.0.0: resolved "https://registry.yarnpkg.com/async-limiter/-/async-limiter-1.0.1.tgz#dd379e94f0db8310b08291f9d64c3209766617fd" integrity sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ== 
+async-validator@^3.0.3: + version "3.5.1" + resolved "https://registry.yarnpkg.com/async-validator/-/async-validator-3.5.1.tgz#cd62b9688b2465f48420e27adb47760ab1b5559f" + integrity sha512-DDmKA7sdSAJtTVeNZHrnr2yojfFaoeW8MfQN8CeuXg8DDQHTqKk9Fdv38dSvnesHoO8MUwMI2HphOeSyIF+wmQ== + async@^2.6.2: version "2.6.3" resolved "https://registry.yarnpkg.com/async/-/async-2.6.3.tgz#d72625e2344a3656e3a3ad4fa749fa83299d82ff" @@ -582,16 +679,16 @@ braces@^3.0.1: dependencies: fill-range "^7.0.1" -browserslist@^4.14.5: - version "4.16.3" - resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.16.3.tgz#340aa46940d7db878748567c5dea24a48ddf3717" - integrity sha512-vIyhWmIkULaq04Gt93txdh+j02yX/JzlyhLYbV3YQCn/zvES3JnY7TifHHvvr1w5hTDluNKMkV05cs4vy8Q7sw== +browserslist@^4.14.5, browserslist@^4.16.5: + version "4.16.6" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.16.6.tgz#d7901277a5a88e554ed305b183ec9b0c08f66fa2" + integrity sha512-Wspk/PqO+4W9qp5iUTJsa1B/QrYn1keNCcEP5OvP7WBwT4KaDly0uONYmC6Xa3Z5IqnUgS0KcgLYu1l74x0ZXQ== dependencies: - caniuse-lite "^1.0.30001181" - colorette "^1.2.1" - electron-to-chromium "^1.3.649" + caniuse-lite "^1.0.30001219" + colorette "^1.2.2" + electron-to-chromium "^1.3.723" escalade "^3.1.1" - node-releases "^1.1.70" + node-releases "^1.1.71" buffer-from@^1.0.0: version "1.1.1" @@ -654,10 +751,10 @@ camelcase@^6.2.0: resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.2.0.tgz#924af881c9d525ac9d87f40d964e5cea982a1809" integrity sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg== -caniuse-lite@^1.0.30001181: - version "1.0.30001204" - resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001204.tgz#256c85709a348ec4d175e847a3b515c66e79f2aa" - integrity sha512-JUdjWpcxfJ9IPamy2f5JaRDCaqJOxDzOSKtbdx4rH9VivMd1vIzoPumsJa9LoMIi4Fx2BV2KZOxWhNkBjaYivQ== +caniuse-lite@^1.0.30001219: + version "1.0.30001230" + resolved 
"https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001230.tgz#8135c57459854b2240b57a4a6786044bdc5a9f71" + integrity sha512-5yBd5nWCBS+jWKTcHOzXwo5xzcj4ePE/yjtkZyUV1BTUmrBaA9MRGC+e7mxnqXSA90CmCA8L3eKLaSUkt099IQ== chalk@^4.1.0: version "4.1.0" @@ -703,6 +800,11 @@ class-utils@^0.3.5: isobject "^3.0.0" static-extend "^0.1.1" +classnames@2.x, classnames@^2.2.1, classnames@^2.2.3, classnames@^2.2.5, classnames@^2.2.6: + version "2.3.1" + resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.3.1.tgz#dfcfa3891e306ec1dad105d0e88f4417b8535e8e" + integrity sha512-OlQdbZ7gLfGarSqxesMesDa5uz7KFbID8Kpq/SxIoNGDqY8lSYs0D+hhtBXhcdB3rcbXArFr7vlHheLk1voeNA== + clean-css@^4.2.3: version "4.2.3" resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-4.2.3.tgz#507b5de7d97b48ee53d84adb0160ff6216380f78" @@ -810,6 +912,11 @@ compression@^1.7.4: safe-buffer "5.1.2" vary "~1.1.2" +compute-scroll-into-view@^1.0.17: + version "1.0.17" + resolved "https://registry.yarnpkg.com/compute-scroll-into-view/-/compute-scroll-into-view-1.0.17.tgz#6a88f18acd9d42e9cf4baa6bec7e0522607ab7ab" + integrity sha512-j4dx+Fb0URmzbwwMUrhqWM2BEWHdFGx+qZ9qqASHRPqvTYdqvWnHg0H1hIbcyLnvgnoNAVMlwkepyqM3DaIFUg== + concat-map@0.0.1: version "0.0.1" resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" @@ -847,6 +954,13 @@ copy-descriptor@^0.1.0: resolved "https://registry.yarnpkg.com/copy-descriptor/-/copy-descriptor-0.1.1.tgz#676f6eb3c39997c2ee1ac3a924fd6124748f578d" integrity sha1-Z29us8OZl8LuGsOpJP1hJHSPV40= +copy-to-clipboard@^3.2.0: + version "3.3.1" + resolved "https://registry.yarnpkg.com/copy-to-clipboard/-/copy-to-clipboard-3.3.1.tgz#115aa1a9998ffab6196f93076ad6da3b913662ae" + integrity sha512-i13qo6kIHTTpCm8/Wup+0b1mVWETvu2kIMzKoK8FpkLkFxlt0znUAHcMzox+T8sPlqtZXq3CulEjQHsYiGFJUw== + dependencies: + toggle-selection "^1.0.6" + core-util-is@~1.0.0: version "1.0.2" resolved 
"https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" @@ -879,23 +993,22 @@ cross-spawn@^7.0.1, cross-spawn@^7.0.3: shebang-command "^2.0.0" which "^2.0.1" -css-loader@^5.0.0: - version "5.2.0" - resolved "https://registry.yarnpkg.com/css-loader/-/css-loader-5.2.0.tgz#a9ecda190500863673ce4434033710404efbff00" - integrity sha512-MfRo2MjEeLXMlUkeUwN71Vx5oc6EJnx5UQ4Yi9iUtYQvrPtwLUucYptz0hc6n++kdNcyF5olYBS4vPjJDAcLkw== +css-loader@^5.2.4: + version "5.2.4" + resolved "https://registry.yarnpkg.com/css-loader/-/css-loader-5.2.4.tgz#e985dcbce339812cb6104ef3670f08f9893a1536" + integrity sha512-OFYGyINCKkdQsTrSYxzGSFnGS4gNjcXkKkQgWxK138jgnPt+lepxdjSZNc8sHAl5vP3DhsJUxufWIjOwI8PMMw== dependencies: camelcase "^6.2.0" - cssesc "^3.0.0" icss-utils "^5.1.0" loader-utils "^2.0.0" - postcss "^8.2.8" + postcss "^8.2.10" postcss-modules-extract-imports "^3.0.0" postcss-modules-local-by-default "^4.0.0" postcss-modules-scope "^3.0.0" postcss-modules-values "^4.0.0" postcss-value-parser "^4.1.0" schema-utils "^3.0.0" - semver "^7.3.4" + semver "^7.3.5" css-select@^2.0.2: version "2.1.0" @@ -935,6 +1048,11 @@ csstype@^3.0.2: resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.0.7.tgz#2a5fb75e1015e84dd15692f71e89a1450290950b" integrity sha512-KxnUB0ZMlnUWCsx2Z8MUsr6qV6ja1w9ArPErJaJaF8a5SOWoHLIszeCTKGRGRgtLgYrs1E8CHkNSP1VZTTPc9g== +date-fns@^2.15.0: + version "2.20.1" + resolved "https://registry.yarnpkg.com/date-fns/-/date-fns-2.20.1.tgz#7e60b7035284a5f83e37500376e738d9f49ecfd3" + integrity sha512-8P5M8Kxbnovd0zfvOs7ipkiVJ3/zZQ0F/nrBW4x5E+I0uAZVZ80h6CKd24fSXQ5TLK5hXMtI4yb2O5rEZdUt2A== + debug@2.6.9, debug@^2.2.0, debug@^2.3.3: version "2.6.9" resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" @@ -1063,6 +1181,11 @@ dns-txt@^2.0.2: dependencies: buffer-indexof "^1.0.0" +dom-align@^1.7.0: + version "1.12.0" + resolved 
"https://registry.yarnpkg.com/dom-align/-/dom-align-1.12.0.tgz#56fb7156df0b91099830364d2d48f88963f5a29c" + integrity sha512-YkoezQuhp3SLFGdOlr5xkqZ640iXrnHAwVYcDg8ZKRUtO7mSzSC2BA5V0VuyAwPSJA4CLIc6EDDJh4bEsD2+zA== + dom-converter@^0.2: version "0.2.0" resolved "https://registry.yarnpkg.com/dom-converter/-/dom-converter-0.2.0.tgz#6721a9daee2e293682955b6afe416771627bb768" @@ -1124,10 +1247,10 @@ ee-first@1.1.1: resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" integrity sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0= -electron-to-chromium@^1.3.649: - version "1.3.699" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.699.tgz#854eea9db8bc8109c409a4807bfdb200dd75a2c7" - integrity sha512-fjt43CPXdPYwD9ybmKbNeLwZBmCVdLY2J5fGZub7/eMPuiqQznOGNXv/wurnpXIlE7ScHnvG9Zi+H4/i6uMKmw== +electron-to-chromium@^1.3.723: + version "1.3.740" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.740.tgz#e38b7d2b848f632191b643e6dabca51be2162922" + integrity sha512-Mi2m55JrX2BFbNZGKYR+2ItcGnR4O5HhrvgoRRyZQlaMGQULqDhoGkLWHzJoshSzi7k1PUofxcDbNhlFrDZNhg== emoji-regex@^7.0.1: version "7.0.3" @@ -2072,6 +2195,13 @@ json-schema-traverse@^0.4.1: resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== +json2mq@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/json2mq/-/json2mq-0.2.0.tgz#b637bd3ba9eabe122c83e9720483aeb10d2c904a" + integrity sha1-tje9O6nqvhIsg+lyBIOusQ0skEo= + dependencies: + string-convert "^0.2.0" + json3@^3.3.3: version "3.3.3" resolved "https://registry.yarnpkg.com/json3/-/json3-3.3.3.tgz#7fc10e375fc5ae42c4705a5cc0aa6f62be305b81" @@ -2213,7 +2343,7 @@ locate-path@^5.0.0: dependencies: p-locate "^4.1.0" -lodash@^4.17.11, lodash@^4.17.14, lodash@^4.17.20: 
+lodash@^4.17.11, lodash@^4.17.14, lodash@^4.17.20, lodash@^4.17.21: version "4.17.21" resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== @@ -2223,7 +2353,7 @@ loglevel@^1.6.8: resolved "https://registry.yarnpkg.com/loglevel/-/loglevel-1.7.1.tgz#005fde2f5e6e47068f935ff28573e125ef72f197" integrity sha512-Hesni4s5UkWkwCGJMQGAh71PaLUmKFM60dHvq0zi/vDhhrzuk+4GgNbTXJ12YYQJn6ZKBDNIjYcuQGKudvqrIw== -loose-envify@^1.1.0, loose-envify@^1.4.0: +loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q== @@ -2346,6 +2476,14 @@ mimic-fn@^2.1.0: resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== +mini-store@^3.0.1: + version "3.0.6" + resolved "https://registry.yarnpkg.com/mini-store/-/mini-store-3.0.6.tgz#44b86be5b2877271224ce0689b3a35a2dffb1ca9" + integrity sha512-YzffKHbYsMQGUWQRKdsearR79QsMzzJcDDmZKlJBqt5JNkqpyJHYlK6gP61O36X+sLf76sO9G6mhKBe83gIZIQ== + dependencies: + hoist-non-react-statics "^3.3.2" + shallowequal "^1.0.2" + minimalistic-assert@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz#2e194de044626d4a10e7f7fbc00ce73e83e4d5c7" @@ -2378,6 +2516,11 @@ mkdirp@^0.5.1, mkdirp@^0.5.5: dependencies: minimist "^1.2.5" +moment@^2.24.0, moment@^2.25.3: + version "2.29.1" + resolved "https://registry.yarnpkg.com/moment/-/moment-2.29.1.tgz#b2be769fa31940be9eeea6469c075e35006fa3d3" + integrity 
sha512-kHmoybcPV8Sqy59DwNDY3Jefr64lK/by/da0ViFcuA4DH0vQg5Q6Ze5VimxkfQNSC+Mls/Kx53s7TjP1RhFEDQ== + ms@2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" @@ -2416,10 +2559,10 @@ nan@^2.12.1: resolved "https://registry.yarnpkg.com/nan/-/nan-2.14.2.tgz#f5376400695168f4cc694ac9393d0c9585eeea19" integrity sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ== -nanoid@^3.1.20: - version "3.1.22" - resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.1.22.tgz#b35f8fb7d151990a8aebd5aa5015c03cf726f844" - integrity sha512-/2ZUaJX2ANuLtTvqTlgqBQNJoQO398KyJgZloL0PZkC0dpysjncRUPsFe3DUPzz/y3h+u7C46np8RMuvF3jsSQ== +nanoid@^3.1.23: + version "3.1.23" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.1.23.tgz#f744086ce7c2bc47ee0a8472574d5c78e4183a81" + integrity sha512-FiB0kzdP0FFVGDKlRLEQ1BgDzU87dy5NnzjeW9YZNt+/c3+q82EQDUwniSAUxp/F0gFNI1ZhKU1FqYsMuqZVnw== nanomatch@^1.2.9: version "1.2.13" @@ -2471,10 +2614,10 @@ node-forge@^0.10.0: resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-0.10.0.tgz#32dea2afb3e9926f02ee5ce8794902691a676bf3" integrity sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA== -node-releases@^1.1.70: - version "1.1.71" - resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.71.tgz#cb1334b179896b1c89ecfdd4b725fb7bbdfc7dbb" - integrity sha512-zR6HoT6LrLCRBwukmrVbHv0EpEQjksO6GmFcZQQuCAy139BEsoVKPYnf3jongYW83fAa1torLGYwxxky/p28sg== +node-releases@^1.1.71: + version "1.1.72" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.72.tgz#14802ab6b1039a79a0c7d662b610a5bbd76eacbe" + integrity sha512-LLUo+PpH3dU6XizX3iVoubUNheF/owjXCZZ5yACDxNnPtgFuludV1ZL3ayK1kVep42Rmm0+R9/Y60NQbZ2bifw== normalize-path@^2.1.1: version "2.1.1" @@ -2827,13 +2970,13 @@ postcss-value-parser@^4.1.0: resolved 
"https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-4.1.0.tgz#443f6a20ced6481a2bda4fa8532a6e55d789a2cb" integrity sha512-97DXOFbQJhk71ne5/Mt6cOu6yxsSfM0QGQyl0L25Gca4yGWEGJaig7l7gbCX623VqTBNGLRLaVUCnNkcedlRSQ== -postcss@^8.2.8: - version "8.2.8" - resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.2.8.tgz#0b90f9382efda424c4f0f69a2ead6f6830d08ece" - integrity sha512-1F0Xb2T21xET7oQV9eKuctbM9S7BC0fetoHCc4H13z0PT6haiRLP4T0ZY4XWh7iLP0usgqykT6p9B2RtOf4FPw== +postcss@^8.2.10: + version "8.2.15" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.2.15.tgz#9e66ccf07292817d226fc315cbbf9bc148fbca65" + integrity sha512-2zO3b26eJD/8rb106Qu2o7Qgg52ND5HPjcyQiK2B98O388h43A448LCslC0dI2P97wCAQRJsFvwTRcXxTKds+Q== dependencies: colorette "^1.2.2" - nanoid "^3.1.20" + nanoid "^3.1.23" source-map "^0.6.1" prettier@^2.1.2: @@ -2931,6 +3074,349 @@ raw-body@2.4.0: iconv-lite "0.4.24" unpipe "1.0.0" +rc-align@^4.0.0: + version "4.0.9" + resolved "https://registry.yarnpkg.com/rc-align/-/rc-align-4.0.9.tgz#46d8801c4a139ff6a65ad1674e8efceac98f85f2" + integrity sha512-myAM2R4qoB6LqBul0leaqY8gFaiECDJ3MtQDmzDo9xM9NRT/04TvWOYd2YHU9zvGzqk9QXF6S9/MifzSKDZeMw== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "2.x" + dom-align "^1.7.0" + rc-util "^5.3.0" + resize-observer-polyfill "^1.5.1" + +rc-cascader@~1.4.0: + version "1.4.2" + resolved "https://registry.yarnpkg.com/rc-cascader/-/rc-cascader-1.4.2.tgz#caa81098e3ef4d5f823f9156f6d8d6dbd6321afa" + integrity sha512-JVuLGrSi+3G8DZyPvlKlGVWJjhoi9NTz6REHIgRspa5WnznRkKGm2ejb0jJtz0m2IL8Q9BG4ZA2sXuqAu71ltQ== + dependencies: + "@babel/runtime" "^7.12.5" + array-tree-filter "^2.1.0" + rc-trigger "^5.0.4" + rc-util "^5.0.1" + warning "^4.0.1" + +rc-checkbox@~2.3.0: + version "2.3.2" + resolved "https://registry.yarnpkg.com/rc-checkbox/-/rc-checkbox-2.3.2.tgz#f91b3678c7edb2baa8121c9483c664fa6f0aefc1" + integrity sha512-afVi1FYiGv1U0JlpNH/UaEXdh6WUJjcWokj/nUN2TgG80bfG+MDdbfHKlLcNNba94mbjy2/SXJ1HDgrOkXGAjg== + 
dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.1" + +rc-collapse@~3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/rc-collapse/-/rc-collapse-3.1.0.tgz#4ce5e612568c5fbeaf368cc39214471c1461a1a1" + integrity sha512-EwpNPJcLe7b+5JfyaxM9ZNnkCgqArt3QQO0Cr5p5plwz/C9h8liAmjYY5I4+hl9lAjBqb7ZwLu94+z+rt5g1WQ== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "2.x" + rc-motion "^2.3.4" + rc-util "^5.2.1" + shallowequal "^1.1.0" + +rc-dialog@~8.5.0, rc-dialog@~8.5.1: + version "8.5.2" + resolved "https://registry.yarnpkg.com/rc-dialog/-/rc-dialog-8.5.2.tgz#530e289c25a31c15c85a0e8a4ba3f33414bff418" + integrity sha512-3n4taFcjqhTE9uNuzjB+nPDeqgRBTEGBfe46mb1e7r88DgDo0lL4NnxY/PZ6PJKd2tsCt+RrgF/+YeTvJ/Thsw== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.6" + rc-motion "^2.3.0" + rc-util "^5.6.1" + +rc-drawer@~4.3.0: + version "4.3.1" + resolved "https://registry.yarnpkg.com/rc-drawer/-/rc-drawer-4.3.1.tgz#356333a7af01b777abd685c96c2ce62efb44f3f3" + integrity sha512-GMfFy4maqxS9faYXEhQ+0cA1xtkddEQzraf6SAdzWbn444DrrLogwYPk1NXSpdXjLCLxgxOj9MYtyYG42JsfXg== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.6" + rc-util "^5.7.0" + +rc-dropdown@^3.1.3, rc-dropdown@~3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/rc-dropdown/-/rc-dropdown-3.2.0.tgz#da6c2ada403842baee3a9e909a0b1a91ba3e1090" + integrity sha512-j1HSw+/QqlhxyTEF6BArVZnTmezw2LnSmRk6I9W7BCqNCKaRwleRmMMs1PHbuaG8dKHVqP6e21RQ7vPBLVnnNw== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.6" + rc-trigger "^5.0.4" + +rc-field-form@~1.20.0: + version "1.20.0" + resolved "https://registry.yarnpkg.com/rc-field-form/-/rc-field-form-1.20.0.tgz#2201092095429f7f020825462835c4086d2baf16" + integrity sha512-jkzsIfXR7ywEYdeAtktt1aLff88wxIPDLpq7KShHNl4wlsWrCE+TzkXBfjvVzYOVZt5GGrD8YDqNO/q6eaR/eA== + dependencies: + "@babel/runtime" "^7.8.4" + async-validator "^3.0.3" + rc-util "^5.8.0" + +rc-image@~5.2.4: + version "5.2.4" + resolved 
"https://registry.yarnpkg.com/rc-image/-/rc-image-5.2.4.tgz#ff1059f937bde6ca918c6f1beb316beba911f255" + integrity sha512-kWOjhZC1OoGKfvWqtDoO9r8WUNswBwnjcstI6rf7HMudz0usmbGvewcWqsOhyaBRJL9+I4eeG+xiAoxV1xi75Q== + dependencies: + "@babel/runtime" "^7.11.2" + classnames "^2.2.6" + rc-dialog "~8.5.0" + rc-util "^5.0.6" + +rc-input-number@~7.0.1: + version "7.0.4" + resolved "https://registry.yarnpkg.com/rc-input-number/-/rc-input-number-7.0.4.tgz#2f62df75fd19d2cc898de62b3827086084d65585" + integrity sha512-sORROpUKc7iEHfgJjpCg5HdFQwW+MqWswNzyjcl/U1KK0Xwar+3ksYZ7ty6yOQmjyIvpThBGFyjzEp/R3WU1bw== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.5" + rc-util "^5.9.8" + +rc-mentions@~1.5.0: + version "1.5.3" + resolved "https://registry.yarnpkg.com/rc-mentions/-/rc-mentions-1.5.3.tgz#b92bebadf8ad9fb3586ba1af922d63b49d991c67" + integrity sha512-NG/KB8YiKBCJPHHvr/QapAb4f9YzLJn7kDHtmI1K6t7ZMM5YgrjIxNNhoRKKP9zJvb9PdPts69Hbg4ZMvLVIFQ== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.6" + rc-menu "^8.0.1" + rc-textarea "^0.3.0" + rc-trigger "^5.0.4" + rc-util "^5.0.1" + +rc-menu@^8.0.1, rc-menu@^8.6.1, rc-menu@~8.10.0: + version "8.10.7" + resolved "https://registry.yarnpkg.com/rc-menu/-/rc-menu-8.10.7.tgz#8ea2d2c27137f77a8580c403df634ec5d780f046" + integrity sha512-m/ypV7OjkkUsMdutzMUxEI8tWyi0Y1TQ5YkSDk7k2uv2aCKkHYEoDKsDAfcPeejo3HMo2z5unWE+jD+dCphraw== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "2.x" + mini-store "^3.0.1" + rc-motion "^2.0.1" + rc-trigger "^5.1.2" + rc-util "^5.7.0" + resize-observer-polyfill "^1.5.0" + shallowequal "^1.1.0" + +rc-motion@^2.0.0, rc-motion@^2.0.1, rc-motion@^2.2.0, rc-motion@^2.3.0, rc-motion@^2.3.4, rc-motion@^2.4.0: + version "2.4.1" + resolved "https://registry.yarnpkg.com/rc-motion/-/rc-motion-2.4.1.tgz#323f47c8635e6b2bc0cba2dfad25fc415b58e1dc" + integrity sha512-TWLvymfMu8SngPx5MDH8dQ0D2RYbluNTfam4hY/dNNx9RQ3WtGuZ/GXHi2ymLMzH+UNd6EEFYkOuR5JTTtm8Xg== + dependencies: + "@babel/runtime" "^7.11.1" + 
classnames "^2.2.1" + rc-util "^5.2.1" + +rc-notification@~4.5.2: + version "4.5.5" + resolved "https://registry.yarnpkg.com/rc-notification/-/rc-notification-4.5.5.tgz#9660a495d5f20bd677686e4f7fc00e4f0c1a3849" + integrity sha512-YIfhTSw+h5GsSdgMnuMx24wqiPlg3FeamuOlkh9RkyHx+SeZVAKzQ0juy2NGvPEF2hDWi5xTqxUqLdo0L2AmGg== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "2.x" + rc-motion "^2.2.0" + rc-util "^5.0.1" + +rc-overflow@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/rc-overflow/-/rc-overflow-1.1.1.tgz#c465e75f115f1b4b0cbe5e05faf3a84469d18190" + integrity sha512-bkGrxvWtz6xQfxBPBQcN8xOEHFCeG0R4pfLAku6kFLQF9NPMTt5HvT+Bq0+stqom9eI3WRlun6RPzfjTamPwew== + dependencies: + "@babel/runtime" "^7.11.1" + classnames "^2.2.1" + rc-resize-observer "^1.0.0" + rc-util "^5.5.1" + +rc-pagination@~3.1.6: + version "3.1.6" + resolved "https://registry.yarnpkg.com/rc-pagination/-/rc-pagination-3.1.6.tgz#db3c06e50270b52fe272ac527c1fdc2c8d28af1f" + integrity sha512-Pb2zJEt8uxXzYCWx/2qwsYZ3vSS9Eqdw0cJBli6C58/iYhmvutSBqrBJh51Z5UzYc5ZcW5CMeP5LbbKE1J3rpw== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.1" + +rc-picker@~2.5.10: + version "2.5.10" + resolved "https://registry.yarnpkg.com/rc-picker/-/rc-picker-2.5.10.tgz#0db17c535a37abbe5d016bdcdfb13d6626f802d0" + integrity sha512-d2or2jql9SSY8CaRPybpbKkXBq3bZ6g88UKyWQZBLTCrc92Xm87RfRC/P3UEQo/CLmia3jVF7IXVi1HmNe2DZA== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.1" + date-fns "^2.15.0" + moment "^2.24.0" + rc-trigger "^5.0.4" + rc-util "^5.4.0" + shallowequal "^1.1.0" + +rc-progress@~3.1.0: + version "3.1.3" + resolved "https://registry.yarnpkg.com/rc-progress/-/rc-progress-3.1.3.tgz#d77d8fd26d9d948d72c2a28b64b71a6e86df2426" + integrity sha512-Jl4fzbBExHYMoC6HBPzel0a9VmhcSXx24LVt/mdhDM90MuzoMCJjXZAlhA0V0CJi+SKjMhfBoIQ6Lla1nD4QNw== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.6" + +rc-rate@~2.9.0: + version "2.9.1" + resolved 
"https://registry.yarnpkg.com/rc-rate/-/rc-rate-2.9.1.tgz#e43cb95c4eb90a2c1e0b16ec6614d8c43530a731" + integrity sha512-MmIU7FT8W4LYRRHJD1sgG366qKtSaKb67D0/vVvJYR0lrCuRrCiVQ5qhfT5ghVO4wuVIORGpZs7ZKaYu+KMUzA== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.5" + rc-util "^5.0.1" + +rc-resize-observer@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/rc-resize-observer/-/rc-resize-observer-1.0.0.tgz#97fb89856f62fec32ab6e40933935cf58e2e102d" + integrity sha512-RgKGukg1mlzyGdvzF7o/LGFC8AeoMH9aGzXTUdp6m+OApvmRdUuOscq/Y2O45cJA+rXt1ApWlpFoOIioXL3AGg== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.1" + rc-util "^5.0.0" + resize-observer-polyfill "^1.5.1" + +rc-select@^12.0.0, rc-select@~12.1.6: + version "12.1.9" + resolved "https://registry.yarnpkg.com/rc-select/-/rc-select-12.1.9.tgz#87b1bbb58649bc4a4d7961c1f1aa36a16c011a59" + integrity sha512-jsqcdby3Ag9ohYQ0d4vS4Q2jeWjj6kb2NHS9WcQSse0/5lCb3mqXI/1fkKRRIhdQvMBklYh4ctSox3mDrZiB8A== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "2.x" + rc-motion "^2.0.1" + rc-overflow "^1.0.0" + rc-trigger "^5.0.4" + rc-util "^5.9.8" + rc-virtual-list "^3.2.0" + +rc-slider@~9.7.1: + version "9.7.2" + resolved "https://registry.yarnpkg.com/rc-slider/-/rc-slider-9.7.2.tgz#282f571f7582752ebaa33964e441184f4e79ad74" + integrity sha512-mVaLRpDo6otasBs6yVnG02ykI3K6hIrLTNfT5eyaqduFv95UODI9PDS6fWuVVehVpdS4ENgOSwsTjrPVun+k9g== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.5" + rc-tooltip "^5.0.1" + rc-util "^5.0.0" + shallowequal "^1.1.0" + +rc-steps@~4.1.0: + version "4.1.3" + resolved "https://registry.yarnpkg.com/rc-steps/-/rc-steps-4.1.3.tgz#208580e22db619e3830ddb7fa41bc886c65d9803" + integrity sha512-GXrMfWQOhN3sVze3JnzNboHpQdNHcdFubOETUHyDpa/U3HEKBZC3xJ8XK4paBgF4OJ3bdUVLC+uBPc6dCxvDYA== + dependencies: + "@babel/runtime" "^7.10.2" + classnames "^2.2.3" + rc-util "^5.0.1" + +rc-switch@~3.2.0: + version "3.2.2" + resolved 
"https://registry.yarnpkg.com/rc-switch/-/rc-switch-3.2.2.tgz#d001f77f12664d52595b4f6fb425dd9e66fba8e8" + integrity sha512-+gUJClsZZzvAHGy1vZfnwySxj+MjLlGRyXKXScrtCTcmiYNPzxDFOxdQ/3pK1Kt/0POvwJ/6ALOR8gwdXGhs+A== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.1" + rc-util "^5.0.1" + +rc-table@~7.13.0: + version "7.13.3" + resolved "https://registry.yarnpkg.com/rc-table/-/rc-table-7.13.3.tgz#25d5f5ec47ee2d8a293aff18c4c4b8876f78c22b" + integrity sha512-oP4fknjvKCZAaiDnvj+yzBaWcg+JYjkASbeWonU1BbrLcomkpKvMUgPODNEzg0QdXA9OGW0PO86h4goDSW06Kg== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.5" + rc-resize-observer "^1.0.0" + rc-util "^5.4.0" + shallowequal "^1.1.0" + +rc-tabs@~11.7.0: + version "11.7.3" + resolved "https://registry.yarnpkg.com/rc-tabs/-/rc-tabs-11.7.3.tgz#32a30e59c6992d60fb58115ba0bf2652b337ed43" + integrity sha512-5nd2NVss9TprPRV9r8N05SjQyAE7zDrLejxFLcbJ+BdLxSwnGnk3ws/Iq0smqKZUnPQC0XEvnpF3+zlllUUT2w== + dependencies: + "@babel/runtime" "^7.11.2" + classnames "2.x" + rc-dropdown "^3.1.3" + rc-menu "^8.6.1" + rc-resize-observer "^1.0.0" + rc-util "^5.5.0" + +rc-textarea@^0.3.0, rc-textarea@~0.3.0: + version "0.3.4" + resolved "https://registry.yarnpkg.com/rc-textarea/-/rc-textarea-0.3.4.tgz#1408a64c87b5e76db5c847699ef9ab5ee97dd6f9" + integrity sha512-ILUYx831ZukQPv3m7R4RGRtVVWmL1LV4ME03L22mvT56US0DGCJJaRTHs4vmpcSjFHItph5OTmhodY4BOwy81A== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.1" + rc-resize-observer "^1.0.0" + rc-util "^5.7.0" + +rc-tooltip@^5.0.1, rc-tooltip@~5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/rc-tooltip/-/rc-tooltip-5.1.0.tgz#abb453c463c31a705aa01d268279f4ae6ae3b15f" + integrity sha512-pFqD1JZwNIpbdcefB7k5xREoHAWM/k3yQwYF0iminbmDXERgq4rvBfUwIvlCqqZSM7HDr9hYeYr6ZsVNaKtvCQ== + dependencies: + "@babel/runtime" "^7.11.2" + rc-trigger "^5.0.0" + +rc-tree-select@~4.3.0: + version "4.3.1" + resolved 
"https://registry.yarnpkg.com/rc-tree-select/-/rc-tree-select-4.3.1.tgz#4881bae5f6a5d696c5f61e52ad9489313f356eb4" + integrity sha512-OeV8u5kBEJ8MbatP04Rh8T3boOHGjdGBTEm1a0bubBbB2GNNhlMOr4ZxezkHYtXf02JdBS/WyydmI/RMjXgtJA== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "2.x" + rc-select "^12.0.0" + rc-tree "^4.0.0" + rc-util "^5.0.5" + +rc-tree@^4.0.0, rc-tree@~4.1.0: + version "4.1.5" + resolved "https://registry.yarnpkg.com/rc-tree/-/rc-tree-4.1.5.tgz#734ab1bfe835e78791be41442ca0e571147ab6fa" + integrity sha512-q2vjcmnBDylGZ9/ZW4F9oZMKMJdbFWC7um+DAQhZG1nqyg1iwoowbBggUDUaUOEryJP+08bpliEAYnzJXbI5xQ== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "2.x" + rc-motion "^2.0.1" + rc-util "^5.0.0" + rc-virtual-list "^3.0.1" + +rc-trigger@^5.0.0, rc-trigger@^5.0.4, rc-trigger@^5.1.2, rc-trigger@^5.2.1: + version "5.2.3" + resolved "https://registry.yarnpkg.com/rc-trigger/-/rc-trigger-5.2.3.tgz#8c55046ab432d7b52d51c69afb57ebb5bbe37e17" + integrity sha512-6Fokao07HUbqKIDkDRFEM0AGZvsvK0Fbp8A/KFgl1ngaqfO1nY037cISCG1Jm5fxImVsXp9awdkP7Vu5cxjjog== + dependencies: + "@babel/runtime" "^7.11.2" + classnames "^2.2.6" + rc-align "^4.0.0" + rc-motion "^2.0.0" + rc-util "^5.5.0" + +rc-upload@~4.2.0-alpha.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/rc-upload/-/rc-upload-4.2.0.tgz#5e21cab29f10ecb69d71cfb9055912d0e1e08ee0" + integrity sha512-BXtvBs1PnwLjaUzBBU5z4yb9NMSaxc6mUIoPmS9LUAzaTz12L3TLrwu+8dnopYUiyLmYFS3LEO7aUfEWBqJfSA== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.5" + rc-util "^5.2.0" + +rc-util@^5.0.0, rc-util@^5.0.1, rc-util@^5.0.5, rc-util@^5.0.6, rc-util@^5.0.7, rc-util@^5.2.0, rc-util@^5.2.1, rc-util@^5.3.0, rc-util@^5.4.0, rc-util@^5.5.0, rc-util@^5.5.1, rc-util@^5.6.1, rc-util@^5.7.0, rc-util@^5.8.0, rc-util@^5.9.4, rc-util@^5.9.8: + version "5.9.8" + resolved "https://registry.yarnpkg.com/rc-util/-/rc-util-5.9.8.tgz#dfcacc1f7b7c45fa18ab786e2b530dd0509073f1" + integrity 
sha512-typLSHYGf5irvGLYQshs0Ra3aze086h0FhzsAkyirMunYZ7b3Te8gKa5PVaanoHaZa9sS6qx98BxgysoRP+6Tw== + dependencies: + "@babel/runtime" "^7.12.5" + react-is "^16.12.0" + shallowequal "^1.1.0" + +rc-virtual-list@^3.0.1, rc-virtual-list@^3.2.0: + version "3.2.6" + resolved "https://registry.yarnpkg.com/rc-virtual-list/-/rc-virtual-list-3.2.6.tgz#2c92a40f4425e19881b38134d6bd286a11137d2d" + integrity sha512-8FiQLDzm3c/tMX0d62SQtKDhLH7zFlSI6pWBAPt+TUntEqd3Lz9zFAmpvTu8gkvUom/HCsDSZs4wfV4wDPWC0Q== + dependencies: + classnames "^2.2.6" + rc-resize-observer "^1.0.0" + rc-util "^5.0.7" + react-dom@^16.13.1: version "16.14.0" resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-16.14.0.tgz#7ad838ec29a777fb3c75c3a190f661cf92ab8b89" @@ -2941,7 +3427,7 @@ react-dom@^16.13.1: prop-types "^15.6.2" scheduler "^0.19.1" -react-is@^16.7.0, react-is@^16.8.1: +react-is@^16.12.0, react-is@^16.7.0, react-is@^16.8.1: version "16.13.1" resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== @@ -3075,6 +3561,11 @@ requires-port@^1.0.0: resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff" integrity sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8= +resize-observer-polyfill@^1.5.0, resize-observer-polyfill@^1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz#0e9020dd3d21024458d4ebd27e23e40269810464" + integrity sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg== + resolve-cwd@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/resolve-cwd/-/resolve-cwd-2.0.0.tgz#00a9f7387556e27038eae232caa372a6a59b665a" @@ -3177,6 +3668,13 @@ schema-utils@^3.0.0: ajv "^6.12.5" ajv-keywords "^3.5.2" +scroll-into-view-if-needed@^2.2.25: + version "2.2.28" + resolved 
"https://registry.yarnpkg.com/scroll-into-view-if-needed/-/scroll-into-view-if-needed-2.2.28.tgz#5a15b2f58a52642c88c8eca584644e01703d645a" + integrity sha512-8LuxJSuFVc92+0AdNv4QOxRL4Abeo1DgLnGNkn1XlaujPH/3cCFz3QI60r2VNu4obJJROzgnIUw5TKQkZvZI1w== + dependencies: + compute-scroll-into-view "^1.0.17" + select-hose@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/select-hose/-/select-hose-2.0.0.tgz#625d8658f865af43ec962bfc376a37359a4994ca" @@ -3199,7 +3697,7 @@ semver@^6.3.0: resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== -semver@^7.3.4: +semver@^7.3.4, semver@^7.3.5: version "7.3.5" resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.5.tgz#0b621c879348d8998e4b0e4be94b3f12e6018ef7" integrity sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ== @@ -3287,6 +3785,11 @@ shallow-clone@^3.0.0: dependencies: kind-of "^6.0.2" +shallowequal@^1.0.2, shallowequal@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/shallowequal/-/shallowequal-1.1.0.tgz#188d521de95b9087404fd4dcb68b13df0ae4e7f8" + integrity sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ== + shebang-command@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea" @@ -3454,6 +3957,11 @@ static-extend@^0.1.1: resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.5.0.tgz#161c7dac177659fd9811f43771fa99381478628c" integrity sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow= +string-convert@^0.2.0: + version "0.2.1" + resolved "https://registry.yarnpkg.com/string-convert/-/string-convert-0.2.1.tgz#6982cc3049fbb4cd85f8b24568b9d9bf39eeff97" + integrity sha1-aYLMMEn7tM2F+LJFaLnZvznu/5c= + string-width@^3.0.0, string-width@^3.1.0: version "3.1.0" resolved 
"https://registry.yarnpkg.com/string-width/-/string-width-3.1.0.tgz#22767be21b62af1081574306f69ac51b62203961" @@ -3610,6 +4118,11 @@ to-regex@^3.0.1, to-regex@^3.0.2: regex-not "^1.0.2" safe-regex "^1.1.0" +toggle-selection@^1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/toggle-selection/-/toggle-selection-1.0.6.tgz#6e45b1263f2017fa0acc7d89d78b15b8bf77da32" + integrity sha1-bkWxJj8gF/oKzH2J14sVuL932jI= + toidentifier@1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.0.tgz#7e1be3470f1e77948bc43d94a3c8f4d7752ba553" @@ -3745,6 +4258,13 @@ vary@~1.1.2: resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" integrity sha1-IpnwLG3tMNSllhsLn3RSShj2NPw= +warning@^4.0.1, warning@^4.0.3: + version "4.0.3" + resolved "https://registry.yarnpkg.com/warning/-/warning-4.0.3.tgz#16e9e077eb8a86d6af7d64aa1e05fd85b4678ca3" + integrity sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w== + dependencies: + loose-envify "^1.0.0" + watchpack@^2.0.0: version "2.1.1" resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-2.1.1.tgz#e99630550fca07df9f90a06056987baa40a689c7" diff --git a/tb_plugin/samples/resnet50_num_workers_0/worker0.1623143089861.pt.trace.json.gz b/tb_plugin/samples/resnet50_num_workers_0/worker0.1623143089861.pt.trace.json.gz new file mode 100644 index 000000000..769c3eb78 Binary files /dev/null and b/tb_plugin/samples/resnet50_num_workers_0/worker0.1623143089861.pt.trace.json.gz differ diff --git a/tb_plugin/samples/resnet50_num_workers_0/worker0.1623143566756.pt.trace.json.gz b/tb_plugin/samples/resnet50_num_workers_0/worker0.1623143566756.pt.trace.json.gz new file mode 100644 index 000000000..383a06643 Binary files /dev/null and b/tb_plugin/samples/resnet50_num_workers_0/worker0.1623143566756.pt.trace.json.gz differ diff --git a/tb_plugin/samples/resnet50_num_workers_0/worker0.pt.trace.json.gz 
b/tb_plugin/samples/resnet50_num_workers_0/worker0.pt.trace.json.gz deleted file mode 100644 index d3fb4076d..000000000 Binary files a/tb_plugin/samples/resnet50_num_workers_0/worker0.pt.trace.json.gz and /dev/null differ diff --git a/tb_plugin/samples/resnet50_num_workers_4/worker0.1623212756351.pt.trace.json.gz b/tb_plugin/samples/resnet50_num_workers_4/worker0.1623212756351.pt.trace.json.gz new file mode 100644 index 000000000..234cf25df Binary files /dev/null and b/tb_plugin/samples/resnet50_num_workers_4/worker0.1623212756351.pt.trace.json.gz differ diff --git a/tb_plugin/samples/resnet50_num_workers_4/worker0.1623213129365.pt.trace.json.gz b/tb_plugin/samples/resnet50_num_workers_4/worker0.1623213129365.pt.trace.json.gz new file mode 100644 index 000000000..3e633b5fd Binary files /dev/null and b/tb_plugin/samples/resnet50_num_workers_4/worker0.1623213129365.pt.trace.json.gz differ diff --git a/tb_plugin/samples/resnet50_num_workers_4/worker0.pt.trace.json.gz b/tb_plugin/samples/resnet50_num_workers_4/worker0.pt.trace.json.gz deleted file mode 100644 index 29dba67d0..000000000 Binary files a/tb_plugin/samples/resnet50_num_workers_4/worker0.pt.trace.json.gz and /dev/null differ diff --git a/tb_plugin/setup.py b/tb_plugin/setup.py index a0754ed26..8880098b4 100644 --- a/tb_plugin/setup.py +++ b/tb_plugin/setup.py @@ -1,22 +1,29 @@ # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# -------------------------------------------------------------------------- - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - import os +import pathlib import setuptools +import subprocess + + +def read(rel_path): + here = os.path.abspath(os.path.dirname(__file__)) + with open(os.path.join(here, rel_path)) as fp: + return fp.read() -def get_version(): - with open("version.txt", encoding="utf-8") as f: - version = f.read().strip() + +def get_version(rel_path): + for line in read(rel_path).splitlines(): + if line.startswith("__version__"): + delim = '"' if '"' in line else "'" + version = line.split(delim)[1] if os.getenv('TORCH_TB_PROFILER_BUILD_VERSION'): version = os.getenv('TORCH_TB_PROFILER_BUILD_VERSION') return version + INSTALL_REQUIRED = [ "pandas >= 1.0.0", "tensorboard >= 1.15, !=2.1.0" @@ -27,15 +34,46 @@ def get_version(): "torchvision >= 0.8" ] +EXTRAS = { + "s3": ["boto3"], + "blob": ["azure-storage-blob"], + "gs": ["google-cloud-storage"] +} + + +class build_fe(setuptools.Command): + """Build the frontend""" + description = "run yarn build on frontend directory" + + user_options = [] + + def initialize_options(self): + pass + + def finalize_options(self): + pass + + def run(self): + cwd = pathlib.Path().absolute() + root = pathlib.Path(__file__).parent.absolute() + os.chdir(root / "fe") + subprocess.run(["yarn", "build:copy"], check=True) + # restore the working directory + os.chdir(cwd) + + setuptools.setup( name="torch_tb_profiler", - version=get_version(), + version=get_version(os.path.join('torch_tb_profiler', '__init__.py')), description="PyTorch Profiler TensorBoard Plugin", long_description="PyTorch Profiler TensorBoard Plugin : \ https://github.com/pytorch/kineto/tree/master/tb_plugin", url="https://github.com/pytorch/kineto/tree/master/tb_plugin", author="PyTorch Team", author_email="packages@pytorch.org", + cmdclass={ + "build_fe": build_fe + }, 
packages=setuptools.find_packages(), package_data={ "torch_tb_profiler": ["static/**"], @@ -45,7 +83,7 @@ def get_version(): "torch_profiler = torch_tb_profiler.plugin:TorchProfilerPlugin", ], }, - python_requires=">= 2.7, != 3.0.*, != 3.1.*", + python_requires=">=3.6.2", install_requires=INSTALL_REQUIRED, tests_require=TESTS_REQUIRED, classifiers=[ @@ -63,4 +101,5 @@ def get_version(): ], license='BSD-3', keywords='pytorch tensorboard profile plugin', + extras_require=EXTRAS ) diff --git a/tb_plugin/test/gpu_metrics_expected.json b/tb_plugin/test/gpu_metrics_expected.json new file mode 100644 index 000000000..81f03632c --- /dev/null +++ b/tb_plugin/test/gpu_metrics_expected.json @@ -0,0 +1,3105 @@ + +{ + "schemaVersion": 1, + + "computeProperties": [ + + { + "id": 0, "name": "Tesla V100-DGXS-32GB", "totalGlobalMem": 34084028416, + "major": 7, "minor": 0, + "maxThreadsPerBlock": 1024, "maxThreadsPerMultiProcessor": 2048, + "regsPerBlock": 65536, "regsPerMultiprocessor": 65536, "warpSize": 32, + "sharedMemPerBlock": 49152, "sharedMemPerMultiprocessor": 98304, + "multiProcessorCount": 80, "sharedMemPerBlockOptin": 98304 + }, + + { + "id": 1, "name": "Tesla V100-DGXS-32GB", "totalGlobalMem": 34087305216, + "major": 7, "minor": 0, + "maxThreadsPerBlock": 1024, "maxThreadsPerMultiProcessor": 2048, + "regsPerBlock": 65536, "regsPerMultiprocessor": 65536, "warpSize": 32, + "sharedMemPerBlock": 49152, "sharedMemPerMultiprocessor": 98304, + "multiProcessorCount": 80, "sharedMemPerBlockOptin": 98304 + }, + + { + "id": 2, "name": "Tesla V100-DGXS-32GB", "totalGlobalMem": 34087305216, + "major": 7, "minor": 0, + "maxThreadsPerBlock": 1024, "maxThreadsPerMultiProcessor": 2048, + "regsPerBlock": 65536, "regsPerMultiprocessor": 65536, "warpSize": 32, + "sharedMemPerBlock": 49152, "sharedMemPerMultiprocessor": 98304, + "multiProcessorCount": 80, "sharedMemPerBlockOptin": 98304 + }, + + { + "id": 3, "name": "Tesla V100-DGXS-32GB", "totalGlobalMem": 34087305216, + "major": 7, 
"minor": 0, + "maxThreadsPerBlock": 1024, "maxThreadsPerMultiProcessor": 2048, + "regsPerBlock": 65536, "regsPerMultiprocessor": 65536, "warpSize": 32, + "sharedMemPerBlock": 49152, "sharedMemPerMultiprocessor": 98304, + "multiProcessorCount": 80, "sharedMemPerBlockOptin": 98304 + } + ], + "traceEvents": [ + + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187223197, "dur": 21, + "args": { + "Device": 24572, "External id": 2, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24572", + "ts": 1621401187223264, "dur": 5, + "args": { + "Device": 24572, "External id": 3, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zeros", "pid": 24572, "tid": "24572", + "ts": 1621401187223182, "dur": 99, + "args": { + "Device": 24572, "External id": 1, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187223376, "dur": 19, + "args": { + "Device": 24572, "External id": 5, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187223480, "dur": 18, + "args": { + "Device": 24572, "External id": 7, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24572", + "ts": 1621401187223530, "dur": 5, + "args": { + "Device": 24572, "External id": 8, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zeros", "pid": 24572, "tid": "24572", + "ts": 1621401187223469, "dur": 72, + "args": { + "Device": 24572, "External id": 6, + "Trace name": "PyTorch Profiler", "Trace 
iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187223622, "dur": 19, + "args": { + "Device": 24572, "External id": 10, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187223790, "dur": 12, + "args": { + "Device": 24572, "External id": 13, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::unsqueeze", "pid": 24572, "tid": "24572", + "ts": 1621401187223777, "dur": 50, + "args": { + "Device": 24572, "External id": 12, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187223850, "dur": 7, + "args": { + "Device": 24572, "External id": 15, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::unsqueeze", "pid": 24572, "tid": "24572", + "ts": 1621401187223841, "dur": 24, + "args": { + "Device": 24572, "External id": 14, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187223904, "dur": 16, + "args": { + "Device": 24572, "External id": 18, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::resize_", "pid": 24572, "tid": "24572", + "ts": 1621401187223945, "dur": 14, + "args": { + "Device": 24572, "External id": 19, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::_cat", "pid": 24572, "tid": "24572", + "ts": 1621401187223888, "dur": 87, + "args": { + "Device": 24572, "External id": 17, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 
+ + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::cat", "pid": 24572, "tid": "24572", + "ts": 1621401187223876, "dur": 106, + "args": { + "Device": 24572, "External id": 16, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::stack", "pid": 24572, "tid": "24572", + "ts": 1621401187223752, "dur": 245, + "args": { + "Device": 24572, "External id": 11, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187224094, "dur": 12, + "args": { + "Device": 24572, "External id": 22, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::unsqueeze", "pid": 24572, "tid": "24572", + "ts": 1621401187224074, "dur": 43, + "args": { + "Device": 24572, "External id": 21, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187224137, "dur": 6, + "args": { + "Device": 24572, "External id": 24, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::unsqueeze", "pid": 24572, "tid": "24572", + "ts": 1621401187224128, "dur": 21, + "args": { + "Device": 24572, "External id": 23, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187224184, "dur": 15, + "args": { + "Device": 24572, "External id": 27, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::resize_", "pid": 24572, "tid": "24572", + "ts": 1621401187224223, "dur": 12, + "args": { + "Device": 24572, "External id": 28, + "Trace name": "PyTorch 
Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::_cat", "pid": 24572, "tid": "24572", + "ts": 1621401187224169, "dur": 79, + "args": { + "Device": 24572, "External id": 26, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::cat", "pid": 24572, "tid": "24572", + "ts": 1621401187224159, "dur": 96, + "args": { + "Device": 24572, "External id": 25, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::stack", "pid": 24572, "tid": "24572", + "ts": 1621401187224056, "dur": 213, + "args": { + "Device": 24572, "External id": 20, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "enumerate(DataLoader)#_SingleProcessDataLoaderIter.__next__", "pid": 24572, "tid": "24572", + "ts": 1621401187223604, "dur": 725, + "args": { + "Device": 24572, "External id": 9, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187224415, "dur": 54, + "args": { + "Device": 24572, "External id": 30, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::copy_", "pid": 24572, "tid": "24572", + "ts": 1621401187224496, "dur": 80, + "args": { + "Device": 24572, "External id": 31, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::to", "pid": 24572, "tid": "24572", + "ts": 1621401187224398, "dur": 193, + "args": { + "Device": 24572, "External id": 29, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": 
"aten::empty_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187224645, "dur": 51, + "args": { + "Device": 24572, "External id": 33, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::copy_", "pid": 24572, "tid": "24572", + "ts": 1621401187224720, "dur": 65, + "args": { + "Device": 24572, "External id": 34, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::to", "pid": 24572, "tid": "24572", + "ts": 1621401187224631, "dur": 168, + "args": { + "Device": 24572, "External id": 32, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187224956, "dur": 14, + "args": { + "Device": 24572, "External id": 38, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 24572, "tid": "24572", + "ts": 1621401187224945, "dur": 37, + "args": { + "Device": 24572, "External id": 37, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24572", + "ts": 1621401187224917, "dur": 101, + "args": { + "Device": 24572, "External id": 36, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24572", + "ts": 1621401187225058, "dur": 33, + "args": { + "Device": 24572, "External id": 40, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 23 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187225181, "dur": 41, + 
"args": { + "Device": 24572, "External id": 42, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::mm", "pid": 24572, "tid": "24572", + "ts": 1621401187225112, "dur": 197, + "args": { + "Device": 24572, "External id": 41, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 23 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24572", + "ts": 1621401187225367, "dur": 17, + "args": { + "Device": 24572, "External id": 44, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::_unsafe_view", "pid": 24572, "tid": "24572", + "ts": 1621401187225336, "dur": 79, + "args": { + "Device": 24572, "External id": 43, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 24 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::matmul", "pid": 24572, "tid": "24572", + "ts": 1621401187225037, "dur": 394, + "args": { + "Device": 24572, "External id": 39, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 23 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24572", + "ts": 1621401187225449, "dur": 107, + "args": { + "Device": 24572, "External id": 45, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 25 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::linear", "pid": 24572, "tid": "24572", + "ts": 1621401187224907, "dur": 664, + "args": { + "Device": 24572, "External id": 35, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187225662, "dur": 25, + "args": { + "Device": 24572, "External id": 47, + 
"Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::resize_", "pid": 24572, "tid": "24572", + "ts": 1621401187225746, "dur": 30, + "args": { + "Device": 24572, "External id": 50, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::clamp_min", "pid": 24572, "tid": "24572", + "ts": 1621401187225721, "dur": 105, + "args": { + "Device": 24572, "External id": 49, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::clamp", "pid": 24572, "tid": "24572", + "ts": 1621401187225709, "dur": 128, + "args": { + "Device": 24572, "External id": 48, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::clamp", "pid": 24572, "tid": "24572", + "ts": 1621401187225606, "dur": 263, + "args": { + "Device": 24572, "External id": 46, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 26 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187225978, "dur": 14, + "args": { + "Device": 24572, "External id": 54, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 24572, "tid": "24572", + "ts": 1621401187225968, "dur": 36, + "args": { + "Device": 24572, "External id": 53, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24572", + "ts": 1621401187225941, "dur": 98, + "args": { + "Device": 24572, "External id": 52, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 27 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24572", + "ts": 
1621401187226077, "dur": 60, + "args": { + "Device": 24572, "External id": 56, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 28 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187226233, "dur": 41, + "args": { + "Device": 24572, "External id": 58, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::mm", "pid": 24572, "tid": "24572", + "ts": 1621401187226161, "dur": 197, + "args": { + "Device": 24572, "External id": 57, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 29 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24572", + "ts": 1621401187226416, "dur": 17, + "args": { + "Device": 24572, "External id": 60, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::_unsafe_view", "pid": 24572, "tid": "24572", + "ts": 1621401187226384, "dur": 79, + "args": { + "Device": 24572, "External id": 59, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 30 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::matmul", "pid": 24572, "tid": "24572", + "ts": 1621401187226057, "dur": 422, + "args": { + "Device": 24572, "External id": 55, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 28 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24572", + "ts": 1621401187226497, "dur": 103, + "args": { + "Device": 24572, "External id": 61, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 31 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::linear", "pid": 24572, "tid": "24572", + "ts": 1621401187225932, "dur": 683, + "args": { + 
"Device": 24572, "External id": 51, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 27 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::broadcast_tensors", "pid": 24572, "tid": "24572", + "ts": 1621401187226708, "dur": 11, + "args": { + "Device": 24572, "External id": 62, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 32 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187226827, "dur": 41, + "args": { + "Device": 24572, "External id": 64, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187226955, "dur": 35, + "args": { + "Device": 24572, "External id": 66, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187227020, "dur": 11, + "args": { + "Device": 24572, "External id": 67, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::sum", "pid": 24572, "tid": "24572", + "ts": 1621401187226930, "dur": 176, + "args": { + "Device": 24572, "External id": 65, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::mse_loss", "pid": 24572, "tid": "24572", + "ts": 1621401187226753, "dur": 445, + "args": { + "Device": 24572, "External id": 63, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 32 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187227327, "dur": 21, + "args": { + "Device": 24572, "External id": 69, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", 
"cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24572", + "ts": 1621401187227368, "dur": 5, + "args": { + "Device": 24572, "External id": 70, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zeros", "pid": 24572, "tid": "24572", + "ts": 1621401187227314, "dur": 65, + "args": { + "Device": 24572, "External id": 68, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187227464, "dur": 18, + "args": { + "Device": 24572, "External id": 72, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::fill_", "pid": 24572, "tid": "24572", + "ts": 1621401187227576, "dur": 49, + "args": { + "Device": 24572, "External id": 74, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24572", + "ts": 1621401187227553, "dur": 97, + "args": { + "Device": 24572, "External id": 73, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 33 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::fill_", "pid": 24572, "tid": "24572", + "ts": 1621401187227707, "dur": 43, + "args": { + "Device": 24572, "External id": 76, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24572", + "ts": 1621401187227689, "dur": 79, + "args": { + "Device": 24572, "External id": 75, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 33 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::fill_", "pid": 24572, "tid": "24572", + "ts": 1621401187227823, "dur": 42, + "args": { + "Device": 24572, "External id": 78, + "Trace name": "PyTorch 
Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24572", + "ts": 1621401187227805, "dur": 77, + "args": { + "Device": 24572, "External id": 77, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 33 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::fill_", "pid": 24572, "tid": "24572", + "ts": 1621401187227937, "dur": 41, + "args": { + "Device": 24572, "External id": 80, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24572", + "ts": 1621401187227919, "dur": 77, + "args": { + "Device": 24572, "External id": 79, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 33 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "Optimizer.zero_grad#SGD.zero_grad", "pid": 24572, "tid": "24572", + "ts": 1621401187227446, "dur": 606, + "args": { + "Device": 24572, "External id": 71, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187228150, "dur": 53, + "args": { + "Device": 24572, "External id": 83, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty_like", "pid": 24572, "tid": "24572", + "ts": 1621401187228137, "dur": 81, + "args": { + "Device": 24572, "External id": 82, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::fill_", "pid": 24572, "tid": "24572", + "ts": 1621401187228235, "dur": 50, + "args": { + "Device": 24572, "External id": 84, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::ones_like", "pid": 24572, "tid": "24572", + "ts": 
1621401187228128, "dur": 169, + "args": { + "Device": 24572, "External id": 81, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187228708, "dur": 79, + "args": { + "Device": 24572, "External id": 89, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty_like", "pid": 24572, "tid": "24610", + "ts": 1621401187228680, "dur": 146, + "args": { + "Device": 24572, "External id": 88, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::fill_", "pid": 24572, "tid": "24610", + "ts": 1621401187228885, "dur": 93, + "args": { + "Device": 24572, "External id": 91, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24610", + "ts": 1621401187228858, "dur": 147, + "args": { + "Device": 24572, "External id": 90, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zeros_like", "pid": 24572, "tid": "24610", + "ts": 1621401187228647, "dur": 369, + "args": { + "Device": 24572, "External id": 87, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::mse_loss_backward", "pid": 24572, "tid": "24610", + "ts": 1621401187229048, "dur": 122, + "args": { + "Device": 24572, "External id": 92, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::mse_loss_backward", "pid": 24572, "tid": "24610", + "ts": 1621401187228603, "dur": 614, + "args": { + "Device": 24572, "External id": 86, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "MseLossBackward", "pid": 24572, "tid": 
"24610", + "ts": 1621401187228516, "dur": 727, + "args": { + "Device": 24572, "External id": 85, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 32 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "AddBackward1", "pid": 24572, "tid": "24610", + "ts": 1621401187229384, "dur": 17, + "args": { + "Device": 24572, "External id": 93, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 31 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187229506, "dur": 73, + "args": { + "Device": 24572, "External id": 95, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::sum", "pid": 24572, "tid": "24610", + "ts": 1621401187229459, "dur": 279, + "args": { + "Device": 24572, "External id": 94, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24610", + "ts": 1621401187229788, "dur": 65, + "args": { + "Device": 24572, "External id": 96, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24610", + "ts": 1621401187230059, "dur": 131, + "args": { + "Device": 24572, "External id": 98, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "torch::autograd::AccumulateGrad", "pid": 24572, "tid": "24610", + "ts": 1621401187230028, "dur": 228, + "args": { + "Device": 24572, "External id": 97, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24610", + "ts": 1621401187230405, "dur": 61, + "args": { + "Device": 24572, "External id": 101, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { 
+ "ph": "X", "cat": "Operator", + "name": "aten::reshape", "pid": 24572, "tid": "24610", + "ts": 1621401187230383, "dur": 107, + "args": { + "Device": 24572, "External id": 100, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "UnsafeViewBackward", "pid": 24572, "tid": "24610", + "ts": 1621401187230354, "dur": 146, + "args": { + "Device": 24572, "External id": 99, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 30 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187230751, "dur": 22, + "args": { + "Device": 24572, "External id": 105, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 24572, "tid": "24610", + "ts": 1621401187230732, "dur": 65, + "args": { + "Device": 24572, "External id": 104, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24610", + "ts": 1621401187230710, "dur": 124, + "args": { + "Device": 24572, "External id": 103, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::conj", "pid": 24572, "tid": "24610", + "ts": 1621401187230862, "dur": 7, + "args": { + "Device": 24572, "External id": 106, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187230935, "dur": 73, + "args": { + "Device": 24572, "External id": 108, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::mm", "pid": 24572, "tid": "24610", + "ts": 1621401187230889, "dur": 235, + "args": { + "Device": 24572, "External id": 107, + "Trace name": "PyTorch Profiler", 
"Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187231211, "dur": 23, + "args": { + "Device": 24572, "External id": 111, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 24572, "tid": "24610", + "ts": 1621401187231191, "dur": 69, + "args": { + "Device": 24572, "External id": 110, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24610", + "ts": 1621401187231168, "dur": 129, + "args": { + "Device": 24572, "External id": 109, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187231376, "dur": 17, + "args": { + "Device": 24572, "External id": 114, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 24572, "tid": "24610", + "ts": 1621401187231360, "dur": 49, + "args": { + "Device": 24572, "External id": 113, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24610", + "ts": 1621401187231340, "dur": 100, + "args": { + "Device": 24572, "External id": 112, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::conj", "pid": 24572, "tid": "24610", + "ts": 1621401187231465, "dur": 6, + "args": { + "Device": 24572, "External id": 115, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187231534, "dur": 72, + "args": { + "Device": 24572, "External id": 117, + "Trace name": "PyTorch Profiler", "Trace 
iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::mm", "pid": 24572, "tid": "24610", + "ts": 1621401187231491, "dur": 225, + "args": { + "Device": 24572, "External id": 116, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "MmBackward", "pid": 24572, "tid": "24610", + "ts": 1621401187230626, "dur": 1124, + "args": { + "Device": 24572, "External id": 102, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 29 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24610", + "ts": 1621401187231992, "dur": 61, + "args": { + "Device": 24572, "External id": 120, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::reshape", "pid": 24572, "tid": "24610", + "ts": 1621401187231970, "dur": 108, + "args": { + "Device": 24572, "External id": 119, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "ViewBackward", "pid": 24572, "tid": "24610", + "ts": 1621401187231941, "dur": 166, + "args": { + "Device": 24572, "External id": 118, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 28 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187232305, "dur": 21, + "args": { + "Device": 24572, "External id": 124, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 24572, "tid": "24610", + "ts": 1621401187232286, "dur": 62, + "args": { + "Device": 24572, "External id": 123, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24610", + "ts": 1621401187232265, "dur": 123, + "args": { + 
"Device": 24572, "External id": 122, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "TBackward", "pid": 24572, "tid": "24610", + "ts": 1621401187232239, "dur": 161, + "args": { + "Device": 24572, "External id": 121, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 27 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24610", + "ts": 1621401187232535, "dur": 85, + "args": { + "Device": 24572, "External id": 126, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "torch::autograd::AccumulateGrad", "pid": 24572, "tid": "24610", + "ts": 1621401187232515, "dur": 148, + "args": { + "Device": 24572, "External id": 125, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187232790, "dur": 47, + "args": { + "Device": 24572, "External id": 129, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::fill_", "pid": 24572, "tid": "24610", + "ts": 1621401187232866, "dur": 68, + "args": { + "Device": 24572, "External id": 130, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::scalar_tensor", "pid": 24572, "tid": "24610", + "ts": 1621401187232776, "dur": 174, + "args": { + "Device": 24572, "External id": 128, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187233023, "dur": 27, + "args": { + "Device": 24572, "External id": 132, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::_local_scalar_dense", "pid": 24572, 
"tid": "24610", + "ts": 1621401187233192, "dur": 6, + "args": { + "Device": 24572, "External id": 135, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::item", "pid": 24572, "tid": "24610", + "ts": 1621401187233184, "dur": 24, + "args": { + "Device": 24572, "External id": 134, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::resize_", "pid": 24572, "tid": "24610", + "ts": 1621401187233251, "dur": 41, + "args": { + "Device": 24572, "External id": 136, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::ge", "pid": 24572, "tid": "24610", + "ts": 1621401187233168, "dur": 182, + "args": { + "Device": 24572, "External id": 133, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::ge", "pid": 24572, "tid": "24610", + "ts": 1621401187232971, "dur": 404, + "args": { + "Device": 24572, "External id": 131, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187233430, "dur": 15, + "args": { + "Device": 24572, "External id": 139, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::expand", "pid": 24572, "tid": "24610", + "ts": 1621401187233414, "dur": 62, + "args": { + "Device": 24572, "External id": 138, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187233508, "dur": 10, + "args": { + "Device": 24572, "External id": 141, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::expand", "pid": 24572, "tid": "24610", 
+ "ts": 1621401187233494, "dur": 48, + "args": { + "Device": 24572, "External id": 140, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187233571, "dur": 10, + "args": { + "Device": 24572, "External id": 143, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::expand", "pid": 24572, "tid": "24610", + "ts": 1621401187233558, "dur": 43, + "args": { + "Device": 24572, "External id": 142, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187233649, "dur": 46, + "args": { + "Device": 24572, "External id": 145, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::_s_where", "pid": 24572, "tid": "24610", + "ts": 1621401187233620, "dur": 167, + "args": { + "Device": 24572, "External id": 144, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::where", "pid": 24572, "tid": "24610", + "ts": 1621401187233398, "dur": 409, + "args": { + "Device": 24572, "External id": 137, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "ClampBackward1", "pid": 24572, "tid": "24610", + "ts": 1621401187232724, "dur": 1110, + "args": { + "Device": 24572, "External id": 127, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 26 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "AddBackward1", "pid": 24572, "tid": "24610", + "ts": 1621401187233941, "dur": 12, + "args": { + "Device": 24572, "External id": 146, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 25 + } + }, + { + 
"ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187234021, "dur": 46, + "args": { + "Device": 24572, "External id": 148, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::sum", "pid": 24572, "tid": "24610", + "ts": 1621401187233990, "dur": 182, + "args": { + "Device": 24572, "External id": 147, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24610", + "ts": 1621401187234208, "dur": 43, + "args": { + "Device": 24572, "External id": 149, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24610", + "ts": 1621401187234378, "dur": 84, + "args": { + "Device": 24572, "External id": 151, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "torch::autograd::AccumulateGrad", "pid": 24572, "tid": "24610", + "ts": 1621401187234357, "dur": 144, + "args": { + "Device": 24572, "External id": 150, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24610", + "ts": 1621401187234593, "dur": 39, + "args": { + "Device": 24572, "External id": 154, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::reshape", "pid": 24572, "tid": "24610", + "ts": 1621401187234580, "dur": 67, + "args": { + "Device": 24572, "External id": 153, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "UnsafeViewBackward", "pid": 24572, "tid": "24610", + "ts": 1621401187234561, "dur": 92, + "args": { + "Device": 24572, "External id": 152, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd 
thread id": 1, "Sequence number": 24 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187234803, "dur": 14, + "args": { + "Device": 24572, "External id": 158, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 24572, "tid": "24610", + "ts": 1621401187234792, "dur": 41, + "args": { + "Device": 24572, "External id": 157, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24610", + "ts": 1621401187234778, "dur": 79, + "args": { + "Device": 24572, "External id": 156, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::conj", "pid": 24572, "tid": "24610", + "ts": 1621401187234874, "dur": 4, + "args": { + "Device": 24572, "External id": 159, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187234918, "dur": 47, + "args": { + "Device": 24572, "External id": 161, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::mm", "pid": 24572, "tid": "24610", + "ts": 1621401187234890, "dur": 149, + "args": { + "Device": 24572, "External id": 160, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187235092, "dur": 15, + "args": { + "Device": 24572, "External id": 164, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 24572, "tid": "24610", + "ts": 1621401187235080, "dur": 39, + "args": { + "Device": 24572, "External id": 163, + "Trace name": "PyTorch Profiler", 
"Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24610", + "ts": 1621401187235067, "dur": 75, + "args": { + "Device": 24572, "External id": 162, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "MmBackward", "pid": 24572, "tid": "24610", + "ts": 1621401187234734, "dur": 424, + "args": { + "Device": 24572, "External id": 155, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 23 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187235312, "dur": 13, + "args": { + "Device": 24572, "External id": 168, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 24572, "tid": "24610", + "ts": 1621401187235301, "dur": 40, + "args": { + "Device": 24572, "External id": 167, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24610", + "ts": 1621401187235288, "dur": 78, + "args": { + "Device": 24572, "External id": 166, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "TBackward", "pid": 24572, "tid": "24610", + "ts": 1621401187235271, "dur": 103, + "args": { + "Device": 24572, "External id": 165, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24610", + "ts": 1621401187235487, "dur": 85, + "args": { + "Device": 24572, "External id": 170, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "torch::autograd::AccumulateGrad", "pid": 24572, "tid": "24610", + "ts": 1621401187235467, "dur": 147, + 
"args": { + "Device": 24572, "External id": 169, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187235803, "dur": 24, + "args": { + "Device": 24572, "External id": 172, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24572", + "ts": 1621401187235850, "dur": 5, + "args": { + "Device": 24572, "External id": 173, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zeros", "pid": 24572, "tid": "24572", + "ts": 1621401187235787, "dur": 75, + "args": { + "Device": 24572, "External id": 171, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187235954, "dur": 20, + "args": { + "Device": 24572, "External id": 175, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24572", + "ts": 1621401187236091, "dur": 82, + "args": { + "Device": 24572, "External id": 176, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24572", + "ts": 1621401187236221, "dur": 70, + "args": { + "Device": 24572, "External id": 177, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24572", + "ts": 1621401187236334, "dur": 68, + "args": { + "Device": 24572, "External id": 178, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24572", + "ts": 1621401187236444, "dur": 68, + "args": { + "Device": 24572, 
"External id": 179, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "Optimizer.step#SGD.step", "pid": 24572, "tid": "24572", + "ts": 1621401187235935, "dur": 663, + "args": { + "Device": 24572, "External id": 174, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "ProfilerStep#2", "pid": 24572, "tid": "24572", + "ts": 1621401187223358, "dur": 13410, + "args": { + "Device": 24572, "External id": 4, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Memcpy", + "name": "Memcpy HtoD (Pageable -> Device)", "pid": 0, "tid": "stream 7", + "ts": 1621401187224556, "dur": 1, + "args": { + "device": 0, "context": 1, + "stream": 7, "correlation": 311, "external id": 31, + "bytes": 640, "memory bandwidth (GB/s)": 0.46511627906976744 + } + }, + { + "ph": "f", "id": 311, "pid": 0, "tid": "stream 7", "ts": 1621401187224556, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaMemcpyAsync", "pid": 24572, "tid": "24572", + "ts": 1621401187224533, "dur": 20, + "args": { + "cbid": 41, "correlation": 311, + "external id": 31, "external ts": 1621401187224496 + } + }, + { + "ph": "s", "id": 311, "pid": 24572, "tid": 24572, "ts": 1621401187224533, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaStreamSynchronize", "pid": 24572, "tid": "24572", + "ts": 1621401187224554, "dur": 8, + "args": { + "cbid": 131, "correlation": 312, + "external id": 31, "external ts": 1621401187224496 + } + }, + { + "ph": "X", "cat": "Memcpy", + "name": "Memcpy HtoD (Pageable -> Device)", "pid": 0, "tid": "stream 7", + "ts": 1621401187224767, "dur": 1, + "args": { + "device": 0, "context": 1, + "stream": 7, "correlation": 323, "external id": 34, + "bytes": 128, "memory bandwidth (GB/s)": 0.09523809523809523 + } + }, + { + "ph": "f", "id": 323, "pid": 0, 
"tid": "stream 7", "ts": 1621401187224767, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaMemcpyAsync", "pid": 24572, "tid": "24572", + "ts": 1621401187224752, "dur": 12, + "args": { + "cbid": 41, "correlation": 323, + "external id": 34, "external ts": 1621401187224720 + } + }, + { + "ph": "s", "id": 323, "pid": 24572, "tid": 24572, "ts": 1621401187224752, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaStreamSynchronize", "pid": 24572, "tid": "24572", + "ts": 1621401187224765, "dur": 7, + "args": { + "cbid": 131, "correlation": 324, + "external id": 34, "external ts": 1621401187224720 + } + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", "pid": 24572, "tid": "24572", + "ts": 1621401187225253, "dur": 2, + "args": { + "cbid": 251, "correlation": 332, + "external id": 41, "external ts": 1621401187225112 + } + }, + { + "ph": "X", "cat": "Kernel", + "name": "void gemmSN_TN_kernel_64addr, cublasGemvTensorStridedBatched >(cublasGemmSmallNParams, cublasGemvTensorStridedBatched, float>)", "pid": 0, "tid": "stream 7", + "ts": 1621401187225275, "dur": 3, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 333, "external id": 41, + "registers per thread": 72, + "shared memory": 13824, + "blocks per SM": 0.025, + "warps per SM": 0.1, + "grid": [1, 2, 1], + "block": [128, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 333, "pid": 0, "tid": "stream 7", "ts": 1621401187225275, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187225258, "dur": 16, + "args": { + "cbid": 211, "correlation": 333, + "external id": 41, "external ts": 1621401187225112 + } + }, + { + "ph": "s", "id": 333, "pid": 24572, "tid": 24572, "ts": 1621401187225258, + "cat": "async", "name": 
"launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::unrolled_elementwise_kernel, at::detail::Array, OffsetCalculator<2, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast>(int, at::native::AddFunctor, at::detail::Array, OffsetCalculator<2, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast)", "pid": 0, "tid": "stream 7", + "ts": 1621401187225530, "dur": 2, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 338, "external id": 45, + "registers per thread": 22, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 338, "pid": 0, "tid": "stream 7", "ts": 1621401187225530, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187225512, "dur": 16, + "args": { + "cbid": 211, "correlation": 338, + "external id": 45, "external ts": 1621401187225449 + } + }, + { + "ph": "s", "id": 338, "pid": 24572, "tid": 24572, "ts": 1621401187225512, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::(anonymous namespace)::clamp_min_scalar_kernel_impl(at::TensorIterator&, c10::Scalar)::{lambda()#1}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array >(int, at::native::(anonymous namespace)::clamp_min_scalar_kernel_impl(at::TensorIterator&, c10::Scalar)::{lambda()#1}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187225820, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 352, "external id": 
49, + "registers per thread": 18, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 352, "pid": 0, "tid": "stream 7", "ts": 1621401187225820, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187225803, "dur": 15, + "args": { + "cbid": 211, "correlation": 352, + "external id": 49, "external ts": 1621401187225721 + } + }, + { + "ph": "s", "id": 352, "pid": 24572, "tid": 24572, "ts": 1621401187225803, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", "pid": 24572, "tid": "24572", + "ts": 1621401187226305, "dur": 2, + "args": { + "cbid": 251, "correlation": 363, + "external id": 57, "external ts": 1621401187226161 + } + }, + { + "ph": "X", "cat": "Kernel", + "name": "void gemmSN_TN_kernel_64addr, cublasGemvTensorStridedBatched >(cublasGemmSmallNParams, cublasGemvTensorStridedBatched, float>)", "pid": 0, "tid": "stream 7", + "ts": 1621401187226325, "dur": 2, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 364, "external id": 57, + "registers per thread": 72, + "shared memory": 13824, + "blocks per SM": 0.025, + "warps per SM": 0.1, + "grid": [1, 2, 1], + "block": [128, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 364, "pid": 0, "tid": "stream 7", "ts": 1621401187226325, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187226309, "dur": 15, + "args": { + "cbid": 211, "correlation": 364, + "external id": 57, "external ts": 1621401187226161 + } + }, + { + "ph": "s", "id": 364, "pid": 24572, "tid": 24572, "ts": 1621401187226309, + "cat": "async", "name": "launch" + }, + { + 
"ph": "X", "cat": "Kernel", + "name": "void at::native::unrolled_elementwise_kernel, at::detail::Array, OffsetCalculator<2, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast>(int, at::native::AddFunctor, at::detail::Array, OffsetCalculator<2, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast)", "pid": 0, "tid": "stream 7", + "ts": 1621401187226575, "dur": 2, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 369, "external id": 61, + "registers per thread": 22, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 369, "pid": 0, "tid": "stream 7", "ts": 1621401187226575, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187226558, "dur": 15, + "args": { + "cbid": 211, "correlation": 369, + "external id": 61, "external ts": 1621401187226497 + } + }, + { + "ph": "s", "id": 369, "pid": 24572, "tid": 24572, "ts": 1621401187226558, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::mse_kernel_cuda(at::TensorIterator&)::{lambda()#1}::operator()() const::{lambda()#4}::operator()() const::{lambda(float, float)#1}, at::detail::Array >(int, at::native::mse_kernel_cuda(at::TensorIterator&)::{lambda()#1}::operator()() const::{lambda()#4}::operator()() const::{lambda(float, float)#1}, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187226912, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 377, "external id": 63, + "registers per thread": 20, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 
0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 377, "pid": 0, "tid": "stream 7", "ts": 1621401187226912, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187226895, "dur": 16, + "args": { + "cbid": 211, "correlation": 377, + "external id": 63, "external ts": 1621401187226753 + } + }, + { + "ph": "s", "id": 377, "pid": 24572, "tid": 24572, "ts": 1621401187226895, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4> >(at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4>)", "pid": 0, "tid": "stream 7", + "ts": 1621401187227092, "dur": 2, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 388, "external id": 65, + "registers per thread": 32, + "shared memory": 16, + "blocks per SM": 0.0125, + "warps per SM": 0.0125, + "grid": [1, 1, 1], + "block": [32, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 388, "pid": 0, "tid": "stream 7", "ts": 1621401187227092, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187227075, "dur": 15, + "args": { + "cbid": 211, "correlation": 388, + "external id": 65, "external ts": 1621401187226930 + } + }, + { + "ph": "s", "id": 388, "pid": 24572, "tid": 24572, "ts": 1621401187227075, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187227619, "dur": 1, + 
"args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 395, "external id": 74, + "registers per thread": 16, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 395, "pid": 0, "tid": "stream 7", "ts": 1621401187227619, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187227601, "dur": 16, + "args": { + "cbid": 211, "correlation": 395, + "external id": 74, "external ts": 1621401187227576 + } + }, + { + "ph": "s", "id": 395, "pid": 24572, "tid": 24572, "ts": 1621401187227601, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187227745, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 402, "external id": 76, + "registers per thread": 16, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 402, "pid": 0, "tid": "stream 7", "ts": 1621401187227745, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187227729, "dur": 14, + "args": { + "cbid": 211, "correlation": 402, + "external id": 76, "external ts": 1621401187227707 + } + }, + { + "ph": "s", "id": 402, "pid": 24572, "tid": 24572, "ts": 1621401187227729, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, 
at::native::FillFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187227859, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 409, "external id": 78, + "registers per thread": 16, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 409, "pid": 0, "tid": "stream 7", "ts": 1621401187227859, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187227844, "dur": 13, + "args": { + "cbid": 211, "correlation": 409, + "external id": 78, "external ts": 1621401187227823 + } + }, + { + "ph": "s", "id": 409, "pid": 24572, "tid": 24572, "ts": 1621401187227844, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187227973, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 416, "external id": 80, + "registers per thread": 16, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 416, "pid": 0, "tid": "stream 7", "ts": 1621401187227973, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187227958, "dur": 13, + "args": { + "cbid": 211, "correlation": 416, + "external id": 80, "external ts": 1621401187227937 + } + }, + { + "ph": "s", "id": 416, "pid": 24572, "tid": 24572, "ts": 1621401187227958, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void 
at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187228279, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 429, "external id": 84, + "registers per thread": 16, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 429, "pid": 0, "tid": "stream 7", "ts": 1621401187228279, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187228262, "dur": 15, + "args": { + "cbid": 211, "correlation": 429, + "external id": 84, "external ts": 1621401187228235 + } + }, + { + "ph": "s", "id": 429, "pid": 24572, "tid": 24572, "ts": 1621401187228262, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187228962, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 440, "external id": 91, + "registers per thread": 16, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 440, "pid": 0, "tid": "stream 7", "ts": 1621401187228962, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187228932, "dur": 30, + "args": { + "cbid": 211, "correlation": 440, + "external id": 91, "external ts": 1621401187228885 + } + }, + { + "ph": "s", "id": 440, "pid": 24572, "tid": 24610, "ts": 1621401187228932, 
+ "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::unrolled_elementwise_kernel, OffsetCalculator<3, unsigned int>, at::detail::Array<1, unsigned int>, at::native::memory::LoadWithoutCast, OffsetCalculator::StoreWithoutCast>(int, at::native::mse_backward_cuda_kernel(at::TensorIterator&, c10::Scalar const&)::{lambda()#1}::operator()() const::{lambda()#4}::operator()() const::{lambda(float, float, float)#1}, at::detail::Array, OffsetCalculator<3, unsigned int>, at::detail::Array<1, unsigned int>, at::native::memory::LoadWithoutCast, OffsetCalculator::StoreWithoutCast)", "pid": 0, "tid": "stream 7", + "ts": 1621401187229153, "dur": 2, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 446, "external id": 92, + "registers per thread": 28, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 446, "pid": 0, "tid": "stream 7", "ts": 1621401187229153, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187229127, "dur": 26, + "args": { + "cbid": 211, "correlation": 446, + "external id": 92, "external ts": 1621401187229048 + } + }, + { + "ph": "s", "id": 446, "pid": 24572, "tid": 24610, "ts": 1621401187229127, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::reduce_kernel<256, 2, at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4> >(at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4>)", "pid": 0, "tid": "stream 7", + "ts": 1621401187229711, "dur": 4, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 460, "external id": 94, + "registers per thread": 35, + "shared 
memory": 16, + "blocks per SM": 0.0125, + "warps per SM": 0.00625, + "grid": [1, 1, 1], + "block": [1, 16, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 460, "pid": 0, "tid": "stream 7", "ts": 1621401187229711, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187229681, "dur": 30, + "args": { + "cbid": 211, "correlation": 460, + "external id": 94, "external ts": 1621401187229459 + } + }, + { + "ph": "s", "id": 460, "pid": 24572, "tid": 24610, "ts": 1621401187229681, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187230162, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 467, "external id": 98, + "registers per thread": 20, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 467, "pid": 0, "tid": "stream 7", "ts": 1621401187230162, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187230133, "dur": 29, + "args": { + "cbid": 211, "correlation": 467, + "external id": 98, "external ts": 1621401187230059 + } + }, + { + "ph": "s", "id": 467, "pid": 24572, "tid": 24610, "ts": 1621401187230133, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", "pid": 24572, "tid": "24610", + "ts": 1621401187231063, "dur": 4, + "args": { + "cbid": 251, "correlation": 480, + "external id": 107, "external ts": 1621401187230889 + } + }, + { + "ph": "X", "cat": 
"Runtime", + "name": "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", "pid": 24572, "tid": "24610", + "ts": 1621401187231069, "dur": 1, + "args": { + "cbid": 251, "correlation": 481, + "external id": 107, "external ts": 1621401187230889 + } + }, + { + "ph": "X", "cat": "Kernel", + "name": "volta_sgemm_128x32_nt", "pid": 0, "tid": "stream 7", + "ts": 1621401187231100, "dur": 3, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 482, "external id": 107, + "registers per thread": 55, + "shared memory": 16384, + "blocks per SM": 0.0125, + "warps per SM": 0.1, + "grid": [1, 1, 1], + "block": [256, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 482, "pid": 0, "tid": "stream 7", "ts": 1621401187231100, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187231073, "dur": 27, + "args": { + "cbid": 211, "correlation": 482, + "external id": 107, "external ts": 1621401187230889 + } + }, + { + "ph": "s", "id": 482, "pid": 24572, "tid": 24610, "ts": 1621401187231073, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", "pid": 24572, "tid": "24610", + "ts": 1621401187231658, "dur": 3, + "args": { + "cbid": 251, "correlation": 491, + "external id": 116, "external ts": 1621401187231491 + } + }, + { + "ph": "X", "cat": "Kernel", + "name": "void gemmSN_NN_kernel, cublasGemvTensorStridedBatched >(cublasGemmSmallNParams, cublasGemvTensorStridedBatched, float>)", "pid": 0, "tid": "stream 7", + "ts": 1621401187231692, "dur": 2, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 492, "external id": 116, + "registers per thread": 64, + "shared memory": 12288, + "blocks per SM": 0.05, + "warps per SM": 0.4, + "grid": [1, 4, 1], + "block": [256, 1, 1], + "theoretical occupancy %": 1 + } + }, + { + 
"ph": "f", "id": 492, "pid": 0, "tid": "stream 7", "ts": 1621401187231692, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187231665, "dur": 27, + "args": { + "cbid": 211, "correlation": 492, + "external id": 116, "external ts": 1621401187231491 + } + }, + { + "ph": "s", "id": 492, "pid": 24572, "tid": 24610, "ts": 1621401187231665, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187232603, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 503, "external id": 126, + "registers per thread": 20, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 503, "pid": 0, "tid": "stream 7", "ts": 1621401187232603, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187232583, "dur": 19, + "args": { + "cbid": 211, "correlation": 503, + "external id": 126, "external ts": 1621401187232535 + } + }, + { + "ph": "s", "id": 503, "pid": 24572, "tid": 24610, "ts": 1621401187232583, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187232921, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 513, "external id": 130, + "registers per thread": 16, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 
0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 513, "pid": 0, "tid": "stream 7", "ts": 1621401187232921, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187232901, "dur": 19, + "args": { + "cbid": 211, "correlation": 513, + "external id": 130, "external ts": 1621401187232866 + } + }, + { + "ph": "s", "id": 513, "pid": 24572, "tid": 24610, "ts": 1621401187232901, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::BUnaryFunctor >, at::detail::Array >(int, at::native::BUnaryFunctor >, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187233342, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 526, "external id": 133, + "registers per thread": 16, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 526, "pid": 0, "tid": "stream 7", "ts": 1621401187233342, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187233323, "dur": 18, + "args": { + "cbid": 211, "correlation": 526, + "external id": 133, "external ts": 1621401187233168 + } + }, + { + "ph": "s", "id": 526, "pid": 24572, "tid": 24610, "ts": 1621401187233323, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::unrolled_elementwise_kernel, OffsetCalculator<3, unsigned int>, at::detail::Array<1, unsigned int>, at::native::memory::LoadWithoutCast, OffsetCalculator::StoreWithoutCast>(int, at::native::(anonymous namespace)::where_kernel_impl(at::TensorIterator&, c10::ScalarType)::{lambda()#1}::operator()() 
const::{lambda()#8}::operator()() const::{lambda(bool, float, float)#1}, at::detail::Array, OffsetCalculator<3, unsigned int>, at::detail::Array<1, unsigned int>, at::native::memory::LoadWithoutCast, OffsetCalculator::StoreWithoutCast)", "pid": 0, "tid": "stream 7", + "ts": 1621401187233770, "dur": 2, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 535, "external id": 144, + "registers per thread": 26, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 535, "pid": 0, "tid": "stream 7", "ts": 1621401187233770, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187233751, "dur": 19, + "args": { + "cbid": 211, "correlation": 535, + "external id": 144, "external ts": 1621401187233620 + } + }, + { + "ph": "s", "id": 535, "pid": 24572, "tid": 24610, "ts": 1621401187233751, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4> >(at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4>)", "pid": 0, "tid": "stream 7", + "ts": 1621401187234156, "dur": 3, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 548, "external id": 147, + "registers per thread": 32, + "shared memory": 16, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [4, 16, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 548, "pid": 0, "tid": "stream 7", "ts": 1621401187234156, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 
1621401187234135, "dur": 19, + "args": { + "cbid": 211, "correlation": 548, + "external id": 147, "external ts": 1621401187233990 + } + }, + { + "ph": "s", "id": 548, "pid": 24572, "tid": 24610, "ts": 1621401187234135, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187234445, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 555, "external id": 151, + "registers per thread": 20, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 555, "pid": 0, "tid": "stream 7", "ts": 1621401187234445, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187234425, "dur": 19, + "args": { + "cbid": 211, "correlation": 555, + "external id": 151, "external ts": 1621401187234378 + } + }, + { + "ph": "s", "id": 555, "pid": 24572, "tid": 24610, "ts": 1621401187234425, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", "pid": 24572, "tid": "24610", + "ts": 1621401187235000, "dur": 2, + "args": { + "cbid": 251, "correlation": 568, + "external id": 160, "external ts": 1621401187234890 + } + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", "pid": 24572, "tid": "24610", + "ts": 1621401187235004, "dur": 0, + "args": { + "cbid": 251, "correlation": 569, + "external id": 160, "external ts": 1621401187234890 + } + }, + { + "ph": "X", "cat": "Kernel", + "name": "volta_sgemm_128x32_nt", "pid": 0, "tid": "stream 7", + "ts": 1621401187235025, "dur": 3, + 
"args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 570, "external id": 160, + "registers per thread": 55, + "shared memory": 16384, + "blocks per SM": 0.0125, + "warps per SM": 0.1, + "grid": [1, 1, 1], + "block": [256, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 570, "pid": 0, "tid": "stream 7", "ts": 1621401187235025, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187235006, "dur": 17, + "args": { + "cbid": 211, "correlation": 570, + "external id": 160, "external ts": 1621401187234890 + } + }, + { + "ph": "s", "id": 570, "pid": 24572, "tid": 24610, "ts": 1621401187235006, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187235555, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 579, "external id": 170, + "registers per thread": 20, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 579, "pid": 0, "tid": "stream 7", "ts": 1621401187235555, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187235535, "dur": 19, + "args": { + "cbid": 211, "correlation": 579, + "external id": 170, "external ts": 1621401187235487 + } + }, + { + "ph": "s", "id": 579, "pid": 24572, "tid": 24610, "ts": 1621401187235535, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, 
at::native::AddFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187236158, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 585, "external id": 176, + "registers per thread": 20, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 585, "pid": 0, "tid": "stream 7", "ts": 1621401187236158, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187236138, "dur": 18, + "args": { + "cbid": 211, "correlation": 585, + "external id": 176, "external ts": 1621401187236091 + } + }, + { + "ph": "s", "id": 585, "pid": 24572, "tid": 24572, "ts": 1621401187236138, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187236278, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 590, "external id": 177, + "registers per thread": 20, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 590, "pid": 0, "tid": "stream 7", "ts": 1621401187236278, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187236261, "dur": 15, + "args": { + "cbid": 211, "correlation": 590, + "external id": 177, "external ts": 1621401187236221 + } + }, + { + "ph": "s", "id": 590, "pid": 24572, "tid": 24572, "ts": 1621401187236261, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void 
at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187236390, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 595, "external id": 178, + "registers per thread": 20, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 595, "pid": 0, "tid": "stream 7", "ts": 1621401187236390, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187236373, "dur": 15, + "args": { + "cbid": 211, "correlation": 595, + "external id": 178, "external ts": 1621401187236334 + } + }, + { + "ph": "s", "id": 595, "pid": 24572, "tid": 24572, "ts": 1621401187236373, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187236501, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 600, "external id": 179, + "registers per thread": 20, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 600, "pid": 0, "tid": "stream 7", "ts": 1621401187236501, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187236483, "dur": 15, + "args": { + "cbid": 211, "correlation": 600, + "external id": 179, "external ts": 1621401187236444 + } + }, + { + "ph": "s", "id": 600, "pid": 24572, "tid": 24572, "ts": 1621401187236483, 
+ "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaDeviceSynchronize", "pid": 24572, "tid": "24572", + "ts": 1621401187236853, "dur": 10, + "args": { + "cbid": 165, "correlation": 605, + "external id": 0, "external ts": 0 + } + }, + { + "name": "process_name", "ph": "M", "ts": 1621401187223005, "pid": 24572, "tid": 0, + "args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 24572, "tid": 0, + "args": { + "labels": "CPU" + } + }, + { + "name": "process_name", "ph": "M", "ts": 1621401187223005, "pid": 0, "tid": 0, + "args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 0, "tid": 0, + "args": { + "labels": "GPU 0" + } + }, + { + "name": "process_name", "ph": "M", "ts": 1621401187223005, "pid": 1, "tid": 0, + "args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 1, "tid": 0, + "args": { + "labels": "GPU 1" + } + }, + { + "name": "process_name", "ph": "M", "ts": 1621401187223005, "pid": 2, "tid": 0, + "args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 2, "tid": 0, + "args": { + "labels": "GPU 2" + } + }, + { + "name": "process_name", "ph": "M", "ts": 1621401187223005, "pid": 3, "tid": 0, + "args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 3, "tid": 0, + "args": { + "labels": "GPU 3" + } + }, + { + "name": "process_name", "ph": "M", "ts": 1621401187223005, "pid": 4, "tid": 0, + "args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 4, "tid": 0, + "args": { + "labels": "GPU 4" + } + }, + { + "name": "process_name", "ph": "M", "ts": 1621401187223005, "pid": 5, "tid": 0, + "args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 5, 
"tid": 0, + "args": { + "labels": "GPU 5" + } + }, + { + "name": "process_name", "ph": "M", "ts": 1621401187223005, "pid": 6, "tid": 0, + "args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 6, "tid": 0, + "args": { + "labels": "GPU 6" + } + }, + { + "name": "process_name", "ph": "M", "ts": 1621401187223005, "pid": 7, "tid": 0, + "args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 7, "tid": 0, + "args": { + "labels": "GPU 7" + } + }, + { + "name": "thread_name", "ph": "M", "ts": 1621401187223005, "pid": 24572, "tid": "24610", + "args": { + "name": "thread 24610 (python)" + } + }, + { + "name": "thread_name", "ph": "M", "ts": 1621401187223005, "pid": 24572, "tid": "24572", + "args": { + "name": "thread 24572 (python)" + } + }, + { + "ph": "X", "cat": "Trace", "ts": 1621401187223005, "dur": 13896, + "pid": "Traces", "tid": "PyTorch Profiler", + "name": "PyTorch Profiler (0)", + "args": { + "Op count": 0 + } + }, + { + "name": "Iteration Start: PyTorch Profiler", "ph": "i", "s": "g", + "pid": "Traces", "tid": "Trace PyTorch Profiler", "ts": 1621401187223005 + }, + { + "name": "Record Window End", "ph": "i", "s": "g", + "pid": "", "tid": "", "ts": 1621401187237108 + } +, {"ph":"C", "name":"GPU 0 Utilization", "pid":0, "ts":1621401187223005, "args":{"GPU Utilization":1}}, {"ph":"C", "name":"GPU 0 Utilization", "pid":0, "ts":1621401187223005, "args":{"GPU Utilization":0}}, {"ph":"C", "name":"GPU 0 Utilization", "pid":0, "ts":1621401187223005, "args":{"GPU Utilization":0.0}}, {"ph":"C", "name":"GPU 0 Utilization", "pid":0, "ts":1621401187224005, "args":{"GPU Utilization":0.0}}, {"ph":"C", "name":"GPU 0 Utilization", "pid":0, "ts":1621401187225005, "args":{"GPU Utilization":0.6}}, {"ph":"C", "name":"GPU 0 Utilization", "pid":0, "ts":1621401187226005, "args":{"GPU Utilization":0.5}}, {"ph":"C", "name":"GPU 0 Utilization", "pid":0, "ts":1621401187227005, 
"args":{"GPU Utilization":0.6}}, {"ph":"C", "name":"GPU 0 Utilization", "pid":0, "ts":1621401187228005, "args":{"GPU Utilization":0.2}}, {"ph":"C", "name":"GPU 0 Utilization", "pid":0, "ts":1621401187229005, "args":{"GPU Utilization":0.6}}, {"ph":"C", "name":"GPU 0 Utilization", "pid":0, "ts":1621401187230005, "args":{"GPU Utilization":0.1}}, {"ph":"C", "name":"GPU 0 Utilization", "pid":0, "ts":1621401187231005, "args":{"GPU Utilization":0.5}}, {"ph":"C", "name":"GPU 0 Utilization", "pid":0, "ts":1621401187232005, "args":{"GPU Utilization":0.2}}, {"ph":"C", "name":"GPU 0 Utilization", "pid":0, "ts":1621401187233005, "args":{"GPU Utilization":0.3}}, {"ph":"C", "name":"GPU 0 Utilization", "pid":0, "ts":1621401187234005, "args":{"GPU Utilization":0.4}}, {"ph":"C", "name":"GPU 0 Utilization", "pid":0, "ts":1621401187235005, "args":{"GPU Utilization":0.4219409282700422}}, {"ph":"C", "name":"GPU 0 Utilization", "pid":0, "ts":1621401187236901, "args":{"GPU Utilization":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187223005, "args":{"Est. SM Efficiency":1}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187223005, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187225275, "args":{"Est. SM Efficiency":0.25}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187225278, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187225530, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187225532, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187225820, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187225821, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187226325, "args":{"Est. 
SM Efficiency":0.25}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187226327, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187226575, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187226577, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187226912, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187226913, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187227092, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187227094, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187227619, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187227620, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187227745, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187227746, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187227859, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187227860, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187227973, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187227974, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187228279, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187228280, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. 
SM Efficiency", "pid":0, "ts":1621401187228962, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187228963, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187229153, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187229155, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187229711, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187229715, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187230162, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187230163, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187231100, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187231103, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187231692, "args":{"Est. SM Efficiency":0.5}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187231694, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187232603, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187232604, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187232921, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187232922, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187233342, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187233343, "args":{"Est. 
SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187233770, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187233772, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187234156, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187234159, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187234445, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187234446, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187235025, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187235028, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187235555, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187235556, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187236158, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187236159, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187236278, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187236279, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187236390, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187236391, "args":{"Est. SM Efficiency":0}}, {"ph":"C", "name":"GPU 0 Est. SM Efficiency", "pid":0, "ts":1621401187236501, "args":{"Est. SM Efficiency":0.125}}, {"ph":"C", "name":"GPU 0 Est. 
SM Efficiency", "pid":0, "ts":1621401187236502, "args":{"Est. SM Efficiency":0}}]} \ No newline at end of file diff --git a/tb_plugin/test/gpu_metrics_input.json b/tb_plugin/test/gpu_metrics_input.json new file mode 100644 index 000000000..71530b1d6 --- /dev/null +++ b/tb_plugin/test/gpu_metrics_input.json @@ -0,0 +1,3105 @@ + +{ + "schemaVersion": 1, + + "computeProperties": [ + + { + "id": 0, "name": "Tesla V100-DGXS-32GB", "totalGlobalMem": 34084028416, + "major": 7, "minor": 0, + "maxThreadsPerBlock": 1024, "maxThreadsPerMultiProcessor": 2048, + "regsPerBlock": 65536, "regsPerMultiprocessor": 65536, "warpSize": 32, + "sharedMemPerBlock": 49152, "sharedMemPerMultiprocessor": 98304, + "multiProcessorCount": 80, "sharedMemPerBlockOptin": 98304 + }, + + { + "id": 1, "name": "Tesla V100-DGXS-32GB", "totalGlobalMem": 34087305216, + "major": 7, "minor": 0, + "maxThreadsPerBlock": 1024, "maxThreadsPerMultiProcessor": 2048, + "regsPerBlock": 65536, "regsPerMultiprocessor": 65536, "warpSize": 32, + "sharedMemPerBlock": 49152, "sharedMemPerMultiprocessor": 98304, + "multiProcessorCount": 80, "sharedMemPerBlockOptin": 98304 + }, + + { + "id": 2, "name": "Tesla V100-DGXS-32GB", "totalGlobalMem": 34087305216, + "major": 7, "minor": 0, + "maxThreadsPerBlock": 1024, "maxThreadsPerMultiProcessor": 2048, + "regsPerBlock": 65536, "regsPerMultiprocessor": 65536, "warpSize": 32, + "sharedMemPerBlock": 49152, "sharedMemPerMultiprocessor": 98304, + "multiProcessorCount": 80, "sharedMemPerBlockOptin": 98304 + }, + + { + "id": 3, "name": "Tesla V100-DGXS-32GB", "totalGlobalMem": 34087305216, + "major": 7, "minor": 0, + "maxThreadsPerBlock": 1024, "maxThreadsPerMultiProcessor": 2048, + "regsPerBlock": 65536, "regsPerMultiprocessor": 65536, "warpSize": 32, + "sharedMemPerBlock": 49152, "sharedMemPerMultiprocessor": 98304, + "multiProcessorCount": 80, "sharedMemPerBlockOptin": 98304 + } + ], + "traceEvents": [ + + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, 
"tid": "24572", + "ts": 1621401187223197, "dur": 21, + "args": { + "Device": 24572, "External id": 2, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24572", + "ts": 1621401187223264, "dur": 5, + "args": { + "Device": 24572, "External id": 3, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zeros", "pid": 24572, "tid": "24572", + "ts": 1621401187223182, "dur": 99, + "args": { + "Device": 24572, "External id": 1, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187223376, "dur": 19, + "args": { + "Device": 24572, "External id": 5, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187223480, "dur": 18, + "args": { + "Device": 24572, "External id": 7, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24572", + "ts": 1621401187223530, "dur": 5, + "args": { + "Device": 24572, "External id": 8, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zeros", "pid": 24572, "tid": "24572", + "ts": 1621401187223469, "dur": 72, + "args": { + "Device": 24572, "External id": 6, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187223622, "dur": 19, + "args": { + "Device": 24572, "External id": 10, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24572", + "ts": 
1621401187223790, "dur": 12, + "args": { + "Device": 24572, "External id": 13, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::unsqueeze", "pid": 24572, "tid": "24572", + "ts": 1621401187223777, "dur": 50, + "args": { + "Device": 24572, "External id": 12, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187223850, "dur": 7, + "args": { + "Device": 24572, "External id": 15, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::unsqueeze", "pid": 24572, "tid": "24572", + "ts": 1621401187223841, "dur": 24, + "args": { + "Device": 24572, "External id": 14, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187223904, "dur": 16, + "args": { + "Device": 24572, "External id": 18, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::resize_", "pid": 24572, "tid": "24572", + "ts": 1621401187223945, "dur": 14, + "args": { + "Device": 24572, "External id": 19, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::_cat", "pid": 24572, "tid": "24572", + "ts": 1621401187223888, "dur": 87, + "args": { + "Device": 24572, "External id": 17, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::cat", "pid": 24572, "tid": "24572", + "ts": 1621401187223876, "dur": 106, + "args": { + "Device": 24572, "External id": 16, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::stack", "pid": 24572, "tid": "24572", + "ts": 1621401187223752, 
"dur": 245, + "args": { + "Device": 24572, "External id": 11, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187224094, "dur": 12, + "args": { + "Device": 24572, "External id": 22, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::unsqueeze", "pid": 24572, "tid": "24572", + "ts": 1621401187224074, "dur": 43, + "args": { + "Device": 24572, "External id": 21, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187224137, "dur": 6, + "args": { + "Device": 24572, "External id": 24, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::unsqueeze", "pid": 24572, "tid": "24572", + "ts": 1621401187224128, "dur": 21, + "args": { + "Device": 24572, "External id": 23, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187224184, "dur": 15, + "args": { + "Device": 24572, "External id": 27, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::resize_", "pid": 24572, "tid": "24572", + "ts": 1621401187224223, "dur": 12, + "args": { + "Device": 24572, "External id": 28, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::_cat", "pid": 24572, "tid": "24572", + "ts": 1621401187224169, "dur": 79, + "args": { + "Device": 24572, "External id": 26, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::cat", "pid": 24572, "tid": 
"24572", + "ts": 1621401187224159, "dur": 96, + "args": { + "Device": 24572, "External id": 25, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::stack", "pid": 24572, "tid": "24572", + "ts": 1621401187224056, "dur": 213, + "args": { + "Device": 24572, "External id": 20, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "enumerate(DataLoader)#_SingleProcessDataLoaderIter.__next__", "pid": 24572, "tid": "24572", + "ts": 1621401187223604, "dur": 725, + "args": { + "Device": 24572, "External id": 9, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187224415, "dur": 54, + "args": { + "Device": 24572, "External id": 30, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::copy_", "pid": 24572, "tid": "24572", + "ts": 1621401187224496, "dur": 80, + "args": { + "Device": 24572, "External id": 31, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::to", "pid": 24572, "tid": "24572", + "ts": 1621401187224398, "dur": 193, + "args": { + "Device": 24572, "External id": 29, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187224645, "dur": 51, + "args": { + "Device": 24572, "External id": 33, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::copy_", "pid": 24572, "tid": "24572", + "ts": 1621401187224720, "dur": 65, + "args": { + "Device": 24572, 
"External id": 34, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::to", "pid": 24572, "tid": "24572", + "ts": 1621401187224631, "dur": 168, + "args": { + "Device": 24572, "External id": 32, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187224956, "dur": 14, + "args": { + "Device": 24572, "External id": 38, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 24572, "tid": "24572", + "ts": 1621401187224945, "dur": 37, + "args": { + "Device": 24572, "External id": 37, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24572", + "ts": 1621401187224917, "dur": 101, + "args": { + "Device": 24572, "External id": 36, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24572", + "ts": 1621401187225058, "dur": 33, + "args": { + "Device": 24572, "External id": 40, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 23 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187225181, "dur": 41, + "args": { + "Device": 24572, "External id": 42, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::mm", "pid": 24572, "tid": "24572", + "ts": 1621401187225112, "dur": 197, + "args": { + "Device": 24572, "External id": 41, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, 
"Sequence number": 23 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24572", + "ts": 1621401187225367, "dur": 17, + "args": { + "Device": 24572, "External id": 44, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::_unsafe_view", "pid": 24572, "tid": "24572", + "ts": 1621401187225336, "dur": 79, + "args": { + "Device": 24572, "External id": 43, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 24 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::matmul", "pid": 24572, "tid": "24572", + "ts": 1621401187225037, "dur": 394, + "args": { + "Device": 24572, "External id": 39, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 23 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24572", + "ts": 1621401187225449, "dur": 107, + "args": { + "Device": 24572, "External id": 45, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 25 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::linear", "pid": 24572, "tid": "24572", + "ts": 1621401187224907, "dur": 664, + "args": { + "Device": 24572, "External id": 35, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187225662, "dur": 25, + "args": { + "Device": 24572, "External id": 47, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::resize_", "pid": 24572, "tid": "24572", + "ts": 1621401187225746, "dur": 30, + "args": { + "Device": 24572, "External id": 50, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": 
"aten::clamp_min", "pid": 24572, "tid": "24572", + "ts": 1621401187225721, "dur": 105, + "args": { + "Device": 24572, "External id": 49, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::clamp", "pid": 24572, "tid": "24572", + "ts": 1621401187225709, "dur": 128, + "args": { + "Device": 24572, "External id": 48, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::clamp", "pid": 24572, "tid": "24572", + "ts": 1621401187225606, "dur": 263, + "args": { + "Device": 24572, "External id": 46, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 26 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187225978, "dur": 14, + "args": { + "Device": 24572, "External id": 54, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 24572, "tid": "24572", + "ts": 1621401187225968, "dur": 36, + "args": { + "Device": 24572, "External id": 53, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24572", + "ts": 1621401187225941, "dur": 98, + "args": { + "Device": 24572, "External id": 52, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 27 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24572", + "ts": 1621401187226077, "dur": 60, + "args": { + "Device": 24572, "External id": 56, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 28 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187226233, "dur": 41, + "args": { + "Device": 24572, "External id": 58, + 
"Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::mm", "pid": 24572, "tid": "24572", + "ts": 1621401187226161, "dur": 197, + "args": { + "Device": 24572, "External id": 57, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 29 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24572", + "ts": 1621401187226416, "dur": 17, + "args": { + "Device": 24572, "External id": 60, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::_unsafe_view", "pid": 24572, "tid": "24572", + "ts": 1621401187226384, "dur": 79, + "args": { + "Device": 24572, "External id": 59, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 30 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::matmul", "pid": 24572, "tid": "24572", + "ts": 1621401187226057, "dur": 422, + "args": { + "Device": 24572, "External id": 55, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 28 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24572", + "ts": 1621401187226497, "dur": 103, + "args": { + "Device": 24572, "External id": 61, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 31 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::linear", "pid": 24572, "tid": "24572", + "ts": 1621401187225932, "dur": 683, + "args": { + "Device": 24572, "External id": 51, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 27 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::broadcast_tensors", "pid": 24572, "tid": "24572", + "ts": 1621401187226708, "dur": 11, + "args": { + "Device": 24572, "External id": 62, + "Trace name": "PyTorch Profiler", 
"Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 32 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187226827, "dur": 41, + "args": { + "Device": 24572, "External id": 64, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187226955, "dur": 35, + "args": { + "Device": 24572, "External id": 66, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187227020, "dur": 11, + "args": { + "Device": 24572, "External id": 67, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::sum", "pid": 24572, "tid": "24572", + "ts": 1621401187226930, "dur": 176, + "args": { + "Device": 24572, "External id": 65, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::mse_loss", "pid": 24572, "tid": "24572", + "ts": 1621401187226753, "dur": 445, + "args": { + "Device": 24572, "External id": 63, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 32 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187227327, "dur": 21, + "args": { + "Device": 24572, "External id": 69, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24572", + "ts": 1621401187227368, "dur": 5, + "args": { + "Device": 24572, "External id": 70, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zeros", "pid": 24572, "tid": "24572", + "ts": 1621401187227314, "dur": 65, + "args": { + 
"Device": 24572, "External id": 68, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187227464, "dur": 18, + "args": { + "Device": 24572, "External id": 72, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::fill_", "pid": 24572, "tid": "24572", + "ts": 1621401187227576, "dur": 49, + "args": { + "Device": 24572, "External id": 74, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24572", + "ts": 1621401187227553, "dur": 97, + "args": { + "Device": 24572, "External id": 73, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 33 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::fill_", "pid": 24572, "tid": "24572", + "ts": 1621401187227707, "dur": 43, + "args": { + "Device": 24572, "External id": 76, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24572", + "ts": 1621401187227689, "dur": 79, + "args": { + "Device": 24572, "External id": 75, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 33 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::fill_", "pid": 24572, "tid": "24572", + "ts": 1621401187227823, "dur": 42, + "args": { + "Device": 24572, "External id": 78, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24572", + "ts": 1621401187227805, "dur": 77, + "args": { + "Device": 24572, "External id": 77, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 33 + } + }, + { + "ph": "X", "cat": "Operator", + 
"name": "aten::fill_", "pid": 24572, "tid": "24572", + "ts": 1621401187227937, "dur": 41, + "args": { + "Device": 24572, "External id": 80, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24572", + "ts": 1621401187227919, "dur": 77, + "args": { + "Device": 24572, "External id": 79, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 0, "Sequence number": 33 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "Optimizer.zero_grad#SGD.zero_grad", "pid": 24572, "tid": "24572", + "ts": 1621401187227446, "dur": 606, + "args": { + "Device": 24572, "External id": 71, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty_strided", "pid": 24572, "tid": "24572", + "ts": 1621401187228150, "dur": 53, + "args": { + "Device": 24572, "External id": 83, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty_like", "pid": 24572, "tid": "24572", + "ts": 1621401187228137, "dur": 81, + "args": { + "Device": 24572, "External id": 82, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::fill_", "pid": 24572, "tid": "24572", + "ts": 1621401187228235, "dur": 50, + "args": { + "Device": 24572, "External id": 84, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::ones_like", "pid": 24572, "tid": "24572", + "ts": 1621401187228128, "dur": 169, + "args": { + "Device": 24572, "External id": 81, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187228708, "dur": 79, + "args": { + "Device": 24572, "External id": 89, + "Trace name": "PyTorch Profiler", "Trace 
iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty_like", "pid": 24572, "tid": "24610", + "ts": 1621401187228680, "dur": 146, + "args": { + "Device": 24572, "External id": 88, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::fill_", "pid": 24572, "tid": "24610", + "ts": 1621401187228885, "dur": 93, + "args": { + "Device": 24572, "External id": 91, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24610", + "ts": 1621401187228858, "dur": 147, + "args": { + "Device": 24572, "External id": 90, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zeros_like", "pid": 24572, "tid": "24610", + "ts": 1621401187228647, "dur": 369, + "args": { + "Device": 24572, "External id": 87, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::mse_loss_backward", "pid": 24572, "tid": "24610", + "ts": 1621401187229048, "dur": 122, + "args": { + "Device": 24572, "External id": 92, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::mse_loss_backward", "pid": 24572, "tid": "24610", + "ts": 1621401187228603, "dur": 614, + "args": { + "Device": 24572, "External id": 86, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "MseLossBackward", "pid": 24572, "tid": "24610", + "ts": 1621401187228516, "dur": 727, + "args": { + "Device": 24572, "External id": 85, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 32 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "AddBackward1", "pid": 24572, "tid": "24610", + "ts": 1621401187229384, "dur": 17, + "args": { + "Device": 24572, 
"External id": 93, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 31 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187229506, "dur": 73, + "args": { + "Device": 24572, "External id": 95, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::sum", "pid": 24572, "tid": "24610", + "ts": 1621401187229459, "dur": 279, + "args": { + "Device": 24572, "External id": 94, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24610", + "ts": 1621401187229788, "dur": 65, + "args": { + "Device": 24572, "External id": 96, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24610", + "ts": 1621401187230059, "dur": 131, + "args": { + "Device": 24572, "External id": 98, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "torch::autograd::AccumulateGrad", "pid": 24572, "tid": "24610", + "ts": 1621401187230028, "dur": 228, + "args": { + "Device": 24572, "External id": 97, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24610", + "ts": 1621401187230405, "dur": 61, + "args": { + "Device": 24572, "External id": 101, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::reshape", "pid": 24572, "tid": "24610", + "ts": 1621401187230383, "dur": 107, + "args": { + "Device": 24572, "External id": 100, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "UnsafeViewBackward", "pid": 24572, "tid": "24610", + "ts": 1621401187230354, 
"dur": 146, + "args": { + "Device": 24572, "External id": 99, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 30 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187230751, "dur": 22, + "args": { + "Device": 24572, "External id": 105, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 24572, "tid": "24610", + "ts": 1621401187230732, "dur": 65, + "args": { + "Device": 24572, "External id": 104, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24610", + "ts": 1621401187230710, "dur": 124, + "args": { + "Device": 24572, "External id": 103, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::conj", "pid": 24572, "tid": "24610", + "ts": 1621401187230862, "dur": 7, + "args": { + "Device": 24572, "External id": 106, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187230935, "dur": 73, + "args": { + "Device": 24572, "External id": 108, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::mm", "pid": 24572, "tid": "24610", + "ts": 1621401187230889, "dur": 235, + "args": { + "Device": 24572, "External id": 107, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187231211, "dur": 23, + "args": { + "Device": 24572, "External id": 111, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 24572, "tid": 
"24610", + "ts": 1621401187231191, "dur": 69, + "args": { + "Device": 24572, "External id": 110, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24610", + "ts": 1621401187231168, "dur": 129, + "args": { + "Device": 24572, "External id": 109, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187231376, "dur": 17, + "args": { + "Device": 24572, "External id": 114, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 24572, "tid": "24610", + "ts": 1621401187231360, "dur": 49, + "args": { + "Device": 24572, "External id": 113, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24610", + "ts": 1621401187231340, "dur": 100, + "args": { + "Device": 24572, "External id": 112, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::conj", "pid": 24572, "tid": "24610", + "ts": 1621401187231465, "dur": 6, + "args": { + "Device": 24572, "External id": 115, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187231534, "dur": 72, + "args": { + "Device": 24572, "External id": 117, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::mm", "pid": 24572, "tid": "24610", + "ts": 1621401187231491, "dur": 225, + "args": { + "Device": 24572, "External id": 116, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "MmBackward", "pid": 24572, "tid": "24610", + "ts": 
1621401187230626, "dur": 1124, + "args": { + "Device": 24572, "External id": 102, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 29 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24610", + "ts": 1621401187231992, "dur": 61, + "args": { + "Device": 24572, "External id": 120, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::reshape", "pid": 24572, "tid": "24610", + "ts": 1621401187231970, "dur": 108, + "args": { + "Device": 24572, "External id": 119, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "ViewBackward", "pid": 24572, "tid": "24610", + "ts": 1621401187231941, "dur": 166, + "args": { + "Device": 24572, "External id": 118, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 28 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187232305, "dur": 21, + "args": { + "Device": 24572, "External id": 124, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 24572, "tid": "24610", + "ts": 1621401187232286, "dur": 62, + "args": { + "Device": 24572, "External id": 123, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24610", + "ts": 1621401187232265, "dur": 123, + "args": { + "Device": 24572, "External id": 122, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "TBackward", "pid": 24572, "tid": "24610", + "ts": 1621401187232239, "dur": 161, + "args": { + "Device": 24572, "External id": 121, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence 
number": 27 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24610", + "ts": 1621401187232535, "dur": 85, + "args": { + "Device": 24572, "External id": 126, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "torch::autograd::AccumulateGrad", "pid": 24572, "tid": "24610", + "ts": 1621401187232515, "dur": 148, + "args": { + "Device": 24572, "External id": 125, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187232790, "dur": 47, + "args": { + "Device": 24572, "External id": 129, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::fill_", "pid": 24572, "tid": "24610", + "ts": 1621401187232866, "dur": 68, + "args": { + "Device": 24572, "External id": 130, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::scalar_tensor", "pid": 24572, "tid": "24610", + "ts": 1621401187232776, "dur": 174, + "args": { + "Device": 24572, "External id": 128, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187233023, "dur": 27, + "args": { + "Device": 24572, "External id": 132, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::_local_scalar_dense", "pid": 24572, "tid": "24610", + "ts": 1621401187233192, "dur": 6, + "args": { + "Device": 24572, "External id": 135, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::item", "pid": 24572, "tid": "24610", + "ts": 1621401187233184, "dur": 24, + "args": { + "Device": 24572, "External id": 134, + "Trace name": "PyTorch 
Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::resize_", "pid": 24572, "tid": "24610", + "ts": 1621401187233251, "dur": 41, + "args": { + "Device": 24572, "External id": 136, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::ge", "pid": 24572, "tid": "24610", + "ts": 1621401187233168, "dur": 182, + "args": { + "Device": 24572, "External id": 133, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::ge", "pid": 24572, "tid": "24610", + "ts": 1621401187232971, "dur": 404, + "args": { + "Device": 24572, "External id": 131, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187233430, "dur": 15, + "args": { + "Device": 24572, "External id": 139, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::expand", "pid": 24572, "tid": "24610", + "ts": 1621401187233414, "dur": 62, + "args": { + "Device": 24572, "External id": 138, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187233508, "dur": 10, + "args": { + "Device": 24572, "External id": 141, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::expand", "pid": 24572, "tid": "24610", + "ts": 1621401187233494, "dur": 48, + "args": { + "Device": 24572, "External id": 140, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187233571, "dur": 10, + "args": { + "Device": 24572, "External id": 143, + "Trace name": "PyTorch 
Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::expand", "pid": 24572, "tid": "24610", + "ts": 1621401187233558, "dur": 43, + "args": { + "Device": 24572, "External id": 142, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187233649, "dur": 46, + "args": { + "Device": 24572, "External id": 145, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::_s_where", "pid": 24572, "tid": "24610", + "ts": 1621401187233620, "dur": 167, + "args": { + "Device": 24572, "External id": 144, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::where", "pid": 24572, "tid": "24610", + "ts": 1621401187233398, "dur": 409, + "args": { + "Device": 24572, "External id": 137, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "ClampBackward1", "pid": 24572, "tid": "24610", + "ts": 1621401187232724, "dur": 1110, + "args": { + "Device": 24572, "External id": 127, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 26 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "AddBackward1", "pid": 24572, "tid": "24610", + "ts": 1621401187233941, "dur": 12, + "args": { + "Device": 24572, "External id": 146, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 25 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187234021, "dur": 46, + "args": { + "Device": 24572, "External id": 148, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::sum", "pid": 24572, "tid": "24610", + "ts": 1621401187233990, "dur": 182, 
+ "args": { + "Device": 24572, "External id": 147, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24610", + "ts": 1621401187234208, "dur": 43, + "args": { + "Device": 24572, "External id": 149, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24610", + "ts": 1621401187234378, "dur": 84, + "args": { + "Device": 24572, "External id": 151, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "torch::autograd::AccumulateGrad", "pid": 24572, "tid": "24610", + "ts": 1621401187234357, "dur": 144, + "args": { + "Device": 24572, "External id": 150, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::view", "pid": 24572, "tid": "24610", + "ts": 1621401187234593, "dur": 39, + "args": { + "Device": 24572, "External id": 154, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::reshape", "pid": 24572, "tid": "24610", + "ts": 1621401187234580, "dur": 67, + "args": { + "Device": 24572, "External id": 153, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "UnsafeViewBackward", "pid": 24572, "tid": "24610", + "ts": 1621401187234561, "dur": 92, + "args": { + "Device": 24572, "External id": 152, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 24 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187234803, "dur": 14, + "args": { + "Device": 24572, "External id": 158, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 
24572, "tid": "24610", + "ts": 1621401187234792, "dur": 41, + "args": { + "Device": 24572, "External id": 157, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24610", + "ts": 1621401187234778, "dur": 79, + "args": { + "Device": 24572, "External id": 156, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::conj", "pid": 24572, "tid": "24610", + "ts": 1621401187234874, "dur": 4, + "args": { + "Device": 24572, "External id": 159, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24610", + "ts": 1621401187234918, "dur": 47, + "args": { + "Device": 24572, "External id": 161, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::mm", "pid": 24572, "tid": "24610", + "ts": 1621401187234890, "dur": 149, + "args": { + "Device": 24572, "External id": 160, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187235092, "dur": 15, + "args": { + "Device": 24572, "External id": 164, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 24572, "tid": "24610", + "ts": 1621401187235080, "dur": 39, + "args": { + "Device": 24572, "External id": 163, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24610", + "ts": 1621401187235067, "dur": 75, + "args": { + "Device": 24572, "External id": 162, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "MmBackward", "pid": 24572, "tid": "24610", + 
"ts": 1621401187234734, "dur": 424, + "args": { + "Device": 24572, "External id": 155, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 23 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::as_strided", "pid": 24572, "tid": "24610", + "ts": 1621401187235312, "dur": 13, + "args": { + "Device": 24572, "External id": 168, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 24572, "tid": "24610", + "ts": 1621401187235301, "dur": 40, + "args": { + "Device": 24572, "External id": 167, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 24572, "tid": "24610", + "ts": 1621401187235288, "dur": 78, + "args": { + "Device": 24572, "External id": 166, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "TBackward", "pid": 24572, "tid": "24610", + "ts": 1621401187235271, "dur": 103, + "args": { + "Device": 24572, "External id": 165, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 , + "Fwd thread id": 1, "Sequence number": 22 + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24610", + "ts": 1621401187235487, "dur": 85, + "args": { + "Device": 24572, "External id": 170, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "torch::autograd::AccumulateGrad", "pid": 24572, "tid": "24610", + "ts": 1621401187235467, "dur": 147, + "args": { + "Device": 24572, "External id": 169, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187235803, "dur": 24, + "args": { + "Device": 24572, "External id": 172, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { 
+ "ph": "X", "cat": "Operator", + "name": "aten::zero_", "pid": 24572, "tid": "24572", + "ts": 1621401187235850, "dur": 5, + "args": { + "Device": 24572, "External id": 173, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::zeros", "pid": 24572, "tid": "24572", + "ts": 1621401187235787, "dur": 75, + "args": { + "Device": 24572, "External id": 171, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::empty", "pid": 24572, "tid": "24572", + "ts": 1621401187235954, "dur": 20, + "args": { + "Device": 24572, "External id": 175, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24572", + "ts": 1621401187236091, "dur": 82, + "args": { + "Device": 24572, "External id": 176, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24572", + "ts": 1621401187236221, "dur": 70, + "args": { + "Device": 24572, "External id": 177, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24572", + "ts": 1621401187236334, "dur": 68, + "args": { + "Device": 24572, "External id": 178, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::add_", "pid": 24572, "tid": "24572", + "ts": 1621401187236444, "dur": 68, + "args": { + "Device": 24572, "External id": 179, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Operator", + "name": "Optimizer.step#SGD.step", "pid": 24572, "tid": "24572", + "ts": 1621401187235935, "dur": 663, + "args": { + "Device": 24572, "External id": 174, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", 
"cat": "Operator", + "name": "ProfilerStep#2", "pid": 24572, "tid": "24572", + "ts": 1621401187223358, "dur": 13410, + "args": { + "Device": 24572, "External id": 4, + "Trace name": "PyTorch Profiler", "Trace iteration": 0 + + } + }, + { + "ph": "X", "cat": "Memcpy", + "name": "Memcpy HtoD (Pageable -> Device)", "pid": 0, "tid": "stream 7", + "ts": 1621401187224556, "dur": 1, + "args": { + "device": 0, "context": 1, + "stream": 7, "correlation": 311, "external id": 31, + "bytes": 640, "memory bandwidth (GB/s)": 0.46511627906976744 + } + }, + { + "ph": "f", "id": 311, "pid": 0, "tid": "stream 7", "ts": 1621401187224556, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaMemcpyAsync", "pid": 24572, "tid": "24572", + "ts": 1621401187224533, "dur": 20, + "args": { + "cbid": 41, "correlation": 311, + "external id": 31, "external ts": 1621401187224496 + } + }, + { + "ph": "s", "id": 311, "pid": 24572, "tid": 24572, "ts": 1621401187224533, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaStreamSynchronize", "pid": 24572, "tid": "24572", + "ts": 1621401187224554, "dur": 8, + "args": { + "cbid": 131, "correlation": 312, + "external id": 31, "external ts": 1621401187224496 + } + }, + { + "ph": "X", "cat": "Memcpy", + "name": "Memcpy HtoD (Pageable -> Device)", "pid": 0, "tid": "stream 7", + "ts": 1621401187224767, "dur": 1, + "args": { + "device": 0, "context": 1, + "stream": 7, "correlation": 323, "external id": 34, + "bytes": 128, "memory bandwidth (GB/s)": 0.09523809523809523 + } + }, + { + "ph": "f", "id": 323, "pid": 0, "tid": "stream 7", "ts": 1621401187224767, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaMemcpyAsync", "pid": 24572, "tid": "24572", + "ts": 1621401187224752, "dur": 12, + "args": { + "cbid": 41, "correlation": 323, + "external id": 34, "external ts": 1621401187224720 + } + }, + { + "ph": "s", "id": 323, "pid": 
24572, "tid": 24572, "ts": 1621401187224752, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaStreamSynchronize", "pid": 24572, "tid": "24572", + "ts": 1621401187224765, "dur": 7, + "args": { + "cbid": 131, "correlation": 324, + "external id": 34, "external ts": 1621401187224720 + } + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", "pid": 24572, "tid": "24572", + "ts": 1621401187225253, "dur": 2, + "args": { + "cbid": 251, "correlation": 332, + "external id": 41, "external ts": 1621401187225112 + } + }, + { + "ph": "X", "cat": "Kernel", + "name": "void gemmSN_TN_kernel_64addr, cublasGemvTensorStridedBatched >(cublasGemmSmallNParams, cublasGemvTensorStridedBatched, float>)", "pid": 0, "tid": "stream 7", + "ts": 1621401187225275, "dur": 3, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 333, "external id": 41, + "registers per thread": 72, + "shared memory": 13824, + "blocks per SM": 0.025, + "warps per SM": 0.1, + "grid": [1, 2, 1], + "block": [128, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 333, "pid": 0, "tid": "stream 7", "ts": 1621401187225275, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187225258, "dur": 16, + "args": { + "cbid": 211, "correlation": 333, + "external id": 41, "external ts": 1621401187225112 + } + }, + { + "ph": "s", "id": 333, "pid": 24572, "tid": 24572, "ts": 1621401187225258, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::unrolled_elementwise_kernel, at::detail::Array, OffsetCalculator<2, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast>(int, at::native::AddFunctor, at::detail::Array, OffsetCalculator<2, unsigned int>, OffsetCalculator<1, 
unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast)", "pid": 0, "tid": "stream 7", + "ts": 1621401187225530, "dur": 2, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 338, "external id": 45, + "registers per thread": 22, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 338, "pid": 0, "tid": "stream 7", "ts": 1621401187225530, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187225512, "dur": 16, + "args": { + "cbid": 211, "correlation": 338, + "external id": 45, "external ts": 1621401187225449 + } + }, + { + "ph": "s", "id": 338, "pid": 24572, "tid": 24572, "ts": 1621401187225512, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::(anonymous namespace)::clamp_min_scalar_kernel_impl(at::TensorIterator&, c10::Scalar)::{lambda()#1}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array >(int, at::native::(anonymous namespace)::clamp_min_scalar_kernel_impl(at::TensorIterator&, c10::Scalar)::{lambda()#1}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187225820, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 352, "external id": 49, + "registers per thread": 18, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 352, "pid": 0, "tid": "stream 7", "ts": 1621401187225820, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": 
"cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187225803, "dur": 15, + "args": { + "cbid": 211, "correlation": 352, + "external id": 49, "external ts": 1621401187225721 + } + }, + { + "ph": "s", "id": 352, "pid": 24572, "tid": 24572, "ts": 1621401187225803, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", "pid": 24572, "tid": "24572", + "ts": 1621401187226305, "dur": 2, + "args": { + "cbid": 251, "correlation": 363, + "external id": 57, "external ts": 1621401187226161 + } + }, + { + "ph": "X", "cat": "Kernel", + "name": "void gemmSN_TN_kernel_64addr, cublasGemvTensorStridedBatched >(cublasGemmSmallNParams, cublasGemvTensorStridedBatched, float>)", "pid": 0, "tid": "stream 7", + "ts": 1621401187226325, "dur": 2, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 364, "external id": 57, + "registers per thread": 72, + "shared memory": 13824, + "blocks per SM": 0.025, + "warps per SM": 0.1, + "grid": [1, 2, 1], + "block": [128, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 364, "pid": 0, "tid": "stream 7", "ts": 1621401187226325, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187226309, "dur": 15, + "args": { + "cbid": 211, "correlation": 364, + "external id": 57, "external ts": 1621401187226161 + } + }, + { + "ph": "s", "id": 364, "pid": 24572, "tid": 24572, "ts": 1621401187226309, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::unrolled_elementwise_kernel, at::detail::Array, OffsetCalculator<2, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast>(int, at::native::AddFunctor, at::detail::Array, OffsetCalculator<2, unsigned int>, OffsetCalculator<1, unsigned int>, 
at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast)", "pid": 0, "tid": "stream 7", + "ts": 1621401187226575, "dur": 2, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 369, "external id": 61, + "registers per thread": 22, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 369, "pid": 0, "tid": "stream 7", "ts": 1621401187226575, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187226558, "dur": 15, + "args": { + "cbid": 211, "correlation": 369, + "external id": 61, "external ts": 1621401187226497 + } + }, + { + "ph": "s", "id": 369, "pid": 24572, "tid": 24572, "ts": 1621401187226558, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::mse_kernel_cuda(at::TensorIterator&)::{lambda()#1}::operator()() const::{lambda()#4}::operator()() const::{lambda(float, float)#1}, at::detail::Array >(int, at::native::mse_kernel_cuda(at::TensorIterator&)::{lambda()#1}::operator()() const::{lambda()#4}::operator()() const::{lambda(float, float)#1}, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187226912, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 377, "external id": 63, + "registers per thread": 20, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 377, "pid": 0, "tid": "stream 7", "ts": 1621401187226912, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187226895, "dur": 16, + "args": { + 
"cbid": 211, "correlation": 377, + "external id": 63, "external ts": 1621401187226753 + } + }, + { + "ph": "s", "id": 377, "pid": 24572, "tid": 24572, "ts": 1621401187226895, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4> >(at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4>)", "pid": 0, "tid": "stream 7", + "ts": 1621401187227092, "dur": 2, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 388, "external id": 65, + "registers per thread": 32, + "shared memory": 16, + "blocks per SM": 0.0125, + "warps per SM": 0.0125, + "grid": [1, 1, 1], + "block": [32, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 388, "pid": 0, "tid": "stream 7", "ts": 1621401187227092, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187227075, "dur": 15, + "args": { + "cbid": 211, "correlation": 388, + "external id": 65, "external ts": 1621401187226930 + } + }, + { + "ph": "s", "id": 388, "pid": 24572, "tid": 24572, "ts": 1621401187227075, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187227619, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 395, "external id": 74, + "registers per thread": 16, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 395, "pid": 0, "tid": "stream 7", "ts": 
1621401187227619, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187227601, "dur": 16, + "args": { + "cbid": 211, "correlation": 395, + "external id": 74, "external ts": 1621401187227576 + } + }, + { + "ph": "s", "id": 395, "pid": 24572, "tid": 24572, "ts": 1621401187227601, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187227745, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 402, "external id": 76, + "registers per thread": 16, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 402, "pid": 0, "tid": "stream 7", "ts": 1621401187227745, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187227729, "dur": 14, + "args": { + "cbid": 211, "correlation": 402, + "external id": 76, "external ts": 1621401187227707 + } + }, + { + "ph": "s", "id": 402, "pid": 24572, "tid": 24572, "ts": 1621401187227729, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187227859, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 409, "external id": 78, + "registers per thread": 16, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + 
"theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 409, "pid": 0, "tid": "stream 7", "ts": 1621401187227859, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187227844, "dur": 13, + "args": { + "cbid": 211, "correlation": 409, + "external id": 78, "external ts": 1621401187227823 + } + }, + { + "ph": "s", "id": 409, "pid": 24572, "tid": 24572, "ts": 1621401187227844, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187227973, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 416, "external id": 80, + "registers per thread": 16, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 416, "pid": 0, "tid": "stream 7", "ts": 1621401187227973, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187227958, "dur": 13, + "args": { + "cbid": 211, "correlation": 416, + "external id": 80, "external ts": 1621401187227937 + } + }, + { + "ph": "s", "id": 416, "pid": 24572, "tid": 24572, "ts": 1621401187227958, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187228279, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 429, "external id": 84, + "registers per thread": 16, + "shared memory": 0, + 
"blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 429, "pid": 0, "tid": "stream 7", "ts": 1621401187228279, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187228262, "dur": 15, + "args": { + "cbid": 211, "correlation": 429, + "external id": 84, "external ts": 1621401187228235 + } + }, + { + "ph": "s", "id": 429, "pid": 24572, "tid": 24572, "ts": 1621401187228262, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187228962, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 440, "external id": 91, + "registers per thread": 16, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 440, "pid": 0, "tid": "stream 7", "ts": 1621401187228962, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187228932, "dur": 30, + "args": { + "cbid": 211, "correlation": 440, + "external id": 91, "external ts": 1621401187228885 + } + }, + { + "ph": "s", "id": 440, "pid": 24572, "tid": 24610, "ts": 1621401187228932, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::unrolled_elementwise_kernel, OffsetCalculator<3, unsigned int>, at::detail::Array<1, unsigned int>, at::native::memory::LoadWithoutCast, OffsetCalculator::StoreWithoutCast>(int, at::native::mse_backward_cuda_kernel(at::TensorIterator&, c10::Scalar 
const&)::{lambda()#1}::operator()() const::{lambda()#4}::operator()() const::{lambda(float, float, float)#1}, at::detail::Array, OffsetCalculator<3, unsigned int>, at::detail::Array<1, unsigned int>, at::native::memory::LoadWithoutCast, OffsetCalculator::StoreWithoutCast)", "pid": 0, "tid": "stream 7", + "ts": 1621401187229153, "dur": 2, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 446, "external id": 92, + "registers per thread": 28, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 446, "pid": 0, "tid": "stream 7", "ts": 1621401187229153, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187229127, "dur": 26, + "args": { + "cbid": 211, "correlation": 446, + "external id": 92, "external ts": 1621401187229048 + } + }, + { + "ph": "s", "id": 446, "pid": 24572, "tid": 24610, "ts": 1621401187229127, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::reduce_kernel<256, 2, at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4> >(at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4>)", "pid": 0, "tid": "stream 7", + "ts": 1621401187229711, "dur": 4, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 460, "external id": 94, + "registers per thread": 35, + "shared memory": 16, + "blocks per SM": 0.0125, + "warps per SM": 0.00625, + "grid": [1, 1, 1], + "block": [1, 16, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 460, "pid": 0, "tid": "stream 7", "ts": 1621401187229711, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 
24572, "tid": "24610", + "ts": 1621401187229681, "dur": 30, + "args": { + "cbid": 211, "correlation": 460, + "external id": 94, "external ts": 1621401187229459 + } + }, + { + "ph": "s", "id": 460, "pid": 24572, "tid": 24610, "ts": 1621401187229681, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187230162, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 467, "external id": 98, + "registers per thread": 20, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 467, "pid": 0, "tid": "stream 7", "ts": 1621401187230162, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187230133, "dur": 29, + "args": { + "cbid": 211, "correlation": 467, + "external id": 98, "external ts": 1621401187230059 + } + }, + { + "ph": "s", "id": 467, "pid": 24572, "tid": 24610, "ts": 1621401187230133, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", "pid": 24572, "tid": "24610", + "ts": 1621401187231063, "dur": 4, + "args": { + "cbid": 251, "correlation": 480, + "external id": 107, "external ts": 1621401187230889 + } + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", "pid": 24572, "tid": "24610", + "ts": 1621401187231069, "dur": 1, + "args": { + "cbid": 251, "correlation": 481, + "external id": 107, "external ts": 1621401187230889 + } + }, + { + "ph": "X", "cat": "Kernel", + "name": "volta_sgemm_128x32_nt", "pid": 0, "tid": "stream 7", + "ts": 
1621401187231100, "dur": 3, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 482, "external id": 107, + "registers per thread": 55, + "shared memory": 16384, + "blocks per SM": 0.0125, + "warps per SM": 0.1, + "grid": [1, 1, 1], + "block": [256, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 482, "pid": 0, "tid": "stream 7", "ts": 1621401187231100, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187231073, "dur": 27, + "args": { + "cbid": 211, "correlation": 482, + "external id": 107, "external ts": 1621401187230889 + } + }, + { + "ph": "s", "id": 482, "pid": 24572, "tid": 24610, "ts": 1621401187231073, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", "pid": 24572, "tid": "24610", + "ts": 1621401187231658, "dur": 3, + "args": { + "cbid": 251, "correlation": 491, + "external id": 116, "external ts": 1621401187231491 + } + }, + { + "ph": "X", "cat": "Kernel", + "name": "void gemmSN_NN_kernel, cublasGemvTensorStridedBatched >(cublasGemmSmallNParams, cublasGemvTensorStridedBatched, float>)", "pid": 0, "tid": "stream 7", + "ts": 1621401187231692, "dur": 2, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 492, "external id": 116, + "registers per thread": 64, + "shared memory": 12288, + "blocks per SM": 0.05, + "warps per SM": 0.4, + "grid": [1, 4, 1], + "block": [256, 1, 1], + "theoretical occupancy %": 1 + } + }, + { + "ph": "f", "id": 492, "pid": 0, "tid": "stream 7", "ts": 1621401187231692, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187231665, "dur": 27, + "args": { + "cbid": 211, "correlation": 492, + "external id": 116, "external ts": 1621401187231491 + } 
+ }, + { + "ph": "s", "id": 492, "pid": 24572, "tid": 24610, "ts": 1621401187231665, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187232603, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 503, "external id": 126, + "registers per thread": 20, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 503, "pid": 0, "tid": "stream 7", "ts": 1621401187232603, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187232583, "dur": 19, + "args": { + "cbid": 211, "correlation": 503, + "external id": 126, "external ts": 1621401187232535 + } + }, + { + "ph": "s", "id": 503, "pid": 24572, "tid": 24610, "ts": 1621401187232583, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187232921, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 513, "external id": 130, + "registers per thread": 16, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 513, "pid": 0, "tid": "stream 7", "ts": 1621401187232921, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187232901, "dur": 19, + "args": { 
+ "cbid": 211, "correlation": 513, + "external id": 130, "external ts": 1621401187232866 + } + }, + { + "ph": "s", "id": 513, "pid": 24572, "tid": 24610, "ts": 1621401187232901, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::BUnaryFunctor >, at::detail::Array >(int, at::native::BUnaryFunctor >, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187233342, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 526, "external id": 133, + "registers per thread": 16, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 526, "pid": 0, "tid": "stream 7", "ts": 1621401187233342, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187233323, "dur": 18, + "args": { + "cbid": 211, "correlation": 526, + "external id": 133, "external ts": 1621401187233168 + } + }, + { + "ph": "s", "id": 526, "pid": 24572, "tid": 24610, "ts": 1621401187233323, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::unrolled_elementwise_kernel, OffsetCalculator<3, unsigned int>, at::detail::Array<1, unsigned int>, at::native::memory::LoadWithoutCast, OffsetCalculator::StoreWithoutCast>(int, at::native::(anonymous namespace)::where_kernel_impl(at::TensorIterator&, c10::ScalarType)::{lambda()#1}::operator()() const::{lambda()#8}::operator()() const::{lambda(bool, float, float)#1}, at::detail::Array, OffsetCalculator<3, unsigned int>, at::detail::Array<1, unsigned int>, at::native::memory::LoadWithoutCast, OffsetCalculator::StoreWithoutCast)", "pid": 0, "tid": "stream 7", + "ts": 1621401187233770, "dur": 2, + "args": { + "queued": 0, "device": 0, "context": 1, + 
"stream": 7, "correlation": 535, "external id": 144, + "registers per thread": 26, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 535, "pid": 0, "tid": "stream 7", "ts": 1621401187233770, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187233751, "dur": 19, + "args": { + "cbid": 211, "correlation": 535, + "external id": 144, "external ts": 1621401187233620 + } + }, + { + "ph": "s", "id": 535, "pid": 24572, "tid": 24610, "ts": 1621401187233751, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4> >(at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4>)", "pid": 0, "tid": "stream 7", + "ts": 1621401187234156, "dur": 3, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 548, "external id": 147, + "registers per thread": 32, + "shared memory": 16, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [4, 16, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 548, "pid": 0, "tid": "stream 7", "ts": 1621401187234156, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187234135, "dur": 19, + "args": { + "cbid": 211, "correlation": 548, + "external id": 147, "external ts": 1621401187233990 + } + }, + { + "ph": "s", "id": 548, "pid": 24572, "tid": 24610, "ts": 1621401187234135, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, 
at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187234445, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 555, "external id": 151, + "registers per thread": 20, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 555, "pid": 0, "tid": "stream 7", "ts": 1621401187234445, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187234425, "dur": 19, + "args": { + "cbid": 211, "correlation": 555, + "external id": 151, "external ts": 1621401187234378 + } + }, + { + "ph": "s", "id": 555, "pid": 24572, "tid": 24610, "ts": 1621401187234425, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", "pid": 24572, "tid": "24610", + "ts": 1621401187235000, "dur": 2, + "args": { + "cbid": 251, "correlation": 568, + "external id": 160, "external ts": 1621401187234890 + } + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", "pid": 24572, "tid": "24610", + "ts": 1621401187235004, "dur": 0, + "args": { + "cbid": 251, "correlation": 569, + "external id": 160, "external ts": 1621401187234890 + } + }, + { + "ph": "X", "cat": "Kernel", + "name": "volta_sgemm_128x32_nt", "pid": 0, "tid": "stream 7", + "ts": 1621401187235025, "dur": 3, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 570, "external id": 160, + "registers per thread": 55, + "shared memory": 16384, + "blocks per SM": 0.0125, + "warps per SM": 0.1, + "grid": [1, 1, 1], + "block": [256, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 570, "pid": 0, "tid": "stream 7", 
"ts": 1621401187235025, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187235006, "dur": 17, + "args": { + "cbid": 211, "correlation": 570, + "external id": 160, "external ts": 1621401187234890 + } + }, + { + "ph": "s", "id": 570, "pid": 24572, "tid": 24610, "ts": 1621401187235006, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187235555, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 579, "external id": 170, + "registers per thread": 20, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 579, "pid": 0, "tid": "stream 7", "ts": 1621401187235555, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24610", + "ts": 1621401187235535, "dur": 19, + "args": { + "cbid": 211, "correlation": 579, + "external id": 170, "external ts": 1621401187235487 + } + }, + { + "ph": "s", "id": 579, "pid": 24572, "tid": 24610, "ts": 1621401187235535, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187236158, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 585, "external id": 176, + "registers per thread": 20, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + 
"theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 585, "pid": 0, "tid": "stream 7", "ts": 1621401187236158, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187236138, "dur": 18, + "args": { + "cbid": 211, "correlation": 585, + "external id": 176, "external ts": 1621401187236091 + } + }, + { + "ph": "s", "id": 585, "pid": 24572, "tid": 24572, "ts": 1621401187236138, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187236278, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 590, "external id": 177, + "registers per thread": 20, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 590, "pid": 0, "tid": "stream 7", "ts": 1621401187236278, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187236261, "dur": 15, + "args": { + "cbid": 211, "correlation": 590, + "external id": 177, "external ts": 1621401187236221 + } + }, + { + "ph": "s", "id": 590, "pid": 24572, "tid": 24572, "ts": 1621401187236261, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187236390, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 595, "external id": 178, + "registers per thread": 20, + "shared memory": 0, + 
"blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 595, "pid": 0, "tid": "stream 7", "ts": 1621401187236390, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187236373, "dur": 15, + "args": { + "cbid": 211, "correlation": 595, + "external id": 178, "external ts": 1621401187236334 + } + }, + { + "ph": "s", "id": 595, "pid": 24572, "tid": 24572, "ts": 1621401187236373, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Kernel", + "name": "void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", "pid": 0, "tid": "stream 7", + "ts": 1621401187236501, "dur": 1, + "args": { + "queued": 0, "device": 0, "context": 1, + "stream": 7, "correlation": 600, "external id": 179, + "registers per thread": 20, + "shared memory": 0, + "blocks per SM": 0.0125, + "warps per SM": 0.025, + "grid": [1, 1, 1], + "block": [64, 1, 1], + "theoretical occupancy %": 0 + } + }, + { + "ph": "f", "id": 600, "pid": 0, "tid": "stream 7", "ts": 1621401187236501, + "cat": "async", "name": "launch", "bp": "e" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 24572, "tid": "24572", + "ts": 1621401187236483, "dur": 15, + "args": { + "cbid": 211, "correlation": 600, + "external id": 179, "external ts": 1621401187236444 + } + }, + { + "ph": "s", "id": 600, "pid": 24572, "tid": 24572, "ts": 1621401187236483, + "cat": "async", "name": "launch" + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaDeviceSynchronize", "pid": 24572, "tid": "24572", + "ts": 1621401187236853, "dur": 10, + "args": { + "cbid": 165, "correlation": 605, + "external id": 0, "external ts": 0 + } + }, + { + "name": "process_name", "ph": "M", "ts": 1621401187223005, "pid": 24572, "tid": 0, + 
"args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 24572, "tid": 0, + "args": { + "labels": "CPU" + } + }, + { + "name": "process_name", "ph": "M", "ts": 1621401187223005, "pid": 0, "tid": 0, + "args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 0, "tid": 0, + "args": { + "labels": "GPU 0" + } + }, + { + "name": "process_name", "ph": "M", "ts": 1621401187223005, "pid": 1, "tid": 0, + "args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 1, "tid": 0, + "args": { + "labels": "GPU 1" + } + }, + { + "name": "process_name", "ph": "M", "ts": 1621401187223005, "pid": 2, "tid": 0, + "args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 2, "tid": 0, + "args": { + "labels": "GPU 2" + } + }, + { + "name": "process_name", "ph": "M", "ts": 1621401187223005, "pid": 3, "tid": 0, + "args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 3, "tid": 0, + "args": { + "labels": "GPU 3" + } + }, + { + "name": "process_name", "ph": "M", "ts": 1621401187223005, "pid": 4, "tid": 0, + "args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 4, "tid": 0, + "args": { + "labels": "GPU 4" + } + }, + { + "name": "process_name", "ph": "M", "ts": 1621401187223005, "pid": 5, "tid": 0, + "args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 5, "tid": 0, + "args": { + "labels": "GPU 5" + } + }, + { + "name": "process_name", "ph": "M", "ts": 1621401187223005, "pid": 6, "tid": 0, + "args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 6, "tid": 0, + "args": { + "labels": "GPU 6" + } + }, + { + "name": "process_name", "ph": "M", "ts": 
1621401187223005, "pid": 7, "tid": 0, + "args": { + "name": "python" + } + }, + { + "name": "process_labels", "ph": "M", "ts": 1621401187223005, "pid": 7, "tid": 0, + "args": { + "labels": "GPU 7" + } + }, + { + "name": "thread_name", "ph": "M", "ts": 1621401187223005, "pid": 24572, "tid": "24610", + "args": { + "name": "thread 24610 (python)" + } + }, + { + "name": "thread_name", "ph": "M", "ts": 1621401187223005, "pid": 24572, "tid": "24572", + "args": { + "name": "thread 24572 (python)" + } + }, + { + "ph": "X", "cat": "Trace", "ts": 1621401187223005, "dur": 13896, + "pid": "Traces", "tid": "PyTorch Profiler", + "name": "PyTorch Profiler (0)", + "args": { + "Op count": 0 + } + }, + { + "name": "Iteration Start: PyTorch Profiler", "ph": "i", "s": "g", + "pid": "Traces", "tid": "Trace PyTorch Profiler", "ts": 1621401187223005 + }, + { + "name": "Record Window End", "ph": "i", "s": "g", + "pid": "", "tid": "", "ts": 1621401187237108 + } +]} \ No newline at end of file diff --git a/tb_plugin/test/result_check_file.txt b/tb_plugin/test/result_check_file.txt index 96ece45b1..c59f1ad50 100644 --- a/tb_plugin/test/result_check_file.txt +++ b/tb_plugin/test/result_check_file.txt @@ -1,10 +1,10 @@ -{"steps": {"columns": [{"type": "string", "name": "Step"}, {"type": "number", "name": "Kernel"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "Memcpy"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "Memset"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "Runtime"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "DataLoader"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "CPU Exec"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "Other"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}], "rows": [["5", 100863, 
"
Step 5
Total: 129983us
Kernel: 100863us
Percentage: 77.6%
", 1948, "
Step 5
Total: 129983us
Memcpy: 1948us
Percentage: 1.5%
", 69, "
Step 5
Total: 129983us
Memset: 69us
Percentage: 0.05%
", 3346, "
Step 5
Total: 129983us
Runtime: 3346us
Percentage: 2.57%
", 10023, "
Step 5
Total: 129983us
DataLoader: 10023us
Percentage: 7.71%
", 12460, "
Step 5
Total: 129983us
CPU Exec: 12460us
Percentage: 9.59%
", 1274, "
Step 5
Total: 129983us
Other: 1274us
Percentage: 0.98%
"], ["6", 100576, "
Step 6
Total: 158499us
Kernel: 100576us
Percentage: 63.46%
", 2436, "
Step 6
Total: 158499us
Memcpy: 2436us
Percentage: 1.54%
", 69, "
Step 6
Total: 158499us
Memset: 69us
Percentage: 0.04%
", 3144, "
Step 6
Total: 158499us
Runtime: 3144us
Percentage: 1.98%
", 37553, "
Step 6
Total: 158499us
DataLoader: 37553us
Percentage: 23.69%
", 13420, "
Step 6
Total: 158499us
CPU Exec: 13420us
Percentage: 8.47%
", 1301, "
Step 6
Total: 158499us
Other: 1301us
Percentage: 0.82%
"], ["7", 100821, "
Step 7
Total: 140334us
Kernel: 100821us
Percentage: 71.84%
", 2111, "
Step 7
Total: 140334us
Memcpy: 2111us
Percentage: 1.5%
", 69, "
Step 7
Total: 140334us
Memset: 69us
Percentage: 0.05%
", 1756, "
Step 7
Total: 140334us
Runtime: 1756us
Percentage: 1.25%
", 28965, "
Step 7
Total: 140334us
DataLoader: 28965us
Percentage: 20.64%
", 5907, "
Step 7
Total: 140334us
CPU Exec: 5907us
Percentage: 4.21%
", 705, "
Step 7
Total: 140334us
Other: 705us
Percentage: 0.5%
"], ["8", 101109, "
Step 8
Total: 163126us
Kernel: 101109us
Percentage: 61.98%
", 2078, "
Step 8
Total: 163126us
Memcpy: 2078us
Percentage: 1.27%
", 69, "
Step 8
Total: 163126us
Memset: 69us
Percentage: 0.04%
", 2040, "
Step 8
Total: 163126us
Runtime: 2040us
Percentage: 1.25%
", 49998, "
Step 8
Total: 163126us
DataLoader: 49998us
Percentage: 30.65%
", 7087, "
Step 8
Total: 163126us
CPU Exec: 7087us
Percentage: 4.34%
", 745, "
Step 8
Total: 163126us
Other: 745us
Percentage: 0.46%
"], ["9", 101108, "
Step 9
Total: 141407us
Kernel: 101108us
Percentage: 71.5%
", 2072, "
Step 9
Total: 141407us
Memcpy: 2072us
Percentage: 1.47%
", 69, "
Step 9
Total: 141407us
Memset: 69us
Percentage: 0.05%
", 2926, "
Step 9
Total: 141407us
Runtime: 2926us
Percentage: 2.07%
", 25338, "
Step 9
Total: 141407us
DataLoader: 25338us
Percentage: 17.92%
", 9084, "
Step 9
Total: 141407us
CPU Exec: 9084us
Percentage: 6.42%
", 810, "
Step 9
Total: 141407us
Other: 810us
Percentage: 0.57%
"], ["10", 100732, "
Step 10
Total: 159068us
Kernel: 100732us
Percentage: 63.33%
", 2089, "
Step 10
Total: 159068us
Memcpy: 2089us
Percentage: 1.31%
", 69, "
Step 10
Total: 159068us
Memset: 69us
Percentage: 0.04%
", 4174, "
Step 10
Total: 159068us
Runtime: 4174us
Percentage: 2.62%
", 35514, "
Step 10
Total: 159068us
DataLoader: 35514us
Percentage: 22.33%
", 14748, "
Step 10
Total: 159068us
CPU Exec: 14748us
Percentage: 9.27%
", 1742, "
Step 10
Total: 159068us
Other: 1742us
Percentage: 1.1%
"]]}, "performance": [{"name": "Average Step Time", "description": "", "value": 148736, "extra": 100, "children": [{"name": "Kernel", "description": "", "value": 100868, "extra": 67.82}, {"name": "Memcpy", "description": "", "value": 2122, "extra": 1.43}, {"name": "Memset", "description": "", "value": 69, "extra": 0.05}, {"name": "Runtime", "description": "", "value": 2898, "extra": 1.95}, {"name": "DataLoader", "description": "", "value": 31232, "extra": 21.0}, {"name": "CPU Exec", "description": "", "value": 10451, "extra": 7.03}, {"name": "Other", "description": "", "value": 1096, "extra": 0.74}]}], "recommendations": "
  • This run has high time cost on input data loading. 21.0% of the step time is in DataLoader. You could try to set num_workers on DataLoader's construction and enable multi-processes on data loading. Reference: Single- and Multi-process Data Loading
", "environments": [{"title": "Number of Worker(s)", "value": "1"}, {"title": "Device Type", "value": "GPU"}]} -{"device_total_time": {"title": "Device Total Time (us)", "columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["aten::cudnn_convolution_backward", 285514], ["CudnnConvolutionBackward", 285514], ["aten::cudnn_convolution_backward_weight", 149670], ["aten::cudnn_convolution_backward_input", 135844], ["aten::cudnn_convolution", 135735], ["aten::_convolution", 135735], ["aten::convolution", 135735], ["aten::conv2d", 135735], ["aten::cudnn_batch_norm_backward", 56884], ["CudnnBatchNormBackward", 56884], ["aten::cudnn_batch_norm", 33292], ["aten::_batch_norm_impl_index", 33292], ["aten::batch_norm", 33292], ["aten::threshold_backward", 26258], ["ReluBackward1", 26258], ["aten::add_", 23357], ["aten::threshold_", 17759], ["aten::relu_", 17759], ["aten::copy_", 12734], ["aten::to", 12734], ["aten::max_pool2d_with_indices_backward", 5046], ["MaxPool2DWithIndicesBackward", 5046], ["torch::autograd::AccumulateGrad", 2915], ["aten::fill_", 2414], ["aten::zero_", 2408], ["aten::mul_", 2380], ["aten::max_pool2d_with_indices", 1341], ["aten::max_pool2d", 1341], ["aten::zeros_like", 948], ["aten::add", 325], ["aten::mm", 295], ["AddmmBackward", 295], ["aten::mean", 256], ["aten::adaptive_avg_pool2d", 256], ["aten::addmm", 201], ["aten::div", 162], ["MeanBackward1", 162], ["aten::_log_softmax_backward_data", 64], ["LogSoftmaxBackward", 64], ["aten::_log_softmax", 60], ["aten::log_softmax", 60], ["aten::nll_loss_forward", 20], ["aten::nll_loss", 20], ["aten::nll_loss_backward", 18], ["NllLossBackward", 18], ["aten::ones_like", 6]]}, "device_self_time": {"title": "Device Self Time (us)", "columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["aten::cudnn_convolution_backward_weight", 149670], ["aten::cudnn_convolution_backward_input", 135844], ["aten::cudnn_convolution", 135735], 
["aten::cudnn_batch_norm_backward", 56884], ["aten::cudnn_batch_norm", 33292], ["aten::threshold_backward", 26258], ["aten::add_", 23357], ["aten::threshold_", 17759], ["aten::copy_", 12734], ["aten::max_pool2d_with_indices_backward", 4098], ["aten::fill_", 2414], ["aten::mul_", 2380], ["aten::max_pool2d_with_indices", 1341], ["aten::add", 325], ["aten::mm", 295], ["aten::mean", 256], ["aten::addmm", 201], ["aten::div", 162], ["aten::_log_softmax_backward_data", 64], ["aten::_log_softmax", 60], ["aten::nll_loss_forward", 20], ["aten::nll_loss_backward", 18]]}, "host_total_time": {"title": "Host Total Time (us)", "columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["aten::add_", 96814], ["CudnnConvolutionBackward", 90857], ["aten::cudnn_convolution_backward", 87104], ["aten::conv2d", 61610], ["aten::copy_", 60140], ["aten::convolution", 57644], ["aten::batch_norm", 55154], ["aten::_convolution", 53789], ["aten::_batch_norm_impl_index", 51122], ["aten::cudnn_convolution", 49275], ["aten::cudnn_batch_norm", 47638], ["aten::to", 46057], ["aten::cudnn_convolution_backward_weight", 39006], ["aten::cudnn_convolution_backward_input", 38583], ["aten::mul_", 36843], ["aten::zero_", 36160], ["torch::autograd::AccumulateGrad", 34208], ["aten::empty", 33098], ["aten::stack", 33058], ["CudnnBatchNormBackward", 32186], ["aten::cat", 31169], ["aten::_cat", 30970], ["aten::div", 30671], ["aten::cudnn_batch_norm_backward", 27883], ["aten::contiguous", 24479], ["aten::fill_", 21081], ["aten::relu_", 16620], ["ReluBackward1", 15142], ["aten::add", 14945], ["aten::threshold_backward", 12601], ["aten::threshold_", 9128], ["aten::empty_like", 8255], ["aten::view", 4811], ["aten::resize_", 3415], ["aten::permute", 3161], ["aten::set_", 2994], ["aten::empty_strided", 1725], ["AddmmBackward", 1462], ["aten::unsqueeze", 1293], ["aten::addmm", 1274], ["aten::as_strided", 948], ["aten::mm", 847], ["MaxPool2DWithIndicesBackward", 763], 
["aten::max_pool2d", 732], ["NllLossBackward", 719], ["aten::max_pool2d_with_indices_backward", 686], ["aten::t", 664], ["aten::zeros", 651], ["aten::max_pool2d_with_indices", 644], ["MeanBackward1", 606], ["aten::nll_loss_backward", 590], ["aten::adaptive_avg_pool2d", 566], ["aten::log_softmax", 527], ["aten::nll_loss", 500], ["aten::mean", 484], ["LogSoftmaxBackward", 451], ["aten::_log_softmax", 447], ["aten::nll_loss_forward", 425], ["aten::ones_like", 410], ["aten::_log_softmax_backward_data", 357], ["aten::zeros_like", 339], ["aten::transpose", 309], ["AddBackward0", 309], ["aten::reshape", 228], ["aten::flatten", 206], ["aten::expand", 141], ["TBackward", 140], ["ViewBackward", 121], ["aten::narrow", 87], ["aten::detach_", 64], ["aten::resize_as_", 54], ["aten::slice", 52], ["aten::conj", 46], ["detach_", 33]]}, "host_self_time": {"title": "Host Self Time (us)", "columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["aten::add_", 64646], ["aten::copy_", 45838], ["aten::cudnn_convolution", 34235], ["aten::empty", 33098], ["aten::_cat", 30756], ["aten::cudnn_batch_norm", 26997], ["aten::div", 25915], ["aten::cudnn_convolution_backward_input", 25552], ["aten::mul_", 24290], ["aten::cudnn_convolution_backward_weight", 22973], ["aten::cudnn_batch_norm_backward", 15840], ["aten::zero_", 15259], ["aten::add", 9687], ["aten::cudnn_convolution_backward", 9515], ["aten::fill_", 9102], ["aten::relu_", 7492], ["aten::threshold_backward", 7183], ["torch::autograd::AccumulateGrad", 6798], ["aten::view", 4811], ["aten::to", 4680], ["aten::_convolution", 4514], ["aten::empty_like", 4430], ["CudnnBatchNormBackward", 4303], ["aten::threshold_", 4261], ["aten::batch_norm", 4032], ["aten::conv2d", 3966], ["aten::convolution", 3855], ["CudnnConvolutionBackward", 3753], ["aten::_batch_norm_impl_index", 3484], ["aten::resize_", 3415], ["aten::set_", 2994], ["aten::permute", 2703], ["ReluBackward1", 2541], ["aten::contiguous", 2069], 
["aten::empty_strided", 1725], ["aten::as_strided", 948], ["aten::unsqueeze", 925], ["aten::addmm", 699], ["aten::stack", 596], ["aten::zeros", 463], ["aten::mm", 439], ["aten::max_pool2d_with_indices", 367], ["aten::t", 355], ["aten::nll_loss_forward", 310], ["AddBackward0", 309], ["aten::mean", 280], ["aten::nll_loss_backward", 278], ["aten::transpose", 226], ["aten::_log_softmax", 225], ["aten::max_pool2d_with_indices_backward", 211], ["aten::cat", 199], ["AddmmBackward", 195], ["aten::_log_softmax_backward_data", 153], ["NllLossBackward", 129], ["aten::expand", 113], ["MeanBackward1", 103], ["aten::ones_like", 100], ["LogSoftmaxBackward", 94], ["aten::max_pool2d", 88], ["aten::adaptive_avg_pool2d", 82], ["aten::log_softmax", 80], ["MaxPool2DWithIndicesBackward", 77], ["aten::nll_loss", 75], ["aten::reshape", 71], ["aten::flatten", 65], ["aten::zeros_like", 53], ["aten::conj", 46], ["aten::resize_as_", 44], ["aten::slice", 41], ["aten::narrow", 35], ["ViewBackward", 34], ["detach_", 33], ["aten::detach_", 31], ["TBackward", 29]]}} -{"data": {"columns": [{"type": "string", "name": "Name"}, {"type": "number", "name": "Calls"}, {"type": "number", "name": "Device Self Duration (us)"}, {"type": "number", "name": "Device Total Duration (us)"}, {"type": "number", "name": "Host Self Duration (us)"}, {"type": "number", "name": "Host Total Duration (us)"}], "rows": [["aten::cudnn_convolution_backward_weight", 318, 149670, 149670, 22973, 39006], ["aten::cudnn_convolution_backward_input", 312, 135844, 135844, 25552, 38583], ["aten::cudnn_convolution", 318, 135735, 135735, 34235, 49275], ["aten::cudnn_batch_norm_backward", 318, 56884, 56884, 15840, 27883], ["aten::cudnn_batch_norm", 318, 33292, 33292, 26997, 47638], ["aten::threshold_backward", 294, 26258, 26258, 7183, 12601], ["aten::add_", 2994, 23357, 23357, 64646, 96814], ["aten::threshold_", 294, 17759, 17759, 4261, 9128], ["aten::copy_", 588, 12734, 12734, 45838, 60140], ["aten::max_pool2d_with_indices_backward", 6, 
4098, 5046, 211, 686], ["aten::fill_", 978, 2414, 2414, 9102, 21081], ["aten::mul_", 966, 2380, 2380, 24290, 36843], ["aten::max_pool2d_with_indices", 6, 1341, 1341, 367, 644], ["aten::add", 318, 325, 325, 9687, 14945], ["aten::mm", 12, 295, 295, 439, 847], ["aten::mean", 6, 256, 256, 280, 484], ["aten::addmm", 6, 201, 201, 699, 1274], ["aten::div", 198, 162, 162, 25915, 30671], ["aten::_log_softmax_backward_data", 6, 64, 64, 153, 357], ["aten::_log_softmax", 6, 60, 60, 225, 447], ["aten::nll_loss_forward", 6, 20, 20, 310, 425], ["aten::nll_loss_backward", 6, 18, 18, 278, 590], ["aten::empty", 5748, 0, 0, 33098, 33098], ["aten::zero_", 996, 0, 2408, 15259, 36160], ["aten::zeros", 24, 0, 0, 463, 651], ["aten::set_", 192, 0, 0, 2994, 2994], ["aten::view", 840, 0, 0, 4811, 4811], ["aten::as_strided", 432, 0, 0, 948, 948], ["aten::permute", 192, 0, 0, 2703, 3161], ["aten::empty_like", 534, 0, 0, 4430, 8255], ["aten::contiguous", 192, 0, 0, 2069, 24479], ["aten::empty_strided", 402, 0, 0, 1725, 1725], ["aten::to", 408, 0, 12734, 4680, 46057], ["aten::unsqueeze", 192, 0, 0, 925, 1293], ["aten::resize_", 1926, 0, 0, 3415, 3415], ["aten::slice", 6, 0, 0, 41, 52], ["aten::narrow", 6, 0, 0, 35, 87], ["aten::_cat", 6, 0, 0, 30756, 30970], ["aten::cat", 6, 0, 0, 199, 31169], ["aten::stack", 6, 0, 0, 596, 33058], ["detach_", 6, 0, 0, 33, 33], ["aten::detach_", 6, 0, 0, 31, 64], ["aten::_convolution", 318, 0, 135735, 4514, 53789], ["aten::convolution", 318, 0, 135735, 3855, 57644], ["aten::conv2d", 318, 0, 135735, 3966, 61610], ["aten::_batch_norm_impl_index", 318, 0, 33292, 3484, 51122], ["aten::batch_norm", 318, 0, 33292, 4032, 55154], ["aten::relu_", 294, 0, 17759, 7492, 16620], ["aten::max_pool2d", 6, 0, 1341, 88, 732], ["aten::adaptive_avg_pool2d", 6, 0, 256, 82, 566], ["aten::reshape", 12, 0, 0, 71, 228], ["aten::flatten", 6, 0, 0, 65, 206], ["aten::transpose", 30, 0, 0, 226, 309], ["aten::t", 30, 0, 0, 355, 664], ["aten::expand", 12, 0, 0, 113, 141], ["aten::log_softmax", 
6, 0, 60, 80, 527], ["aten::nll_loss", 6, 0, 20, 75, 500], ["aten::ones_like", 6, 0, 6, 100, 410], ["NllLossBackward", 6, 0, 18, 129, 719], ["LogSoftmaxBackward", 6, 0, 64, 94, 451], ["aten::conj", 12, 0, 0, 46, 46], ["AddmmBackward", 6, 0, 295, 195, 1462], ["torch::autograd::AccumulateGrad", 966, 0, 2915, 6798, 34208], ["TBackward", 6, 0, 0, 29, 140], ["ViewBackward", 6, 0, 0, 34, 121], ["MeanBackward1", 6, 0, 162, 103, 606], ["ReluBackward1", 294, 0, 26258, 2541, 15142], ["AddBackward0", 96, 0, 0, 309, 309], ["CudnnBatchNormBackward", 318, 0, 56884, 4303, 32186], ["aten::cudnn_convolution_backward", 318, 0, 285514, 9515, 87104], ["CudnnConvolutionBackward", 318, 0, 285514, 3753, 90857], ["aten::zeros_like", 6, 0, 948, 53, 339], ["aten::resize_as_", 6, 0, 0, 44, 54], ["MaxPool2DWithIndicesBackward", 6, 0, 5046, 77, 763]]}} -{"data": {"columns": [{"type": "string", "name": "Name"}, {"type": "number", "name": "Calls"}, {"type": "number", "name": "Total Duration (us)"}, {"type": "number", "name": "Mean Duration (us)"}, {"type": "number", "name": "Max Duration (us)"}, {"type": "number", "name": "Min Duration (us)"}], "rows": [["void cudnn::detail::dgrad_engine(int, int, int, float const*, int, float const*, int, float*, kernel_grad_params, unsigned long long, int, unsigned long long, int, float, int, int, int)", 167, 86835, 520, 1084, 330], ["void cudnn::bn_bw_1C11_kernel_new(float, float, float, float, cudnnTensorStruct, float const*, cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float*, float*, float const*, float const*, float)", 287, 61395, 214, 799, 43], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 104, 53577, 515, 815, 393], ["void at::native::vectorized_elementwise_kernel<4, at::native::threshold_kernel_impl(at::TensorIterator&, float, float)::{lambda(float, float)#1}, at::detail::Array >(int, 
at::native::threshold_kernel_impl(at::TensorIterator&, float, float)::{lambda(float, float)#1}, at::detail::Array)", 609, 47050, 77, 364, 6], ["void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", 3489, 41190, 12, 364, 1], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 90, 40341, 448, 753, 381], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 60, 28063, 468, 851, 361], ["volta_scudnn_128x128_stridedB_splitK_medium_nn_v1", 72, 27624, 384, 667, 354], ["volta_scudnn_128x64_stridedB_splitK_xregs_large_nn_v1", 34, 27184, 800, 885, 664], ["void cudnn::bn_fw_tr_1C11_kernel_NCHW(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float)", 150, 26234, 175, 426, 50], ["volta_sgemm_128x64_nt", 126, 23737, 188, 206, 155], ["volta_scudnn_128x128_stridedB_splitK_small_nn_v1", 48, 21753, 453, 705, 328], ["volta_scudnn_winograd_128x128_ldg1_ldg4_relu_tile148t_nt_v1", 39, 14259, 366, 370, 361], ["volta_sgemm_128x64_nn", 60, 11407, 190, 207, 156], ["volta_scudnn_128x64_stridedB_interior_nn_v1", 34, 10904, 321, 525, 263], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 12, 8816, 735, 784, 660], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 14, 8346, 596, 990, 207], ["volta_scudnn_128x64_relu_interior_nn_v1", 24, 7210, 300, 311, 295], ["void 
cudnn::bn_fw_tr_1C11_singleread(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float, cudnn::reduced_divisor, int, cudnn::reduced_divisor, cudnn::bnFwPersistentState*, int, float, float, float, int, float, float, cudnnStatus_t*, bool)", 168, 7058, 42, 87, 14], ["volta_scudnn_128x128_stridedB_interior_nn_v1", 21, 5717, 272, 275, 269], ["void cudnn::ops::scalePackedTensor_kernel(cudnnTensor4dStruct, float*, float)", 167, 5482, 33, 158, 6], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 12, 5341, 445, 449, 442], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 7, 5298, 757, 780, 732], ["void cudnn::winograd_nonfused::winogradForwardOutput4x4(cudnn::winograd_nonfused::WinogradOutputParams)", 123, 5250, 43, 68, 19], ["void explicit_convolve_sgemm(int, int, int, float const*, int, float const*, int, float*, kernel_conv_params, unsigned long long, int, unsigned long long, int, float, float, int, float const*, float const*)", 6, 4895, 816, 822, 809], ["void at::native::(anonymous namespace)::max_pool_backward_nchw(int, float const*, long const*, int, int, int, int, int, int, int, int, int, int, int, int, int, int, float*)", 7, 4781, 683, 684, 682], ["void cudnn::winograd_nonfused::winogradForwardData4x4(cudnn::winograd_nonfused::WinogradDataParams)", 123, 4765, 39, 63, 17], ["volta_scudnn_128x128_stridedB_medium_nn_v1", 14, 4312, 308, 325, 299], ["volta_scudnn_128x64_relu_medium_nn_v1", 6, 3829, 638, 641, 637], ["volta_scudnn_128x128_stridedB_small_nn_v1", 7, 3709, 530, 533, 527], ["void explicit_convolve_sgemm(int, int, int, float const*, int, float const*, int, float*, kernel_conv_params, unsigned long long, int, 
unsigned long long, int, float, float, int, float const*, float const*)", 6, 3533, 589, 659, 573], ["volta_scudnn_128x64_relu_xregs_large_nn_v1", 6, 3532, 589, 672, 569], ["volta_scudnn_128x64_relu_small_nn_v1", 12, 3395, 283, 296, 270], ["void at::native::vectorized_elementwise_kernel<4, at::native::MulScalarFunctor, at::detail::Array >(int, at::native::MulScalarFunctor, at::detail::Array)", 1127, 2779, 2, 24, 1], ["void cudnn::winograd_nonfused::winogradForwardFilter4x4(cudnn::winograd_nonfused::WinogradFilterParams)", 123, 2617, 21, 66, 4], ["void cudnn::winograd_nonfused::winogradWgradData4x4(cudnn::winograd_nonfused::WinogradDataParams)", 63, 2602, 41, 60, 21], ["void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", 979, 2572, 3, 158, 0], ["void cudnn::winograd_nonfused::winogradWgradDelta4x4(cudnn::winograd_nonfused::WinogradDeltaParams)", 63, 2449, 39, 61, 16], ["void cudnn::bn_bw_1C11_singleread(float, float, float, float, cudnnTensorStruct, float const*, cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float*, float*, float const*, float const*, float, cudnn::reduced_divisor, int, cudnn::reduced_divisor, cudnn::bnBwPersistentState*, int, float, float, float, int, float, cudnnStatus_t*, bool)", 54, 2283, 42, 74, 19], ["void cudnn::cnn::im2col4d_kernel(cudnn::cnn::im2col4d_params, cudnnConvolutionStruct, cudnnTensor4dStruct, float const*, float*)", 12, 1699, 142, 184, 98], ["void at::native::(anonymous namespace)::max_pool_forward_nchw(int, float const*, int, int, int, int, int, int, int, int, int, int, int, int, int, int, float*, long*)", 6, 1341, 224, 224, 223], ["void cudnn::winograd_nonfused::winogradWgradOutput4x4(cudnn::winograd_nonfused::WinogradWgradOutputParams)", 63, 1306, 21, 63, 4], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, 
float const*, float const*, bool, int, int)", 6, 848, 141, 142, 140], ["volta_scudnn_128x64_stridedB_small_nn_v1", 7, 666, 95, 96, 94], ["cask_cudnn::computeOffsetsKernel(cask_cudnn::ComputeOffsetsParams)", 131, 330, 3, 4, 2], ["void at::native::vectorized_elementwise_kernel<4, at::native::BUnaryFunctor >, at::detail::Array >(int, at::native::BUnaryFunctor >, at::detail::Array)", 318, 325, 1, 2, 1], ["void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, float, 4> >(at::native::ReduceOp, unsigned int, float, 4>)", 6, 256, 43, 43, 42], ["cask_cudnn::computeWgradSplitKOffsetsKernel(cask_cudnn::ComputeSplitKOffsetsParams)", 154, 198, 1, 2, 1], ["cask_cudnn::computeWgradBOffsetsKernel(cask_cudnn::ComputeWgradBOffsetsParams)", 154, 174, 1, 2, 1], ["volta_sgemm_64x32_sliced1x4_nn", 6, 166, 28, 28, 27], ["void at::native::unrolled_elementwise_kernel, at::detail::Array, OffsetCalculator<1, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast>(int, at::native::MulScalarFunctor, at::detail::Array, OffsetCalculator<1, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast)", 6, 162, 27, 27, 27], ["volta_sgemm_64x32_sliced1x4_tn", 6, 145, 24, 25, 24], ["void cudnn::winograd::generateWinogradTilesKernel<0, float, float>(cudnn::winograd::GenerateWinogradTilesParams)", 39, 135, 3, 5, 3], ["volta_sgemm_128x32_nt", 6, 117, 20, 20, 19], ["cask_cudnn::computeBOffsetsKernel(cask_cudnn::ComputeBOffsetsParams)", 83, 90, 1, 2, 1], ["void (anonymous namespace)::softmax_warp_backward(float*, float const*, float const*, int, int, int)", 6, 64, 11, 11, 10], ["void (anonymous namespace)::softmax_warp_forward(float*, float const*, int, int, int)", 6, 60, 10, 10, 10], ["void at::native::reduce_kernel<128, 4, at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4> 
>(at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4>)", 6, 48, 8, 8, 8], ["void splitKreduce_kernel(cublasSplitKParams, float const*, float const*, float*, float const*, float const*)", 12, 35, 3, 4, 2], ["void at::native::unrolled_elementwise_kernel, OffsetCalculator<1, unsigned int>, char*, at::native::memory::LoadWithoutCast, at::detail::Array::StoreWithoutCast>(int, at::native::copy_device_to_device(at::TensorIterator&, bool)::{lambda()#2}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array, OffsetCalculator<1, unsigned int>, char*, at::native::memory::LoadWithoutCast, at::detail::Array::StoreWithoutCast)", 6, 33, 6, 6, 5], ["void cunn_ClassNLLCriterion_updateOutput_kernel(float*, float*, float*, long*, float*, int, int, int, int, long)", 6, 20, 3, 4, 3], ["void cunn_ClassNLLCriterion_updateGradInput_kernel(float*, float*, long*, float*, float*, int, int, int, int, long)", 6, 12, 2, 2, 2]]}} -{"total": {"columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["void cudnn::detail::dgrad_engine(int, int, int, float const*, int, float const*, int, float*, kernel_grad_params, unsigned long long, int, unsigned long long, int, float, int, int, int)", 86835.0], ["void cudnn::bn_bw_1C11_kernel_new(float, float, float, float, cudnnTensorStruct, float const*, cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float*, float*, float const*, float const*, float)", 61395.0], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 53577.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::threshold_kernel_impl(at::TensorIterator&, float, float)::{lambda(float, float)#1}, at::detail::Array >(int, at::native::threshold_kernel_impl(at::TensorIterator&, float, float)::{lambda(float, float)#1}, 
at::detail::Array)", 47050.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", 41190.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 40341.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 28063.0], ["volta_scudnn_128x128_stridedB_splitK_medium_nn_v1", 27624.0], ["volta_scudnn_128x64_stridedB_splitK_xregs_large_nn_v1", 27184.0], ["void cudnn::bn_fw_tr_1C11_kernel_NCHW(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float)", 26234.0], ["volta_sgemm_128x64_nt", 23737.0], ["volta_scudnn_128x128_stridedB_splitK_small_nn_v1", 21753.0], ["volta_scudnn_winograd_128x128_ldg1_ldg4_relu_tile148t_nt_v1", 14259.0], ["volta_sgemm_128x64_nn", 11407.0], ["volta_scudnn_128x64_stridedB_interior_nn_v1", 10904.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 8816.0], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 8346.0], ["volta_scudnn_128x64_relu_interior_nn_v1", 7210.0], ["void cudnn::bn_fw_tr_1C11_singleread(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float, cudnn::reduced_divisor, int, cudnn::reduced_divisor, cudnn::bnFwPersistentState*, int, float, float, float, int, float, float, cudnnStatus_t*, bool)", 7058.0], 
["volta_scudnn_128x128_stridedB_interior_nn_v1", 5717.0], ["void cudnn::ops::scalePackedTensor_kernel(cudnnTensor4dStruct, float*, float)", 5482.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 5341.0], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 5298.0], ["void cudnn::winograd_nonfused::winogradForwardOutput4x4(cudnn::winograd_nonfused::WinogradOutputParams)", 5250.0], ["void explicit_convolve_sgemm(int, int, int, float const*, int, float const*, int, float*, kernel_conv_params, unsigned long long, int, unsigned long long, int, float, float, int, float const*, float const*)", 4895.0], ["void at::native::(anonymous namespace)::max_pool_backward_nchw(int, float const*, long const*, int, int, int, int, int, int, int, int, int, int, int, int, int, int, float*)", 4781.0], ["void cudnn::winograd_nonfused::winogradForwardData4x4(cudnn::winograd_nonfused::WinogradDataParams)", 4765.0], ["volta_scudnn_128x128_stridedB_medium_nn_v1", 4312.0], ["volta_scudnn_128x64_relu_medium_nn_v1", 3829.0], ["volta_scudnn_128x128_stridedB_small_nn_v1", 3709.0], ["void explicit_convolve_sgemm(int, int, int, float const*, int, float const*, int, float*, kernel_conv_params, unsigned long long, int, unsigned long long, int, float, float, int, float const*, float const*)", 3533.0], ["volta_scudnn_128x64_relu_xregs_large_nn_v1", 3532.0], ["volta_scudnn_128x64_relu_small_nn_v1", 3395.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::MulScalarFunctor, at::detail::Array >(int, at::native::MulScalarFunctor, at::detail::Array)", 2779.0], ["void cudnn::winograd_nonfused::winogradForwardFilter4x4(cudnn::winograd_nonfused::WinogradFilterParams)", 2617.0], ["void 
cudnn::winograd_nonfused::winogradWgradData4x4(cudnn::winograd_nonfused::WinogradDataParams)", 2602.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", 2572.0], ["void cudnn::winograd_nonfused::winogradWgradDelta4x4(cudnn::winograd_nonfused::WinogradDeltaParams)", 2449.0], ["void cudnn::bn_bw_1C11_singleread(float, float, float, float, cudnnTensorStruct, float const*, cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float*, float*, float const*, float const*, float, cudnn::reduced_divisor, int, cudnn::reduced_divisor, cudnn::bnBwPersistentState*, int, float, float, float, int, float, cudnnStatus_t*, bool)", 2283.0], ["void cudnn::cnn::im2col4d_kernel(cudnn::cnn::im2col4d_params, cudnnConvolutionStruct, cudnnTensor4dStruct, float const*, float*)", 1699.0], ["void at::native::(anonymous namespace)::max_pool_forward_nchw(int, float const*, int, int, int, int, int, int, int, int, int, int, int, int, int, int, float*, long*)", 1341.0], ["void cudnn::winograd_nonfused::winogradWgradOutput4x4(cudnn::winograd_nonfused::WinogradWgradOutputParams)", 1306.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 848.0], ["volta_scudnn_128x64_stridedB_small_nn_v1", 666.0], ["cask_cudnn::computeOffsetsKernel(cask_cudnn::ComputeOffsetsParams)", 330.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::BUnaryFunctor >, at::detail::Array >(int, at::native::BUnaryFunctor >, at::detail::Array)", 325.0], ["void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, float, 4> >(at::native::ReduceOp, unsigned int, float, 4>)", 256.0], ["cask_cudnn::computeWgradSplitKOffsetsKernel(cask_cudnn::ComputeSplitKOffsetsParams)", 198.0], 
["cask_cudnn::computeWgradBOffsetsKernel(cask_cudnn::ComputeWgradBOffsetsParams)", 174.0], ["volta_sgemm_64x32_sliced1x4_nn", 166.0], ["void at::native::unrolled_elementwise_kernel, at::detail::Array, OffsetCalculator<1, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast>(int, at::native::MulScalarFunctor, at::detail::Array, OffsetCalculator<1, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast)", 162.0], ["volta_sgemm_64x32_sliced1x4_tn", 145.0], ["void cudnn::winograd::generateWinogradTilesKernel<0, float, float>(cudnn::winograd::GenerateWinogradTilesParams)", 135.0], ["volta_sgemm_128x32_nt", 117.0], ["cask_cudnn::computeBOffsetsKernel(cask_cudnn::ComputeBOffsetsParams)", 90.0], ["void (anonymous namespace)::softmax_warp_backward(float*, float const*, float const*, int, int, int)", 64.0], ["void (anonymous namespace)::softmax_warp_forward(float*, float const*, int, int, int)", 60.0], ["void at::native::reduce_kernel<128, 4, at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4> >(at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4>)", 48.0], ["void splitKreduce_kernel(cublasSplitKParams, float const*, float const*, float*, float const*, float const*)", 35.0], ["void at::native::unrolled_elementwise_kernel, OffsetCalculator<1, unsigned int>, char*, at::native::memory::LoadWithoutCast, at::detail::Array::StoreWithoutCast>(int, at::native::copy_device_to_device(at::TensorIterator&, bool)::{lambda()#2}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array, OffsetCalculator<1, unsigned int>, char*, at::native::memory::LoadWithoutCast, at::detail::Array::StoreWithoutCast)", 33.0], ["void cunn_ClassNLLCriterion_updateOutput_kernel(float*, float*, float*, long*, float*, int, int, int, 
int, long)", 20.0], ["void cunn_ClassNLLCriterion_updateGradInput_kernel(float*, float*, long*, float*, float*, int, int, int, int, long)", 12.0]]}} -{"steps": {"columns": [{"type": "string", "name": "Step"}, {"type": "number", "name": "Kernel"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "Memcpy"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "Memset"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "Runtime"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "DataLoader"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "CPU Exec"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "Other"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}], "rows": [["5", 101214, "
Step 5
Total: 118439us
Kernel: 101214us
Percentage: 85.46%
", 3344, "
Step 5
Total: 118439us
Memcpy: 3344us
Percentage: 2.82%
", 54, "
Step 5
Total: 118439us
Memset: 54us
Percentage: 0.05%
", 2996, "
Step 5
Total: 118439us
Runtime: 2996us
Percentage: 2.53%
", 3, "
Step 5
Total: 118439us
DataLoader: 3us
Percentage: 0.0%
", 10088, "
Step 5
Total: 118439us
CPU Exec: 10088us
Percentage: 8.52%
", 740, "
Step 5
Total: 118439us
Other: 740us
Percentage: 0.62%
"], ["6", 101100, "
Step 6
Total: 116360us
Kernel: 101100us
Percentage: 86.89%
", 3239, "
Step 6
Total: 116360us
Memcpy: 3239us
Percentage: 2.78%
", 54, "
Step 6
Total: 116360us
Memset: 54us
Percentage: 0.05%
", 2811, "
Step 6
Total: 116360us
Runtime: 2811us
Percentage: 2.42%
", 15, "
Step 6
Total: 116360us
DataLoader: 15us
Percentage: 0.01%
", 8391, "
Step 6
Total: 116360us
CPU Exec: 8391us
Percentage: 7.21%
", 750, "
Step 6
Total: 116360us
Other: 750us
Percentage: 0.64%
"], ["7", 101159, "
Step 7
Total: 114583us
Kernel: 101159us
Percentage: 88.28%
", 3218, "
Step 7
Total: 114583us
Memcpy: 3218us
Percentage: 2.81%
", 54, "
Step 7
Total: 114583us
Memset: 54us
Percentage: 0.05%
", 2584, "
Step 7
Total: 114583us
Runtime: 2584us
Percentage: 2.26%
", 23, "
Step 7
Total: 114583us
DataLoader: 23us
Percentage: 0.02%
", 6908, "
Step 7
Total: 114583us
CPU Exec: 6908us
Percentage: 6.03%
", 637, "
Step 7
Total: 114583us
Other: 637us
Percentage: 0.56%
"], ["8", 101317, "
Step 8
Total: 119884us
Kernel: 101317us
Percentage: 84.51%
", 3251, "
Step 8
Total: 119884us
Memcpy: 3251us
Percentage: 2.71%
", 54, "
Step 8
Total: 119884us
Memset: 54us
Percentage: 0.05%
", 2929, "
Step 8
Total: 119884us
Runtime: 2929us
Percentage: 2.44%
", 13, "
Step 8
Total: 119884us
DataLoader: 13us
Percentage: 0.01%
", 11610, "
Step 8
Total: 119884us
CPU Exec: 11610us
Percentage: 9.68%
", 710, "
Step 8
Total: 119884us
Other: 710us
Percentage: 0.59%
"], ["9", 101022, "
Step 9
Total: 117173us
Kernel: 101022us
Percentage: 86.22%
", 3308, "
Step 9
Total: 117173us
Memcpy: 3308us
Percentage: 2.82%
", 54, "
Step 9
Total: 117173us
Memset: 54us
Percentage: 0.05%
", 3002, "
Step 9
Total: 117173us
Runtime: 3002us
Percentage: 2.56%
", 16, "
Step 9
Total: 117173us
DataLoader: 16us
Percentage: 0.01%
", 9021, "
Step 9
Total: 117173us
CPU Exec: 9021us
Percentage: 7.7%
", 750, "
Step 9
Total: 117173us
Other: 750us
Percentage: 0.64%
"], ["10", 101236, "
Step 10
Total: 139414us
Kernel: 101236us
Percentage: 72.62%
", 3361, "
Step 10
Total: 139414us
Memcpy: 3361us
Percentage: 2.41%
", 54, "
Step 10
Total: 139414us
Memset: 54us
Percentage: 0.04%
", 2192, "
Step 10
Total: 139414us
Runtime: 2192us
Percentage: 1.57%
", 0, "
Step 10
Total: 139414us
DataLoader: 0us
Percentage: 0.0%
", 8527, "
Step 10
Total: 139414us
CPU Exec: 8527us
Percentage: 6.12%
", 24044, "
Step 10
Total: 139414us
Other: 24044us
Percentage: 17.25%
"]]}, "performance": [{"name": "Average Step Time", "description": "", "value": 120976, "extra": 100, "children": [{"name": "Kernel", "description": "", "value": 101175, "extra": 83.63}, {"name": "Memcpy", "description": "", "value": 3287, "extra": 2.72}, {"name": "Memset", "description": "", "value": 54, "extra": 0.04}, {"name": "Runtime", "description": "", "value": 2752, "extra": 2.28}, {"name": "DataLoader", "description": "", "value": 12, "extra": 0.01}, {"name": "CPU Exec", "description": "", "value": 9091, "extra": 7.51}, {"name": "Other", "description": "", "value": 4605, "extra": 3.81}]}], "recommendations": "
  • N/A
", "environments": [{"title": "Number of Worker(s)", "value": "1"}, {"title": "Device Type", "value": "GPU"}]} -{"device_total_time": {"title": "Device Total Time (us)", "columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["aten::cudnn_convolution_backward", 288342], ["CudnnConvolutionBackward", 288342], ["aten::cudnn_convolution_backward_weight", 151977], ["aten::cudnn_convolution_backward_input", 136365], ["aten::cudnn_convolution", 134544], ["aten::_convolution", 134544], ["aten::convolution", 134544], ["aten::conv2d", 134544], ["aten::cudnn_batch_norm_backward", 56960], ["CudnnBatchNormBackward", 56960], ["aten::cudnn_batch_norm", 33334], ["aten::_batch_norm_impl_index", 33334], ["aten::batch_norm", 33334], ["aten::threshold_backward", 26280], ["ReluBackward1", 26280], ["aten::add_", 23354], ["aten::to", 19721], ["aten::copy_", 19721], ["aten::threshold_", 17770], ["aten::relu_", 17770], ["aten::max_pool2d_with_indices_backward", 5053], ["MaxPool2DWithIndicesBackward", 5053], ["torch::autograd::AccumulateGrad", 2918], ["aten::fill_", 2376], ["aten::mul_", 2376], ["aten::zero_", 2370], ["aten::max_pool2d_with_indices", 1341], ["aten::max_pool2d", 1341], ["aten::zeros_like", 948], ["aten::add", 327], ["aten::mm", 288], ["AddmmBackward", 288], ["aten::mean", 258], ["aten::adaptive_avg_pool2d", 258], ["aten::addmm", 204], ["aten::div", 161], ["MeanBackward1", 161], ["aten::_log_softmax_backward_data", 63], ["LogSoftmaxBackward", 63], ["aten::_log_softmax", 60], ["aten::log_softmax", 60], ["aten::nll_loss_forward", 21], ["aten::nll_loss", 21], ["aten::nll_loss_backward", 19], ["NllLossBackward", 19], ["aten::ones_like", 6]]}, "device_self_time": {"title": "Device Self Time (us)", "columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["aten::cudnn_convolution_backward_weight", 151977], ["aten::cudnn_convolution_backward_input", 136365], ["aten::cudnn_convolution", 134544], 
["aten::cudnn_batch_norm_backward", 56960], ["aten::cudnn_batch_norm", 33334], ["aten::threshold_backward", 26280], ["aten::add_", 23354], ["aten::copy_", 19721], ["aten::threshold_", 17770], ["aten::max_pool2d_with_indices_backward", 4105], ["aten::fill_", 2376], ["aten::mul_", 2376], ["aten::max_pool2d_with_indices", 1341], ["aten::add", 327], ["aten::mm", 288], ["aten::mean", 258], ["aten::addmm", 204], ["aten::div", 161], ["aten::_log_softmax_backward_data", 63], ["aten::_log_softmax", 60], ["aten::nll_loss_forward", 21], ["aten::nll_loss_backward", 19]]}, "host_total_time": {"title": "Host Total Time (us)", "columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["aten::to", 95877], ["aten::copy_", 95330], ["CudnnConvolutionBackward", 89870], ["aten::add_", 88364], ["aten::cudnn_convolution_backward", 85929], ["aten::conv2d", 60800], ["aten::convolution", 56995], ["aten::batch_norm", 53643], ["aten::_convolution", 53318], ["aten::_batch_norm_impl_index", 50036], ["aten::cudnn_convolution", 48860], ["aten::cudnn_batch_norm", 46680], ["torch::autograd::AccumulateGrad", 43241], ["aten::cudnn_convolution_backward_input", 39025], ["aten::cudnn_convolution_backward_weight", 37464], ["CudnnBatchNormBackward", 34153], ["aten::mul_", 32585], ["aten::zero_", 32441], ["aten::cudnn_batch_norm_backward", 29705], ["aten::empty", 29598], ["aten::fill_", 19488], ["aten::relu_", 16391], ["ReluBackward1", 15546], ["aten::add", 14077], ["aten::threshold_backward", 13019], ["aten::threshold_", 8921], ["aten::empty_like", 6343], ["aten::resize_", 3854], ["aten::view", 2824], ["AddmmBackward", 1508], ["aten::addmm", 1219], ["aten::mm", 857], ["MaxPool2DWithIndicesBackward", 779], ["NllLossBackward", 737], ["aten::t", 714], ["aten::max_pool2d", 709], ["aten::max_pool2d_with_indices_backward", 699], ["aten::zeros", 625], ["aten::max_pool2d_with_indices", 622], ["MeanBackward1", 615], ["aten::nll_loss_backward", 604], 
["aten::adaptive_avg_pool2d", 530], ["aten::log_softmax", 513], ["aten::nll_loss", 504], ["LogSoftmaxBackward", 454], ["aten::mean", 449], ["aten::_log_softmax", 434], ["aten::nll_loss_forward", 430], ["aten::div", 426], ["aten::_log_softmax_backward_data", 383], ["aten::ones_like", 381], ["AddBackward0", 337], ["aten::transpose", 331], ["aten::zeros_like", 331], ["aten::empty_strided", 319], ["aten::reshape", 223], ["aten::flatten", 187], ["TBackward", 174], ["aten::expand", 150], ["ViewBackward", 130], ["aten::as_strided", 128], ["aten::set_", 118], ["aten::detach_", 95], ["aten::resize_as_", 60], ["aten::conj", 53], ["detach_", 32]]}, "host_self_time": {"title": "Host Self Time (us)", "columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["aten::add_", 55210], ["aten::cudnn_convolution", 34694], ["aten::empty", 29598], ["aten::cudnn_batch_norm", 26054], ["aten::cudnn_convolution_backward_input", 25909], ["aten::cudnn_convolution_backward_weight", 22068], ["aten::mul_", 20698], ["aten::cudnn_batch_norm_backward", 17176], ["aten::zero_", 13103], ["torch::autograd::AccumulateGrad", 12619], ["aten::cudnn_convolution_backward", 9440], ["aten::add", 8964], ["aten::fill_", 8708], ["aten::relu_", 7470], ["aten::threshold_backward", 7358], ["aten::_convolution", 4458], ["CudnnBatchNormBackward", 4448], ["aten::threshold_", 4042], ["CudnnConvolutionBackward", 3941], ["aten::resize_", 3854], ["aten::conv2d", 3805], ["aten::convolution", 3677], ["aten::batch_norm", 3607], ["aten::empty_like", 3452], ["aten::_batch_norm_impl_index", 3356], ["aten::view", 2824], ["ReluBackward1", 2527], ["aten::addmm", 690], ["aten::zeros", 465], ["aten::mm", 460], ["aten::copy_", 426], ["aten::t", 383], ["aten::max_pool2d_with_indices", 363], ["AddBackward0", 337], ["aten::empty_strided", 319], ["aten::nll_loss_forward", 317], ["aten::to", 300], ["aten::mean", 283], ["aten::nll_loss_backward", 279], ["aten::div", 265], ["aten::transpose", 233], 
["aten::_log_softmax", 224], ["aten::max_pool2d_with_indices_backward", 223], ["AddmmBackward", 213], ["aten::_log_softmax_backward_data", 160], ["NllLossBackward", 133], ["aten::as_strided", 128], ["aten::expand", 120], ["aten::set_", 118], ["aten::max_pool2d", 87], ["MeanBackward1", 87], ["aten::ones_like", 85], ["aten::adaptive_avg_pool2d", 81], ["MaxPool2DWithIndicesBackward", 80], ["aten::log_softmax", 79], ["aten::nll_loss", 74], ["LogSoftmaxBackward", 71], ["aten::reshape", 70], ["aten::detach_", 63], ["aten::flatten", 59], ["aten::zeros_like", 54], ["aten::conj", 53], ["aten::resize_as_", 49], ["TBackward", 43], ["ViewBackward", 35], ["detach_", 32]]}} -{"data": {"columns": [{"type": "string", "name": "Name"}, {"type": "number", "name": "Calls"}, {"type": "number", "name": "Device Self Duration (us)"}, {"type": "number", "name": "Device Total Duration (us)"}, {"type": "number", "name": "Host Self Duration (us)"}, {"type": "number", "name": "Host Total Duration (us)"}], "rows": [["aten::cudnn_convolution_backward_weight", 318, 151977, 151977, 22068, 37464], ["aten::cudnn_convolution_backward_input", 312, 136365, 136365, 25909, 39025], ["aten::cudnn_convolution", 318, 134544, 134544, 34694, 48860], ["aten::cudnn_batch_norm_backward", 318, 56960, 56960, 17176, 29705], ["aten::cudnn_batch_norm", 318, 33334, 33334, 26054, 46680], ["aten::threshold_backward", 294, 26280, 26280, 7358, 13019], ["aten::add_", 2994, 23354, 23354, 55210, 88364], ["aten::copy_", 12, 19721, 19721, 426, 95330], ["aten::threshold_", 294, 17770, 17770, 4042, 8921], ["aten::max_pool2d_with_indices_backward", 6, 4105, 5053, 223, 699], ["aten::fill_", 978, 2376, 2376, 8708, 19488], ["aten::mul_", 966, 2376, 2376, 20698, 32585], ["aten::max_pool2d_with_indices", 6, 1341, 1341, 363, 622], ["aten::add", 318, 327, 327, 8964, 14077], ["aten::mm", 12, 288, 288, 460, 857], ["aten::mean", 6, 258, 258, 283, 449], ["aten::addmm", 6, 204, 204, 690, 1219], ["aten::div", 6, 161, 161, 265, 426], 
["aten::_log_softmax_backward_data", 6, 63, 63, 160, 383], ["aten::_log_softmax", 6, 60, 60, 224, 434], ["aten::nll_loss_forward", 6, 21, 21, 317, 430], ["aten::nll_loss_backward", 6, 19, 19, 279, 604], ["aten::empty", 5172, 0, 0, 29598, 29598], ["aten::zero_", 996, 0, 2370, 13103, 32441], ["aten::zeros", 24, 0, 0, 465, 625], ["aten::to", 30, 0, 19721, 300, 95877], ["detach_", 12, 0, 0, 32, 32], ["aten::detach_", 12, 0, 0, 63, 95], ["aten::set_", 12, 0, 0, 118, 118], ["aten::empty_strided", 18, 0, 0, 319, 319], ["aten::resize_", 1920, 0, 0, 3854, 3854], ["aten::_convolution", 318, 0, 134544, 4458, 53318], ["aten::convolution", 318, 0, 134544, 3677, 56995], ["aten::conv2d", 318, 0, 134544, 3805, 60800], ["aten::empty_like", 342, 0, 0, 3452, 6343], ["aten::view", 648, 0, 0, 2824, 2824], ["aten::_batch_norm_impl_index", 318, 0, 33334, 3356, 50036], ["aten::batch_norm", 318, 0, 33334, 3607, 53643], ["aten::relu_", 294, 0, 17770, 7470, 16391], ["aten::max_pool2d", 6, 0, 1341, 87, 709], ["aten::adaptive_avg_pool2d", 6, 0, 258, 81, 530], ["aten::reshape", 12, 0, 0, 70, 223], ["aten::flatten", 6, 0, 0, 59, 187], ["aten::as_strided", 42, 0, 0, 128, 128], ["aten::transpose", 30, 0, 0, 233, 331], ["aten::t", 30, 0, 0, 383, 714], ["aten::expand", 12, 0, 0, 120, 150], ["aten::log_softmax", 6, 0, 60, 79, 513], ["aten::nll_loss", 6, 0, 21, 74, 504], ["aten::ones_like", 6, 0, 6, 85, 381], ["NllLossBackward", 6, 0, 19, 133, 737], ["LogSoftmaxBackward", 6, 0, 63, 71, 454], ["aten::conj", 12, 0, 0, 53, 53], ["AddmmBackward", 6, 0, 288, 213, 1508], ["torch::autograd::AccumulateGrad", 966, 0, 2918, 12619, 43241], ["TBackward", 6, 0, 0, 43, 174], ["ViewBackward", 6, 0, 0, 35, 130], ["MeanBackward1", 6, 0, 161, 87, 615], ["ReluBackward1", 294, 0, 26280, 2527, 15546], ["AddBackward0", 96, 0, 0, 337, 337], ["CudnnBatchNormBackward", 318, 0, 56960, 4448, 34153], ["aten::cudnn_convolution_backward", 318, 0, 288342, 9440, 85929], ["CudnnConvolutionBackward", 318, 0, 288342, 3941, 89870], 
["aten::zeros_like", 6, 0, 948, 54, 331], ["aten::resize_as_", 6, 0, 0, 49, 60], ["MaxPool2DWithIndicesBackward", 6, 0, 5053, 80, 779]]}} -{"data": {"columns": [{"type": "string", "name": "Name"}, {"type": "number", "name": "Calls"}, {"type": "number", "name": "Total Duration (us)"}, {"type": "number", "name": "Mean Duration (us)"}, {"type": "number", "name": "Max Duration (us)"}, {"type": "number", "name": "Min Duration (us)"}], "rows": [["void cudnn::detail::dgrad_engine(int, int, int, float const*, int, float const*, int, float*, kernel_grad_params, unsigned long long, int, unsigned long long, int, float, int, int, int)", 178, 90214, 507, 1092, 154], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 179, 85427, 477, 814, 381], ["void cudnn::bn_bw_1C11_kernel_new(float, float, float, float, cudnnTensorStruct, float const*, cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float*, float*, float const*, float const*, float)", 292, 62347, 214, 802, 43], ["void at::native::vectorized_elementwise_kernel<4, at::native::threshold_kernel_impl(at::TensorIterator&, float, float)::{lambda(float, float)#1}, at::detail::Array >(int, at::native::threshold_kernel_impl(at::TensorIterator&, float, float)::{lambda(float, float)#1}, at::detail::Array)", 613, 47476, 77, 364, 6], ["void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", 3506, 41486, 12, 364, 1], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 84, 36210, 431, 753, 384], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, 
bool, int, int)", 66, 32761, 496, 854, 362], ["void cudnn::bn_fw_tr_1C11_kernel_NCHW(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float)", 150, 26289, 175, 433, 50], ["volta_sgemm_128x64_nt", 126, 23803, 189, 205, 156], ["volta_scudnn_128x128_stridedB_splitK_small_nn_v1", 49, 22477, 459, 705, 329], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 21, 18728, 892, 913, 881], ["volta_sgemm_128x64_nn", 78, 14530, 186, 207, 157], ["volta_scudnn_128x64_stridedB_interior_nn_v1", 35, 11515, 329, 528, 260], ["volta_scudnn_128x64_relu_interior_nn_v1", 30, 10280, 343, 530, 295], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 14, 8372, 598, 993, 207], ["volta_scudnn_winograd_128x128_ldg1_ldg4_relu_tile148t_nt_v1", 21, 7728, 368, 377, 365], ["void cudnn::winograd_nonfused::winogradForwardOutput4x4(cudnn::winograd_nonfused::WinogradOutputParams)", 141, 7213, 51, 115, 20], ["void cudnn::winograd_nonfused::winogradForwardData4x4(cudnn::winograd_nonfused::WinogradDataParams)", 141, 7133, 51, 143, 17], ["void cudnn::bn_fw_tr_1C11_singleread(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float, cudnn::reduced_divisor, int, cudnn::reduced_divisor, cudnn::bnFwPersistentState*, int, float, float, float, int, float, float, cudnnStatus_t*, bool)", 168, 7045, 42, 87, 14], ["void cudnn::ops::scalePackedTensor_kernel(cudnnTensor4dStruct, float*, float)", 178, 5920, 33, 158, 6], ["volta_scudnn_128x128_stridedB_interior_nn_v1", 21, 5763, 274, 294, 269], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long 
long, int, float, float, int, float const*, float const*, bool, int, int)", 12, 5352, 446, 449, 443], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 7, 5335, 762, 781, 744], ["void explicit_convolve_sgemm(int, int, int, float const*, int, float const*, int, float*, kernel_conv_params, unsigned long long, int, unsigned long long, int, float, float, int, float const*, float const*)", 6, 4882, 814, 817, 811], ["void at::native::(anonymous namespace)::max_pool_backward_nchw(int, float const*, long const*, int, int, int, int, int, int, int, int, int, int, int, int, int, int, float*)", 7, 4795, 685, 690, 683], ["volta_scudnn_128x128_stridedB_splitK_medium_nn_v1", 7, 4662, 666, 669, 658], ["volta_scudnn_128x128_stridedB_medium_nn_v1", 14, 4373, 312, 327, 297], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 6, 4023, 670, 676, 663], ["volta_scudnn_128x64_stridedB_splitK_xregs_large_nn_v1", 6, 4007, 668, 672, 664], ["volta_scudnn_128x64_relu_medium_nn_v1", 6, 3850, 642, 644, 638], ["volta_scudnn_128x64_relu_xregs_large_nn_v1", 6, 3731, 622, 672, 571], ["volta_scudnn_128x128_stridedB_small_nn_v1", 7, 3726, 532, 542, 526], ["volta_scudnn_128x64_relu_small_nn_v1", 12, 3427, 286, 295, 272], ["volta_scudnn_128x128_relu_interior_nn_v1", 6, 3340, 557, 562, 551], ["void at::native::vectorized_elementwise_kernel<4, at::native::MulScalarFunctor, at::detail::Array >(int, at::native::MulScalarFunctor, at::detail::Array)", 1127, 2771, 2, 24, 1], ["void cudnn::winograd_nonfused::winogradForwardFilter4x4(cudnn::winograd_nonfused::WinogradFilterParams)", 141, 2699, 19, 66, 3], ["void cudnn::winograd_nonfused::winogradWgradData4x4(cudnn::winograd_nonfused::WinogradDataParams)", 63, 2540, 40, 60, 20], ["void 
at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", 979, 2534, 3, 158, 0], ["void cudnn::winograd_nonfused::winogradWgradDelta4x4(cudnn::winograd_nonfused::WinogradDeltaParams)", 63, 2479, 39, 60, 18], ["void cudnn::bn_bw_1C11_singleread(float, float, float, float, cudnnTensorStruct, float const*, cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float*, float*, float const*, float const*, float, cudnn::reduced_divisor, int, cudnn::reduced_divisor, cudnn::bnBwPersistentState*, int, float, float, float, int, float, cudnnStatus_t*, bool)", 54, 2313, 43, 74, 19], ["void at::native::(anonymous namespace)::max_pool_forward_nchw(int, float const*, int, int, int, int, int, int, int, int, int, int, int, int, int, int, float*, long*)", 6, 1341, 224, 224, 223], ["void cudnn::winograd_nonfused::winogradWgradOutput4x4(cudnn::winograd_nonfused::WinogradWgradOutputParams)", 63, 1320, 21, 65, 4], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 6, 864, 144, 147, 142], ["void cudnn::cnn::im2col4d_kernel(cudnn::cnn::im2col4d_params, cudnnConvolutionStruct, cudnnTensor4dStruct, float const*, float*)", 6, 601, 100, 101, 99], ["cask_cudnn::computeOffsetsKernel(cask_cudnn::ComputeOffsetsParams)", 137, 352, 3, 6, 2], ["void at::native::vectorized_elementwise_kernel<4, at::native::BUnaryFunctor >, at::detail::Array >(int, at::native::BUnaryFunctor >, at::detail::Array)", 318, 327, 1, 2, 1], ["void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, float, 4> >(at::native::ReduceOp, unsigned int, float, 4>)", 6, 258, 43, 43, 43], ["volta_sgemm_64x32_sliced1x4_nn", 6, 161, 27, 27, 26], ["void at::native::unrolled_elementwise_kernel, at::detail::Array, OffsetCalculator<1, unsigned int>, OffsetCalculator<1, unsigned 
int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast>(int, at::native::MulScalarFunctor, at::detail::Array, OffsetCalculator<1, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast)", 6, 161, 27, 27, 26], ["volta_sgemm_64x32_sliced1x4_tn", 6, 144, 24, 24, 24], ["volta_sgemm_128x32_nt", 6, 115, 19, 20, 19], ["cask_cudnn::computeWgradBOffsetsKernel(cask_cudnn::ComputeWgradBOffsetsParams)", 62, 87, 1, 2, 1], ["cask_cudnn::computeBOffsetsKernel(cask_cudnn::ComputeBOffsetsParams)", 77, 85, 1, 2, 1], ["void cudnn::winograd::generateWinogradTilesKernel<0, float, float>(cudnn::winograd::GenerateWinogradTilesParams)", 21, 84, 4, 4, 4], ["cask_cudnn::computeWgradSplitKOffsetsKernel(cask_cudnn::ComputeSplitKOffsetsParams)", 62, 75, 1, 2, 1], ["void (anonymous namespace)::softmax_warp_backward(float*, float const*, float const*, int, int, int)", 6, 63, 10, 11, 10], ["void (anonymous namespace)::softmax_warp_forward(float*, float const*, int, int, int)", 6, 60, 10, 10, 10], ["void at::native::reduce_kernel<128, 4, at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4> >(at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4>)", 6, 48, 8, 8, 8], ["void splitKreduce_kernel(cublasSplitKParams, float const*, float const*, float*, float const*, float const*)", 12, 36, 3, 4, 2], ["void at::native::unrolled_elementwise_kernel, OffsetCalculator<1, unsigned int>, char*, at::native::memory::LoadWithoutCast, at::detail::Array::StoreWithoutCast>(int, at::native::copy_device_to_device(at::TensorIterator&, bool)::{lambda()#2}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array, OffsetCalculator<1, unsigned int>, char*, at::native::memory::LoadWithoutCast, at::detail::Array::StoreWithoutCast)", 6, 36, 6, 6, 6], ["void 
cunn_ClassNLLCriterion_updateOutput_kernel(float*, float*, float*, long*, float*, int, int, int, int, long)", 6, 21, 4, 4, 3], ["void cunn_ClassNLLCriterion_updateGradInput_kernel(float*, float*, long*, float*, float*, int, int, int, int, long)", 6, 13, 2, 3, 2]]}} -{"total": {"columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["void cudnn::detail::dgrad_engine(int, int, int, float const*, int, float const*, int, float*, kernel_grad_params, unsigned long long, int, unsigned long long, int, float, int, int, int)", 90214.0], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 85427.0], ["void cudnn::bn_bw_1C11_kernel_new(float, float, float, float, cudnnTensorStruct, float const*, cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float*, float*, float const*, float const*, float)", 62347.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::threshold_kernel_impl(at::TensorIterator&, float, float)::{lambda(float, float)#1}, at::detail::Array >(int, at::native::threshold_kernel_impl(at::TensorIterator&, float, float)::{lambda(float, float)#1}, at::detail::Array)", 47476.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", 41486.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 36210.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 32761.0], ["void cudnn::bn_fw_tr_1C11_kernel_NCHW(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, 
float*, float*, float*, float*, float, float)", 26289.0], ["volta_sgemm_128x64_nt", 23803.0], ["volta_scudnn_128x128_stridedB_splitK_small_nn_v1", 22477.0], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 18728.0], ["volta_sgemm_128x64_nn", 14530.0], ["volta_scudnn_128x64_stridedB_interior_nn_v1", 11515.0], ["volta_scudnn_128x64_relu_interior_nn_v1", 10280.0], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 8372.0], ["volta_scudnn_winograd_128x128_ldg1_ldg4_relu_tile148t_nt_v1", 7728.0], ["void cudnn::winograd_nonfused::winogradForwardOutput4x4(cudnn::winograd_nonfused::WinogradOutputParams)", 7213.0], ["void cudnn::winograd_nonfused::winogradForwardData4x4(cudnn::winograd_nonfused::WinogradDataParams)", 7133.0], ["void cudnn::bn_fw_tr_1C11_singleread(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float, cudnn::reduced_divisor, int, cudnn::reduced_divisor, cudnn::bnFwPersistentState*, int, float, float, float, int, float, float, cudnnStatus_t*, bool)", 7045.0], ["void cudnn::ops::scalePackedTensor_kernel(cudnnTensor4dStruct, float*, float)", 5920.0], ["volta_scudnn_128x128_stridedB_interior_nn_v1", 5763.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 5352.0], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 5335.0], ["void explicit_convolve_sgemm(int, int, int, float const*, int, float const*, int, float*, kernel_conv_params, unsigned long long, int, unsigned long long, 
int, float, float, int, float const*, float const*)", 4882.0], ["void at::native::(anonymous namespace)::max_pool_backward_nchw(int, float const*, long const*, int, int, int, int, int, int, int, int, int, int, int, int, int, int, float*)", 4795.0], ["volta_scudnn_128x128_stridedB_splitK_medium_nn_v1", 4662.0], ["volta_scudnn_128x128_stridedB_medium_nn_v1", 4373.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 4023.0], ["volta_scudnn_128x64_stridedB_splitK_xregs_large_nn_v1", 4007.0], ["volta_scudnn_128x64_relu_medium_nn_v1", 3850.0], ["volta_scudnn_128x64_relu_xregs_large_nn_v1", 3731.0], ["volta_scudnn_128x128_stridedB_small_nn_v1", 3726.0], ["volta_scudnn_128x64_relu_small_nn_v1", 3427.0], ["volta_scudnn_128x128_relu_interior_nn_v1", 3340.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::MulScalarFunctor, at::detail::Array >(int, at::native::MulScalarFunctor, at::detail::Array)", 2771.0], ["void cudnn::winograd_nonfused::winogradForwardFilter4x4(cudnn::winograd_nonfused::WinogradFilterParams)", 2699.0], ["void cudnn::winograd_nonfused::winogradWgradData4x4(cudnn::winograd_nonfused::WinogradDataParams)", 2540.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", 2534.0], ["void cudnn::winograd_nonfused::winogradWgradDelta4x4(cudnn::winograd_nonfused::WinogradDeltaParams)", 2479.0], ["void cudnn::bn_bw_1C11_singleread(float, float, float, float, cudnnTensorStruct, float const*, cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float*, float*, float const*, float const*, float, cudnn::reduced_divisor, int, cudnn::reduced_divisor, cudnn::bnBwPersistentState*, int, float, float, float, int, float, cudnnStatus_t*, bool)", 2313.0], ["void at::native::(anonymous 
namespace)::max_pool_forward_nchw(int, float const*, int, int, int, int, int, int, int, int, int, int, int, int, int, int, float*, long*)", 1341.0], ["void cudnn::winograd_nonfused::winogradWgradOutput4x4(cudnn::winograd_nonfused::WinogradWgradOutputParams)", 1320.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 864.0], ["void cudnn::cnn::im2col4d_kernel(cudnn::cnn::im2col4d_params, cudnnConvolutionStruct, cudnnTensor4dStruct, float const*, float*)", 601.0], ["cask_cudnn::computeOffsetsKernel(cask_cudnn::ComputeOffsetsParams)", 352.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::BUnaryFunctor >, at::detail::Array >(int, at::native::BUnaryFunctor >, at::detail::Array)", 327.0], ["void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, float, 4> >(at::native::ReduceOp, unsigned int, float, 4>)", 258.0], ["volta_sgemm_64x32_sliced1x4_nn", 161.0], ["void at::native::unrolled_elementwise_kernel, at::detail::Array, OffsetCalculator<1, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast>(int, at::native::MulScalarFunctor, at::detail::Array, OffsetCalculator<1, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast)", 161.0], ["volta_sgemm_64x32_sliced1x4_tn", 144.0], ["volta_sgemm_128x32_nt", 115.0], ["cask_cudnn::computeWgradBOffsetsKernel(cask_cudnn::ComputeWgradBOffsetsParams)", 87.0], ["cask_cudnn::computeBOffsetsKernel(cask_cudnn::ComputeBOffsetsParams)", 85.0], ["void cudnn::winograd::generateWinogradTilesKernel<0, float, float>(cudnn::winograd::GenerateWinogradTilesParams)", 84.0], ["cask_cudnn::computeWgradSplitKOffsetsKernel(cask_cudnn::ComputeSplitKOffsetsParams)", 75.0], ["void (anonymous namespace)::softmax_warp_backward(float*, float 
const*, float const*, int, int, int)", 63.0], ["void (anonymous namespace)::softmax_warp_forward(float*, float const*, int, int, int)", 60.0], ["void at::native::reduce_kernel<128, 4, at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4> >(at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4>)", 48.0], ["void splitKreduce_kernel(cublasSplitKParams, float const*, float const*, float*, float const*, float const*)", 36.0], ["void at::native::unrolled_elementwise_kernel, OffsetCalculator<1, unsigned int>, char*, at::native::memory::LoadWithoutCast, at::detail::Array::StoreWithoutCast>(int, at::native::copy_device_to_device(at::TensorIterator&, bool)::{lambda()#2}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array, OffsetCalculator<1, unsigned int>, char*, at::native::memory::LoadWithoutCast, at::detail::Array::StoreWithoutCast)", 36.0], ["void cunn_ClassNLLCriterion_updateOutput_kernel(float*, float*, float*, long*, float*, int, int, int, int, long)", 21.0], ["void cunn_ClassNLLCriterion_updateGradInput_kernel(float*, float*, long*, float*, float*, int, int, int, int, long)", 13.0]]}} +{"steps": {"columns": [{"type": "string", "name": "Step"}, {"type": "number", "name": "Kernel"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "Memcpy"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "Memset"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "Runtime"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "DataLoader"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "CPU Exec"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "Other"}, {"type": "string", "role": "tooltip", "p": {"html": 
"true"}}], "rows": [["5", 98598, "
Step 5
Total: 187948us
Kernel: 98598us
Percentage: 52.46%
", 1941, "
Step 5
Total: 187948us
Memcpy: 1941us
Percentage: 1.03%
", 90, "
Step 5
Total: 187948us
Memset: 90us
Percentage: 0.05%
", 2796, "
Step 5
Total: 187948us
Runtime: 2796us
Percentage: 1.49%
", 69317, "
Step 5
Total: 187948us
DataLoader: 69317us
Percentage: 36.88%
", 14091, "
Step 5
Total: 187948us
CPU Exec: 14091us
Percentage: 7.5%
", 1115, "
Step 5
Total: 187948us
Other: 1115us
Percentage: 0.59%
"], ["6", 98570, "
Step 6
Total: 175153us
Kernel: 98570us
Percentage: 56.28%
", 1947, "
Step 6
Total: 175153us
Memcpy: 1947us
Percentage: 1.11%
", 89, "
Step 6
Total: 175153us
Memset: 89us
Percentage: 0.05%
", 2762, "
Step 6
Total: 175153us
Runtime: 2762us
Percentage: 1.58%
", 57669, "
Step 6
Total: 175153us
DataLoader: 57669us
Percentage: 32.92%
", 12968, "
Step 6
Total: 175153us
CPU Exec: 12968us
Percentage: 7.4%
", 1148, "
Step 6
Total: 175153us
Other: 1148us
Percentage: 0.66%
"], ["7", 98596, "
Step 7
Total: 179733us
Kernel: 98596us
Percentage: 54.86%
", 1931, "
Step 7
Total: 179733us
Memcpy: 1931us
Percentage: 1.07%
", 91, "
Step 7
Total: 179733us
Memset: 91us
Percentage: 0.05%
", 2877, "
Step 7
Total: 179733us
Runtime: 2877us
Percentage: 1.6%
", 61257, "
Step 7
Total: 179733us
DataLoader: 61257us
Percentage: 34.08%
", 13768, "
Step 7
Total: 179733us
CPU Exec: 13768us
Percentage: 7.66%
", 1213, "
Step 7
Total: 179733us
Other: 1213us
Percentage: 0.67%
"], ["8", 98623, "
Step 8
Total: 174564us
Kernel: 98623us
Percentage: 56.5%
", 1938, "
Step 8
Total: 174564us
Memcpy: 1938us
Percentage: 1.11%
", 89, "
Step 8
Total: 174564us
Memset: 89us
Percentage: 0.05%
", 2841, "
Step 8
Total: 174564us
Runtime: 2841us
Percentage: 1.63%
", 56453, "
Step 8
Total: 174564us
DataLoader: 56453us
Percentage: 32.34%
", 13420, "
Step 8
Total: 174564us
CPU Exec: 13420us
Percentage: 7.69%
", 1200, "
Step 8
Total: 174564us
Other: 1200us
Percentage: 0.69%
"], ["9", 98504, "
Step 9
Total: 182172us
Kernel: 98504us
Percentage: 54.07%
", 1937, "
Step 9
Total: 182172us
Memcpy: 1937us
Percentage: 1.06%
", 87, "
Step 9
Total: 182172us
Memset: 87us
Percentage: 0.05%
", 2788, "
Step 9
Total: 182172us
Runtime: 2788us
Percentage: 1.53%
", 62690, "
Step 9
Total: 182172us
DataLoader: 62690us
Percentage: 34.41%
", 15025, "
Step 9
Total: 182172us
CPU Exec: 15025us
Percentage: 8.25%
", 1141, "
Step 9
Total: 182172us
Other: 1141us
Percentage: 0.63%
"], ["10", 98641, "
Step 10
Total: 165983us
Kernel: 98641us
Percentage: 59.43%
", 1798, "
Step 10
Total: 165983us
Memcpy: 1798us
Percentage: 1.08%
", 88, "
Step 10
Total: 165983us
Memset: 88us
Percentage: 0.05%
", 3381, "
Step 10
Total: 165983us
Runtime: 3381us
Percentage: 2.04%
", 48185, "
Step 10
Total: 165983us
DataLoader: 48185us
Percentage: 29.03%
", 12773, "
Step 10
Total: 165983us
CPU Exec: 12773us
Percentage: 7.7%
", 1117, "
Step 10
Total: 165983us
Other: 1117us
Percentage: 0.67%
"]]}, "performance": [{"name": "Average Step Time", "description": "", "value": 177592, "extra": 100, "children": [{"name": "Kernel", "description": "", "value": 98589, "extra": 55.51}, {"name": "Memcpy", "description": "", "value": 1915, "extra": 1.08}, {"name": "Memset", "description": "", "value": 89, "extra": 0.05}, {"name": "Runtime", "description": "", "value": 2908, "extra": 1.64}, {"name": "DataLoader", "description": "", "value": 59262, "extra": 33.37}, {"name": "CPU Exec", "description": "", "value": 13674, "extra": 7.7}, {"name": "Other", "description": "", "value": 1156, "extra": 0.65}]}], "recommendations": "
  • This run has high time cost on input data loading. 33.4% of the step time is in DataLoader. You could try to set num_workers on DataLoader's construction and enable multi-processes on data loading. Reference: Single- and Multi-process Data Loading
", "environments": [{"title": "Number of Worker(s)", "value": "1"}, {"title": "Device Type", "value": "GPU"}], "gpu_metrics": {"title": "GPU Summary", "data": [{"title": "GPU 0:", "value": ""}, {"title": "Name", "value": "Tesla V100-DGXS-32GB"}, {"title": "Memory", "value": "31.74 GB"}, {"title": "Compute Capability", "value": "7.0"}, {"title": "GPU Utilization", "value": "55.51 %"}, {"title": "Est. SM Efficiency", "value": "54.68 %"}, {"title": "Est. Achieved Occupancy", "value": "49.13 %"}], "tooltip": "The GPU usage metrics:\n\nGPU Utilization:\nGPU busy time / All steps time. GPU busy time is the time during which there is at least one GPU kernel running on it. All steps time is the total time of all profiler steps(or called as iterations).\n\nEst. SM Efficiency:\nEstimated Stream Multiprocessor Efficiency. Est. SM Efficiency of a kernel, SM_Eff_K = min(blocks of this kernel / SM number of this GPU, 100%). This overall number is the sum of all kernels' SM_Eff_K weighted by kernel's execution duration, divided by all steps time.\n\nEst. Achieved Occupancy:\nOccupancy is the ratio of active threads on an SM to the maximum number of active threads supported by the SM. The theoretical occupancy of a kernel is upper limit occupancy of this kernel, limited by multiple factors such as kernel shape, kernel used resource, and the GPU compute capability.Est. Achieved Occupancy of a kernel, OCC_K = min(threads of the kernel / SM number / max threads per SM, theoretical occupancy of the kernel). 
This overall number is the weighted sum of all kernels OCC_K using kernel's execution duration as weight."}} +{"device_total_time": {"title": "Device Total Time (us)", "columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["aten::cudnn_convolution_backward", 273428], ["CudnnConvolutionBackward", 273428], ["aten::cudnn_convolution_backward_weight", 142461], ["aten::cudnn_convolution_backward_input", 130967], ["aten::cudnn_convolution", 126619], ["aten::_convolution", 126619], ["aten::convolution", 126619], ["aten::conv2d", 126619], ["aten::cudnn_batch_norm_backward", 61939], ["CudnnBatchNormBackward", 61939], ["aten::cudnn_batch_norm", 34245], ["aten::_batch_norm_impl_index", 34245], ["aten::batch_norm", 34245], ["aten::threshold_backward", 27298], ["ReluBackward1", 27298], ["aten::add_", 24098], ["aten::clamp_min", 17860], ["aten::clamp_min_", 17860], ["aten::relu_", 17860], ["aten::add", 16038], ["aten::copy_", 11492], ["aten::to", 11492], ["aten::max_pool2d_with_indices_backward", 4677], ["MaxPool2DWithIndicesBackward", 4677], ["torch::autograd::AccumulateGrad", 3030], ["aten::mul_", 2409], ["aten::fill_", 1887], ["aten::zero_", 1881], ["aten::max_pool2d_with_indices", 1420], ["aten::max_pool2d", 1420], ["aten::mm", 275], ["AddmmBackward", 275], ["aten::mean", 212], ["aten::adaptive_avg_pool2d", 212], ["aten::addmm", 197], ["aten::linear", 197], ["aten::div", 144], ["MeanBackward1", 144], ["aten::cross_entropy_loss", 60], ["aten::_log_softmax_backward_data", 53], ["LogSoftmaxBackward", 53], ["aten::sum", 44], ["aten::_log_softmax", 42], ["aten::log_softmax", 42], ["aten::nll_loss_forward", 18], ["aten::nll_loss", 18], ["aten::nll_loss_nd", 18], ["aten::nll_loss_backward", 18], ["NllLossBackward", 18], ["aten::ones_like", 6]]}, "device_self_time": {"title": "Device Self Time (us)", "columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["aten::cudnn_convolution_backward_weight", 
142461], ["aten::cudnn_convolution_backward_input", 130967], ["aten::cudnn_convolution", 126619], ["aten::cudnn_batch_norm_backward", 61939], ["aten::cudnn_batch_norm", 34245], ["aten::threshold_backward", 27298], ["aten::add_", 24098], ["aten::clamp_min", 17860], ["aten::add", 16038], ["aten::copy_", 11492], ["aten::max_pool2d_with_indices_backward", 3822], ["aten::mul_", 2409], ["aten::fill_", 1887], ["aten::max_pool2d_with_indices", 1420], ["aten::mm", 275], ["aten::mean", 212], ["aten::addmm", 197], ["aten::div", 144], ["aten::_log_softmax_backward_data", 53], ["aten::sum", 44], ["aten::_log_softmax", 42], ["aten::nll_loss_forward", 18], ["aten::nll_loss_backward", 18]]}, "host_total_time": {"title": "Host Total Time (us)", "columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["CudnnConvolutionBackward", 90989], ["aten::batch_norm", 87977], ["aten::cudnn_convolution_backward", 87772], ["aten::add_", 78125], ["aten::_batch_norm_impl_index", 78071], ["aten::conv2d", 77781], ["aten::cudnn_batch_norm", 71527], ["aten::convolution", 70394], ["aten::empty", 68147], ["aten::to", 64332], ["aten::_convolution", 64243], ["aten::cudnn_convolution", 56998], ["aten::copy_", 52853], ["aten::cudnn_convolution_backward_input", 41445], ["aten::cudnn_convolution_backward_weight", 40246], ["aten::div", 35158], ["CudnnBatchNormBackward", 34608], ["aten::contiguous", 31137], ["aten::cudnn_batch_norm_backward", 30460], ["aten::mul_", 29081], ["torch::autograd::AccumulateGrad", 28494], ["aten::zero_", 27597], ["aten::empty_like", 26064], ["aten::stack", 24346], ["aten::relu_", 24181], ["aten::add", 19289], ["aten::cat", 17085], ["aten::fill_", 17059], ["aten::_cat", 16933], ["aten::clamp_min_", 15665], ["aten::view", 14027], ["aten::resize_", 12406], ["aten::empty_strided", 11829], ["ReluBackward1", 11656], ["aten::clamp_min", 10311], ["aten::permute", 9775], ["aten::threshold_backward", 9482], ["aten::as_strided", 7600], ["aten::unsqueeze", 
6603], ["aten::linear", 1408], ["AddmmBackward", 1303], ["aten::cross_entropy_loss", 1180], ["aten::zeros", 1105], ["aten::addmm", 1034], ["MeanBackward1", 987], ["aten::mm", 860], ["NllLossBackward", 716], ["aten::max_pool2d", 687], ["aten::nll_loss_backward", 614], ["aten::t", 584], ["aten::log_softmax", 567], ["aten::max_pool2d_with_indices", 562], ["aten::adaptive_avg_pool2d", 561], ["aten::nll_loss_nd", 495], ["MaxPool2DWithIndicesBackward", 484], ["aten::ones_like", 452], ["aten::mean", 445], ["aten::_log_softmax", 433], ["aten::nll_loss", 414], ["aten::max_pool2d_with_indices_backward", 411], ["LogSoftmaxBackward", 359], ["aten::narrow", 350], ["aten::nll_loss_forward", 346], ["aten::transpose", 329], ["aten::sum", 327], ["aten::_log_softmax_backward_data", 306], ["aten::expand", 229], ["aten::slice", 223], ["aten::detach_", 208], ["AddBackward0", 175], ["aten::flatten", 164], ["TBackward", 103], ["detach_", 100], ["ViewBackward", 80], ["aten::reshape", 55], ["aten::conj", 12]]}, "host_self_time": {"title": "Host Self Time (us)", "columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["aten::empty", 68147], ["aten::add_", 51013], ["aten::copy_", 40255], ["aten::cudnn_convolution", 33121], ["aten::cudnn_convolution_backward_input", 29324], ["aten::cudnn_convolution_backward_weight", 22804], ["aten::mul_", 20515], ["aten::div", 20135], ["aten::cudnn_batch_norm", 19843], ["aten::_cat", 16282], ["aten::to", 14834], ["aten::add", 14329], ["aten::view", 14027], ["aten::resize_", 12406], ["aten::cudnn_batch_norm_backward", 12238], ["aten::empty_strided", 11829], ["aten::empty_like", 11742], ["aten::zero_", 10693], ["aten::batch_norm", 9906], ["aten::fill_", 9879], ["aten::relu_", 8516], ["aten::as_strided", 7600], ["aten::conv2d", 7387], ["aten::_convolution", 7245], ["aten::clamp_min", 7106], ["aten::_batch_norm_impl_index", 6544], ["aten::convolution", 6151], ["aten::threshold_backward", 6090], 
["aten::cudnn_convolution_backward", 6081], ["aten::permute", 5515], ["aten::contiguous", 5510], ["torch::autograd::AccumulateGrad", 5457], ["aten::clamp_min_", 5354], ["CudnnBatchNormBackward", 4148], ["aten::unsqueeze", 3574], ["CudnnConvolutionBackward", 3217], ["ReluBackward1", 2174], ["aten::zeros", 659], ["aten::stack", 658], ["aten::addmm", 639], ["aten::mm", 575], ["MeanBackward1", 541], ["aten::max_pool2d_with_indices", 477], ["aten::nll_loss_backward", 388], ["aten::nll_loss_forward", 266], ["aten::t", 255], ["aten::mean", 234], ["aten::transpose", 197], ["AddmmBackward", 182], ["aten::max_pool2d_with_indices_backward", 176], ["AddBackward0", 175], ["aten::_log_softmax", 170], ["aten::sum", 153], ["aten::cat", 152], ["aten::expand", 150], ["aten::narrow", 127], ["aten::max_pool2d", 125], ["aten::linear", 124], ["aten::slice", 123], ["aten::cross_entropy_loss", 118], ["aten::adaptive_avg_pool2d", 116], ["aten::detach_", 108], ["aten::_log_softmax_backward_data", 108], ["NllLossBackward", 102], ["detach_", 100], ["aten::ones_like", 95], ["aten::log_softmax", 90], ["aten::flatten", 84], ["aten::nll_loss_nd", 81], ["MaxPool2DWithIndicesBackward", 73], ["aten::nll_loss", 68], ["LogSoftmaxBackward", 53], ["aten::reshape", 29], ["ViewBackward", 25], ["TBackward", 18], ["aten::conj", 12]]}} +[{"name": "aten::cudnn_convolution_backward_weight", "calls": 318, "device_self_duration": 142461, "device_total_duration": 142461, "host_self_duration": 22804, "host_total_duration": 40246, "has_call_stack": false}, {"name": "aten::cudnn_convolution_backward_input", "calls": 312, "device_self_duration": 130967, "device_total_duration": 130967, "host_self_duration": 29324, "host_total_duration": 41445, "has_call_stack": false}, {"name": "aten::cudnn_convolution", "calls": 318, "device_self_duration": 126619, "device_total_duration": 126619, "host_self_duration": 33121, "host_total_duration": 56998, "has_call_stack": true}, {"name": "aten::cudnn_batch_norm_backward", "calls": 
318, "device_self_duration": 61939, "device_total_duration": 61939, "host_self_duration": 12238, "host_total_duration": 30460, "has_call_stack": false}, {"name": "aten::cudnn_batch_norm", "calls": 318, "device_self_duration": 34245, "device_total_duration": 34245, "host_self_duration": 19843, "host_total_duration": 71527, "has_call_stack": true}, {"name": "aten::threshold_backward", "calls": 294, "device_self_duration": 27298, "device_total_duration": 27298, "host_self_duration": 6090, "host_total_duration": 9482, "has_call_stack": false}, {"name": "aten::add_", "calls": 2994, "device_self_duration": 24098, "device_total_duration": 24098, "host_self_duration": 51013, "host_total_duration": 78125, "has_call_stack": true}, {"name": "aten::clamp_min", "calls": 294, "device_self_duration": 17860, "device_total_duration": 17860, "host_self_duration": 7106, "host_total_duration": 10311, "has_call_stack": true}, {"name": "aten::add", "calls": 414, "device_self_duration": 16038, "device_total_duration": 16038, "host_self_duration": 14329, "host_total_duration": 19289, "has_call_stack": true}, {"name": "aten::copy_", "calls": 588, "device_self_duration": 11492, "device_total_duration": 11492, "host_self_duration": 40255, "host_total_duration": 52853, "has_call_stack": true}, {"name": "aten::max_pool2d_with_indices_backward", "calls": 6, "device_self_duration": 3822, "device_total_duration": 4677, "host_self_duration": 176, "host_total_duration": 411, "has_call_stack": false}, {"name": "aten::mul_", "calls": 966, "device_self_duration": 2409, "device_total_duration": 2409, "host_self_duration": 20515, "host_total_duration": 29081, "has_call_stack": true}, {"name": "aten::fill_", "calls": 978, "device_self_duration": 1887, "device_total_duration": 1887, "host_self_duration": 9879, "host_total_duration": 17059, "has_call_stack": true}, {"name": "aten::max_pool2d_with_indices", "calls": 6, "device_self_duration": 1420, "device_total_duration": 1420, "host_self_duration": 477, 
"host_total_duration": 562, "has_call_stack": true}, {"name": "aten::mm", "calls": 12, "device_self_duration": 275, "device_total_duration": 275, "host_self_duration": 575, "host_total_duration": 860, "has_call_stack": false}, {"name": "aten::mean", "calls": 6, "device_self_duration": 212, "device_total_duration": 212, "host_self_duration": 234, "host_total_duration": 445, "has_call_stack": true}, {"name": "aten::addmm", "calls": 6, "device_self_duration": 197, "device_total_duration": 197, "host_self_duration": 639, "host_total_duration": 1034, "has_call_stack": true}, {"name": "aten::div", "calls": 198, "device_self_duration": 144, "device_total_duration": 144, "host_self_duration": 20135, "host_total_duration": 35158, "has_call_stack": true}, {"name": "aten::_log_softmax_backward_data", "calls": 6, "device_self_duration": 53, "device_total_duration": 53, "host_self_duration": 108, "host_total_duration": 306, "has_call_stack": false}, {"name": "aten::sum", "calls": 6, "device_self_duration": 44, "device_total_duration": 44, "host_self_duration": 153, "host_total_duration": 327, "has_call_stack": false}, {"name": "aten::_log_softmax", "calls": 6, "device_self_duration": 42, "device_total_duration": 42, "host_self_duration": 170, "host_total_duration": 433, "has_call_stack": true}, {"name": "aten::nll_loss_forward", "calls": 6, "device_self_duration": 18, "device_total_duration": 18, "host_self_duration": 266, "host_total_duration": 346, "has_call_stack": true}, {"name": "aten::nll_loss_backward", "calls": 6, "device_self_duration": 18, "device_total_duration": 18, "host_self_duration": 388, "host_total_duration": 614, "has_call_stack": false}, {"name": "aten::empty", "calls": 4404, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 68147, "host_total_duration": 68147, "has_call_stack": true}, {"name": "aten::zero_", "calls": 996, "device_self_duration": 0, "device_total_duration": 1881, "host_self_duration": 10693, "host_total_duration": 
27597, "has_call_stack": true}, {"name": "aten::zeros", "calls": 24, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 659, "host_total_duration": 1105, "has_call_stack": true}, {"name": "aten::view", "calls": 846, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 14027, "host_total_duration": 14027, "has_call_stack": true}, {"name": "aten::as_strided", "calls": 432, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 7600, "host_total_duration": 7600, "has_call_stack": true}, {"name": "aten::permute", "calls": 192, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 5515, "host_total_duration": 9775, "has_call_stack": true}, {"name": "aten::empty_like", "calls": 528, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 11742, "host_total_duration": 26064, "has_call_stack": true}, {"name": "aten::contiguous", "calls": 192, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 5510, "host_total_duration": 31137, "has_call_stack": true}, {"name": "aten::empty_strided", "calls": 402, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 11829, "host_total_duration": 11829, "has_call_stack": true}, {"name": "aten::to", "calls": 414, "device_self_duration": 0, "device_total_duration": 11492, "host_self_duration": 14834, "host_total_duration": 64332, "has_call_stack": true}, {"name": "aten::unsqueeze", "calls": 192, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 3574, "host_total_duration": 6603, "has_call_stack": true}, {"name": "aten::resize_", "calls": 1902, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 12406, "host_total_duration": 12406, "has_call_stack": true}, {"name": "aten::slice", "calls": 6, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 123, "host_total_duration": 223, "has_call_stack": true}, 
{"name": "aten::narrow", "calls": 6, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 127, "host_total_duration": 350, "has_call_stack": true}, {"name": "aten::_cat", "calls": 6, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 16282, "host_total_duration": 16933, "has_call_stack": true}, {"name": "aten::cat", "calls": 6, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 152, "host_total_duration": 17085, "has_call_stack": true}, {"name": "aten::stack", "calls": 6, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 658, "host_total_duration": 24346, "has_call_stack": true}, {"name": "detach_", "calls": 6, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 100, "host_total_duration": 100, "has_call_stack": true}, {"name": "aten::detach_", "calls": 6, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 108, "host_total_duration": 208, "has_call_stack": true}, {"name": "aten::_convolution", "calls": 318, "device_self_duration": 0, "device_total_duration": 126619, "host_self_duration": 7245, "host_total_duration": 64243, "has_call_stack": true}, {"name": "aten::convolution", "calls": 318, "device_self_duration": 0, "device_total_duration": 126619, "host_self_duration": 6151, "host_total_duration": 70394, "has_call_stack": true}, {"name": "aten::conv2d", "calls": 318, "device_self_duration": 0, "device_total_duration": 126619, "host_self_duration": 7387, "host_total_duration": 77781, "has_call_stack": true}, {"name": "aten::_batch_norm_impl_index", "calls": 318, "device_self_duration": 0, "device_total_duration": 34245, "host_self_duration": 6544, "host_total_duration": 78071, "has_call_stack": true}, {"name": "aten::batch_norm", "calls": 318, "device_self_duration": 0, "device_total_duration": 34245, "host_self_duration": 9906, "host_total_duration": 87977, "has_call_stack": true}, {"name": "aten::clamp_min_", 
"calls": 294, "device_self_duration": 0, "device_total_duration": 17860, "host_self_duration": 5354, "host_total_duration": 15665, "has_call_stack": true}, {"name": "aten::relu_", "calls": 294, "device_self_duration": 0, "device_total_duration": 17860, "host_self_duration": 8516, "host_total_duration": 24181, "has_call_stack": true}, {"name": "aten::max_pool2d", "calls": 6, "device_self_duration": 0, "device_total_duration": 1420, "host_self_duration": 125, "host_total_duration": 687, "has_call_stack": true}, {"name": "aten::adaptive_avg_pool2d", "calls": 6, "device_self_duration": 0, "device_total_duration": 212, "host_self_duration": 116, "host_total_duration": 561, "has_call_stack": true}, {"name": "aten::flatten", "calls": 6, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 84, "host_total_duration": 164, "has_call_stack": true}, {"name": "aten::transpose", "calls": 30, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 197, "host_total_duration": 329, "has_call_stack": true}, {"name": "aten::t", "calls": 30, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 255, "host_total_duration": 584, "has_call_stack": true}, {"name": "aten::expand", "calls": 12, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 150, "host_total_duration": 229, "has_call_stack": true}, {"name": "aten::linear", "calls": 6, "device_self_duration": 0, "device_total_duration": 197, "host_self_duration": 124, "host_total_duration": 1408, "has_call_stack": true}, {"name": "aten::log_softmax", "calls": 6, "device_self_duration": 0, "device_total_duration": 42, "host_self_duration": 90, "host_total_duration": 567, "has_call_stack": true}, {"name": "aten::nll_loss", "calls": 6, "device_self_duration": 0, "device_total_duration": 18, "host_self_duration": 68, "host_total_duration": 414, "has_call_stack": true}, {"name": "aten::nll_loss_nd", "calls": 6, "device_self_duration": 0, 
"device_total_duration": 18, "host_self_duration": 81, "host_total_duration": 495, "has_call_stack": true}, {"name": "aten::cross_entropy_loss", "calls": 6, "device_self_duration": 0, "device_total_duration": 60, "host_self_duration": 118, "host_total_duration": 1180, "has_call_stack": true}, {"name": "aten::ones_like", "calls": 6, "device_self_duration": 0, "device_total_duration": 6, "host_self_duration": 95, "host_total_duration": 452, "has_call_stack": true}, {"name": "NllLossBackward", "calls": 6, "device_self_duration": 0, "device_total_duration": 18, "host_self_duration": 102, "host_total_duration": 716, "has_call_stack": false}, {"name": "LogSoftmaxBackward", "calls": 6, "device_self_duration": 0, "device_total_duration": 53, "host_self_duration": 53, "host_total_duration": 359, "has_call_stack": false}, {"name": "aten::conj", "calls": 12, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 12, "host_total_duration": 12, "has_call_stack": false}, {"name": "AddmmBackward", "calls": 6, "device_self_duration": 0, "device_total_duration": 275, "host_self_duration": 182, "host_total_duration": 1303, "has_call_stack": false}, {"name": "torch::autograd::AccumulateGrad", "calls": 966, "device_self_duration": 0, "device_total_duration": 3030, "host_self_duration": 5457, "host_total_duration": 28494, "has_call_stack": false}, {"name": "TBackward", "calls": 6, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 18, "host_total_duration": 103, "has_call_stack": false}, {"name": "aten::reshape", "calls": 6, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 29, "host_total_duration": 55, "has_call_stack": false}, {"name": "ViewBackward", "calls": 6, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 25, "host_total_duration": 80, "has_call_stack": false}, {"name": "MeanBackward1", "calls": 6, "device_self_duration": 0, "device_total_duration": 144, "host_self_duration": 
541, "host_total_duration": 987, "has_call_stack": false}, {"name": "ReluBackward1", "calls": 294, "device_self_duration": 0, "device_total_duration": 27298, "host_self_duration": 2174, "host_total_duration": 11656, "has_call_stack": false}, {"name": "AddBackward0", "calls": 96, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 175, "host_total_duration": 175, "has_call_stack": false}, {"name": "CudnnBatchNormBackward", "calls": 318, "device_self_duration": 0, "device_total_duration": 61939, "host_self_duration": 4148, "host_total_duration": 34608, "has_call_stack": false}, {"name": "aten::cudnn_convolution_backward", "calls": 318, "device_self_duration": 0, "device_total_duration": 273428, "host_self_duration": 6081, "host_total_duration": 87772, "has_call_stack": false}, {"name": "CudnnConvolutionBackward", "calls": 318, "device_self_duration": 0, "device_total_duration": 273428, "host_self_duration": 3217, "host_total_duration": 90989, "has_call_stack": false}, {"name": "MaxPool2DWithIndicesBackward", "calls": 6, "device_self_duration": 0, "device_total_duration": 4677, "host_self_duration": 73, "host_total_duration": 484, "has_call_stack": false}] +{"data": {"columns": [{"type": "string", "name": "Name"}, {"type": "number", "name": "Calls"}, {"type": "number", "name": "Total Duration (us)"}, {"type": "number", "name": "Mean Duration (us)"}, {"type": "number", "name": "Max Duration (us)"}, {"type": "number", "name": "Min Duration (us)"}, {"type": "number", "name": "Mean Blocks Per SM", "tooltip": "Blocks Per SM:\nmin(blocks of this kernel / SM number of this GPU). If this number is less than 1, it indicates the GPU multiprocessors are not fully utilized."}, {"type": "number", "name": "Mean Est. Achieved Occupancy (%)", "tooltip": "Est. Achieved Occupancy:\nOccupancy is the ratio of active threads on an SM to the maximum number of active threads supported by the SM. 
The theoretical occupancy of a kernel is upper limit occupancy of this kernel, limited by multiple factors such as kernel shape, kernel used resource, and the GPU compute capability.Est. Achieved Occupancy of a kernel, OCC_K = min(threads of the kernel / SM number / max threads per SM, theoretical occupancy of the kernel). This overall number is the weighted sum of all kernels OCC_K using kernel's execution duration as weight."}], "rows": [["cask_cudnn::computeBOffsetsKernel(cask_cudnn::ComputeBOffsetsParams)", 72, 73, 1, 2, 1, 0.02, 0.0], ["cask_cudnn::computeOffsetsKernel(cask_cudnn::ComputeOffsetsParams)", 138, 342, 2, 4, 1, 0.13, 1.73], ["cask_cudnn::computeWgradBOffsetsKernel(cask_cudnn::ComputeWgradBOffsetsParams)", 66, 81, 1, 2, 1, 0.02, 0.0], ["cask_cudnn::computeWgradSplitKOffsetsKernel(cask_cudnn::ComputeSplitKOffsetsParams)", 66, 81, 1, 2, 1, 0.15, 1.68], ["void (anonymous namespace)::softmax_warp_backward(float*, float const*, float const*, int, int, int)", 6, 53, 9, 9, 8, 0.1, 1.0], ["void (anonymous namespace)::softmax_warp_forward(float*, float const*, int, int, int)", 6, 42, 7, 7, 7, 0.1, 1.0], ["void at::native::(anonymous namespace)::max_pool_backward_nchw(int, float const*, long const*, int, int, int, int, int, int, int, int, int, int, int, int, int, int, float*)", 6, 3822, 637, 638, 636, 1254.4, 100.0], ["void at::native::(anonymous namespace)::max_pool_forward_nchw(int, float const*, int, int, int, int, int, int, int, int, int, int, int, int, int, int, float*, long*)", 6, 1420, 237, 239, 234, 313.6, 100.0], ["void at::native::reduce_kernel<128, 4, at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4> >(at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4>)", 6, 44, 7, 8, 7, 0.02, 0.0], ["void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, float, 4> >(at::native::ReduceOp, unsigned int, float, 4>)", 6, 212, 35, 36, 35, 
51.2, 100.0], ["void at::native::unrolled_elementwise_kernel, at::detail::Array, OffsetCalculator<1, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast>(int, at::native::MulScalarFunctor, at::detail::Array, OffsetCalculator<1, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast)", 6, 144, 24, 24, 24, 156.8, 100.0], ["void at::native::unrolled_elementwise_kernel, OffsetCalculator<1, unsigned int>, char*, at::native::memory::LoadWithoutCast, at::detail::Array::StoreWithoutCast>(int, at::native::copy_device_to_device(at::TensorIterator&, bool)::{lambda()#2}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array, OffsetCalculator<1, unsigned int>, char*, at::native::memory::LoadWithoutCast, at::detail::Array::StoreWithoutCast)", 6, 30, 5, 5, 5, 1.56, 5.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::(anonymous namespace)::clamp_min_scalar_kernel_impl(at::TensorIterator&, c10::Scalar)::{lambda()#1}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array >(int, at::native::(anonymous namespace)::clamp_min_scalar_kernel_impl(at::TensorIterator&, c10::Scalar)::{lambda()#1}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array)", 294, 17860, 61, 252, 5, 666.65, 100.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", 3090, 39814, 13, 378, 1, 641.54, 92.32], ["void at::native::vectorized_elementwise_kernel<4, at::native::BUnaryFunctor >, at::detail::Array >(int, at::native::BUnaryFunctor >, at::detail::Array)", 318, 322, 1, 2, 1, 0.01, 0.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", 978, 1887, 2, 143, 0, 
599.07, 86.78], ["void at::native::vectorized_elementwise_kernel<4, at::native::MulScalarFunctor, at::detail::Array >(int, at::native::MulScalarFunctor, at::detail::Array)", 966, 2409, 2, 25, 1, 43.72, 58.39], ["void at::native::vectorized_elementwise_kernel<4, at::native::threshold_kernel_impl(at::TensorIteratorBase&, float, float)::{lambda(float, float)#1}, at::detail::Array >(int, at::native::threshold_kernel_impl(at::TensorIteratorBase&, float, float)::{lambda(float, float)#1}, at::detail::Array)", 294, 27298, 93, 377, 13, 653.06, 100.0], ["void cudnn::bn_bw_1C11_kernel_new(float, float, float, float, cudnnTensorStruct, float const*, cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float*, float*, float const*, float const*, float)", 264, 59642, 226, 915, 45, 4.34, 67.98], ["void cudnn::bn_bw_1C11_singleread(float, float, float, float, cudnnTensorStruct, float const*, cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float*, float*, float const*, float const*, float, cudnn::reduced_divisor, int, cudnn::reduced_divisor, cudnn::bnBwPersistentState*, int, float, float, float, int, float, cudnnStatus_t*, bool)", 54, 2297, 43, 73, 18, 20.81, 75.0], ["void cudnn::bn_fw_tr_1C11_kernel_NCHW(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float)", 150, 27060, 180, 452, 53, 3.12, 64.06], ["void cudnn::bn_fw_tr_1C11_singleread(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float, cudnn::reduced_divisor, int, cudnn::reduced_divisor, cudnn::bnFwPersistentState*, int, float, float, float, int, float, float, cudnnStatus_t*, bool)", 168, 7185, 43, 89, 13, 12.57, 75.0], ["void cudnn::cnn::im2col4d_kernel(cudnn::cnn::im2col4d_params, cudnnConvolutionStruct, cudnnTensor4dStruct, float const*, float*)", 6, 614, 102, 103, 101, 0.95, 24.0], ["void 
cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 12, 7068, 589, 987, 193, 85.34, 37.5], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 156, 66472, 426, 745, 345, 9.78, 38.0], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 6, 4065, 678, 692, 652, 6.4, 25.0], ["void cudnn::detail::dgrad_engine(int, int, int, float const*, int, float const*, int, float*, kernel_grad_params, unsigned long long, int, unsigned long long, int, float, int, int, int)", 162, 80756, 498, 1017, 323, 42.25, 29.97], ["void cudnn::ops::scalePackedTensor_kernel(cudnnTensor4dStruct, float*, float)", 162, 4631, 29, 143, 5, 496.39, 100.0], ["void cudnn::winograd::generateWinogradTilesKernel<0, float, float>(cudnn::winograd::GenerateWinogradTilesParams)", 36, 134, 4, 5, 2, 0.4, 3.0], ["void cudnn::winograd_nonfused::winogradForwardData4x4(cudnn::winograd_nonfused::WinogradDataParams)", 120, 4710, 39, 66, 17, 10.11, 50.0], ["void cudnn::winograd_nonfused::winogradForwardFilter4x4(cudnn::winograd_nonfused::WinogradFilterParams)", 120, 2662, 22, 67, 5, 8.68, 73.22], ["void cudnn::winograd_nonfused::winogradForwardOutput4x4(cudnn::winograd_nonfused::WinogradOutputParams)", 120, 5369, 45, 73, 19, 10.0, 50.0], ["void cudnn::winograd_nonfused::winogradWgradData4x4(cudnn::winograd_nonfused::WinogradDataParams)", 78, 4692, 60, 126, 20, 15.46, 38.0], ["void cudnn::winograd_nonfused::winogradWgradDelta4x4(cudnn::winograd_nonfused::WinogradDeltaParams)", 78, 4573, 59, 125, 17, 15.69, 50.0], ["void cudnn::winograd_nonfused::winogradWgradOutput4x4(cudnn::winograd_nonfused::WinogradWgradOutputParams)", 78, 1504, 19, 69, 5, 8.06, 41.33], ["void 
cunn_ClassNLLCriterion_updateGradInput_kernel(float*, float*, long*, float*, float*, int, int, int, int, long)", 6, 12, 2, 2, 2, 0.01, 0.0], ["void cunn_ClassNLLCriterion_updateOutput_kernel(float*, float*, float*, long*, float*, int, int, int, int, long)", 6, 18, 3, 3, 3, 0.01, 0.0], ["void explicit_convolve_sgemm(int, int, int, float const*, int, float const*, int, float*, kernel_conv_params, unsigned long long, int, unsigned long long, int, float, float, int, float const*, float const*)", 6, 4759, 793, 796, 790, 9.8, 31.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 90, 36957, 411, 748, 347, 12.34, 50.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 12, 5219, 435, 437, 432, 9.8, 31.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 60, 25782, 430, 729, 352, 3.9, 42.09], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 6, 3917, 653, 686, 595, 4.9, 25.0], ["void nchwToNhwcKernel(int, int, int, int, float const*, float*, float, float)", 12, 453, 38, 68, 9, 73.28, 100.0], ["void nhwcToNchwKernel(int, int, int, int, float const*, float*, float, float)", 6, 105, 18, 18, 17, 22.4, 100.0], ["void splitKreduce_kernel(cublasSplitKParams, float const*, float const*, float*, float const*, float const*, float const*)", 12, 30, 2, 3, 2, 4.44, 28.0], ["volta_scudnn_128x128_relu_interior_nn_v1", 6, 3010, 502, 508, 495, 9.8, 25.0], ["volta_scudnn_128x128_stridedB_interior_nn_v1", 18, 
4693, 261, 281, 252, 9.8, 25.0], ["volta_scudnn_128x128_stridedB_medium_nn_v1", 12, 3501, 292, 296, 286, 19.6, 25.0], ["volta_scudnn_128x128_stridedB_small_nn_v1", 6, 2995, 499, 505, 493, 19.6, 25.0], ["volta_scudnn_128x128_stridedB_splitK_medium_nn_v1", 6, 3720, 620, 623, 614, 5.6, 25.0], ["volta_scudnn_128x128_stridedB_splitK_small_nn_v1", 48, 20448, 426, 676, 307, 6.83, 25.0], ["volta_scudnn_128x32_sliced1x4_ldg4_relu_exp_medium_nhwc_tn_v1", 6, 3270, 545, 627, 526, 4.9, 25.0], ["volta_scudnn_128x64_relu_interior_nn_v1", 30, 8022, 267, 316, 94, 37.1, 25.0], ["volta_scudnn_128x64_relu_medium_nn_v1", 6, 3627, 604, 606, 603, 39.2, 25.0], ["volta_scudnn_128x64_relu_small_nn_v1", 12, 3265, 272, 279, 254, 9.8, 25.0], ["volta_scudnn_128x64_relu_xregs_large_nn_v1", 6, 3200, 533, 607, 516, 4.9, 19.0], ["volta_scudnn_128x64_stridedB_interior_nn_v1", 30, 9597, 320, 510, 252, 12.9, 19.0], ["volta_scudnn_128x64_stridedB_small_nn_v1", 6, 584, 97, 100, 93, 9.8, 19.0], ["volta_scudnn_128x64_stridedB_splitK_xregs_large_nn_v1", 12, 7817, 651, 671, 635, 15.96, 19.0], ["volta_scudnn_winograd_128x128_ldg1_ldg4_relu_tile148t_nt_v1", 36, 12704, 353, 362, 344, 22.4, 25.0], ["volta_sgemm_128x32_nt", 24, 8629, 360, 477, 18, 0.97, 11.51], ["volta_sgemm_32x128_nn", 18, 3053, 170, 171, 168, 22.05, 50.0], ["volta_sgemm_32x128_nt", 18, 2843, 158, 159, 156, 22.05, 50.0], ["volta_sgemm_64x32_sliced1x4_nn", 6, 150, 25, 26, 24, 2.0, 25.0], ["volta_sgemm_64x32_sliced1x4_tn", 6, 149, 25, 26, 24, 1.0, 13.0], ["volta_sgemm_64x64_nn", 42, 8551, 204, 217, 195, 12.34, 24.14], ["volta_sgemm_64x64_nt", 102, 21084, 207, 279, 184, 10.24, 19.38]]}} +{"total": {"columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["cask_cudnn::computeBOffsetsKernel(cask_cudnn::ComputeBOffsetsParams)", 73.0], ["cask_cudnn::computeOffsetsKernel(cask_cudnn::ComputeOffsetsParams)", 342.0], ["cask_cudnn::computeWgradBOffsetsKernel(cask_cudnn::ComputeWgradBOffsetsParams)", 81.0], 
["cask_cudnn::computeWgradSplitKOffsetsKernel(cask_cudnn::ComputeSplitKOffsetsParams)", 81.0], ["void (anonymous namespace)::softmax_warp_backward(float*, float const*, float const*, int, int, int)", 53.0], ["void (anonymous namespace)::softmax_warp_forward(float*, float const*, int, int, int)", 42.0], ["void at::native::(anonymous namespace)::max_pool_backward_nchw(int, float const*, long const*, int, int, int, int, int, int, int, int, int, int, int, int, int, int, float*)", 3822.0], ["void at::native::(anonymous namespace)::max_pool_forward_nchw(int, float const*, int, int, int, int, int, int, int, int, int, int, int, int, int, int, float*, long*)", 1420.0], ["void at::native::reduce_kernel<128, 4, at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4> >(at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4>)", 44.0], ["void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, float, 4> >(at::native::ReduceOp, unsigned int, float, 4>)", 212.0], ["void at::native::unrolled_elementwise_kernel, at::detail::Array, OffsetCalculator<1, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast>(int, at::native::MulScalarFunctor, at::detail::Array, OffsetCalculator<1, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast)", 144.0], ["void at::native::unrolled_elementwise_kernel, OffsetCalculator<1, unsigned int>, char*, at::native::memory::LoadWithoutCast, at::detail::Array::StoreWithoutCast>(int, at::native::copy_device_to_device(at::TensorIterator&, bool)::{lambda()#2}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array, OffsetCalculator<1, unsigned int>, char*, at::native::memory::LoadWithoutCast, at::detail::Array::StoreWithoutCast)", 30.0], ["void 
at::native::vectorized_elementwise_kernel<4, at::native::(anonymous namespace)::clamp_min_scalar_kernel_impl(at::TensorIterator&, c10::Scalar)::{lambda()#1}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array >(int, at::native::(anonymous namespace)::clamp_min_scalar_kernel_impl(at::TensorIterator&, c10::Scalar)::{lambda()#1}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array)", 17860.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", 39814.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::BUnaryFunctor >, at::detail::Array >(int, at::native::BUnaryFunctor >, at::detail::Array)", 322.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", 1887.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::MulScalarFunctor, at::detail::Array >(int, at::native::MulScalarFunctor, at::detail::Array)", 2409.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::threshold_kernel_impl(at::TensorIteratorBase&, float, float)::{lambda(float, float)#1}, at::detail::Array >(int, at::native::threshold_kernel_impl(at::TensorIteratorBase&, float, float)::{lambda(float, float)#1}, at::detail::Array)", 27298.0], ["void cudnn::bn_bw_1C11_kernel_new(float, float, float, float, cudnnTensorStruct, float const*, cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float*, float*, float const*, float const*, float)", 59642.0], ["void cudnn::bn_bw_1C11_singleread(float, float, float, float, cudnnTensorStruct, float const*, cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float*, float*, float const*, float const*, float, cudnn::reduced_divisor, int, cudnn::reduced_divisor, cudnn::bnBwPersistentState*, int, float, float, float, int, 
float, cudnnStatus_t*, bool)", 2297.0], ["void cudnn::bn_fw_tr_1C11_kernel_NCHW(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float)", 27060.0], ["void cudnn::bn_fw_tr_1C11_singleread(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float, cudnn::reduced_divisor, int, cudnn::reduced_divisor, cudnn::bnFwPersistentState*, int, float, float, float, int, float, float, cudnnStatus_t*, bool)", 7185.0], ["void cudnn::cnn::im2col4d_kernel(cudnn::cnn::im2col4d_params, cudnnConvolutionStruct, cudnnTensor4dStruct, float const*, float*)", 614.0], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 7068.0], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 66472.0], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 4065.0], ["void cudnn::detail::dgrad_engine(int, int, int, float const*, int, float const*, int, float*, kernel_grad_params, unsigned long long, int, unsigned long long, int, float, int, int, int)", 80756.0], ["void cudnn::ops::scalePackedTensor_kernel(cudnnTensor4dStruct, float*, float)", 4631.0], ["void cudnn::winograd::generateWinogradTilesKernel<0, float, float>(cudnn::winograd::GenerateWinogradTilesParams)", 134.0], ["void cudnn::winograd_nonfused::winogradForwardData4x4(cudnn::winograd_nonfused::WinogradDataParams)", 4710.0], ["void cudnn::winograd_nonfused::winogradForwardFilter4x4(cudnn::winograd_nonfused::WinogradFilterParams)", 2662.0], ["void 
cudnn::winograd_nonfused::winogradForwardOutput4x4(cudnn::winograd_nonfused::WinogradOutputParams)", 5369.0], ["void cudnn::winograd_nonfused::winogradWgradData4x4(cudnn::winograd_nonfused::WinogradDataParams)", 4692.0], ["void cudnn::winograd_nonfused::winogradWgradDelta4x4(cudnn::winograd_nonfused::WinogradDeltaParams)", 4573.0], ["void cudnn::winograd_nonfused::winogradWgradOutput4x4(cudnn::winograd_nonfused::WinogradWgradOutputParams)", 1504.0], ["void cunn_ClassNLLCriterion_updateGradInput_kernel(float*, float*, long*, float*, float*, int, int, int, int, long)", 12.0], ["void cunn_ClassNLLCriterion_updateOutput_kernel(float*, float*, float*, long*, float*, int, int, int, int, long)", 18.0], ["void explicit_convolve_sgemm(int, int, int, float const*, int, float const*, int, float*, kernel_conv_params, unsigned long long, int, unsigned long long, int, float, float, int, float const*, float const*)", 4759.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 36957.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 5219.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 25782.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 3917.0], ["void nchwToNhwcKernel(int, int, int, int, float const*, float*, float, float)", 453.0], ["void nhwcToNchwKernel(int, int, int, int, float const*, float*, float, float)", 105.0], ["void splitKreduce_kernel(cublasSplitKParams, float const*, float 
const*, float*, float const*, float const*, float const*)", 30.0], ["volta_scudnn_128x128_relu_interior_nn_v1", 3010.0], ["volta_scudnn_128x128_stridedB_interior_nn_v1", 4693.0], ["volta_scudnn_128x128_stridedB_medium_nn_v1", 3501.0], ["volta_scudnn_128x128_stridedB_small_nn_v1", 2995.0], ["volta_scudnn_128x128_stridedB_splitK_medium_nn_v1", 3720.0], ["volta_scudnn_128x128_stridedB_splitK_small_nn_v1", 20448.0], ["volta_scudnn_128x32_sliced1x4_ldg4_relu_exp_medium_nhwc_tn_v1", 3270.0], ["volta_scudnn_128x64_relu_interior_nn_v1", 8022.0], ["volta_scudnn_128x64_relu_medium_nn_v1", 3627.0], ["volta_scudnn_128x64_relu_small_nn_v1", 3265.0], ["volta_scudnn_128x64_relu_xregs_large_nn_v1", 3200.0], ["volta_scudnn_128x64_stridedB_interior_nn_v1", 9597.0], ["volta_scudnn_128x64_stridedB_small_nn_v1", 584.0], ["volta_scudnn_128x64_stridedB_splitK_xregs_large_nn_v1", 7817.0], ["volta_scudnn_winograd_128x128_ldg1_ldg4_relu_tile148t_nt_v1", 12704.0], ["volta_sgemm_128x32_nt", 8629.0], ["volta_sgemm_32x128_nn", 3053.0], ["volta_sgemm_32x128_nt", 2843.0], ["volta_sgemm_64x32_sliced1x4_nn", 150.0], ["volta_sgemm_64x32_sliced1x4_tn", 149.0], ["volta_sgemm_64x64_nn", 8551.0], ["volta_sgemm_64x64_nt", 21084.0]]}} +{"steps": {"columns": [{"type": "string", "name": "Step"}, {"type": "number", "name": "Kernel"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "Memcpy"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "Memset"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "Runtime"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "DataLoader"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "CPU Exec"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}, {"type": "number", "name": "Other"}, {"type": "string", "role": "tooltip", "p": {"html": "true"}}], "rows": [["5", 
99778, "
Step 5
Total: 182306us
Kernel: 99778us
Percentage: 54.73%
", 3606, "
Step 5
Total: 182306us
Memcpy: 3606us
Percentage: 1.98%
", 98, "
Step 5
Total: 182306us
Memset: 98us
Percentage: 0.05%
", 41028, "
Step 5
Total: 182306us
Runtime: 41028us
Percentage: 22.51%
", 4341, "
Step 5
Total: 182306us
DataLoader: 4341us
Percentage: 2.38%
", 27460, "
Step 5
Total: 182306us
CPU Exec: 27460us
Percentage: 15.06%
", 5995, "
Step 5
Total: 182306us
Other: 5995us
Percentage: 3.29%
"], ["6", 99208, "
Step 6
Total: 126183us
Kernel: 99208us
Percentage: 78.62%
", 2948, "
Step 6
Total: 126183us
Memcpy: 2948us
Percentage: 2.34%
", 98, "
Step 6
Total: 126183us
Memset: 98us
Percentage: 0.08%
", 3406, "
Step 6
Total: 126183us
Runtime: 3406us
Percentage: 2.7%
", 0, "
Step 6
Total: 126183us
DataLoader: 0us
Percentage: 0.0%
", 16404, "
Step 6
Total: 126183us
CPU Exec: 16404us
Percentage: 13.0%
", 4119, "
Step 6
Total: 126183us
Other: 4119us
Percentage: 3.26%
"], ["7", 99114, "
Step 7
Total: 127181us
Kernel: 99114us
Percentage: 77.93%
", 2949, "
Step 7
Total: 127181us
Memcpy: 2949us
Percentage: 2.32%
", 98, "
Step 7
Total: 127181us
Memset: 98us
Percentage: 0.08%
", 3417, "
Step 7
Total: 127181us
Runtime: 3417us
Percentage: 2.69%
", 6, "
Step 7
Total: 127181us
DataLoader: 6us
Percentage: 0.0%
", 19521, "
Step 7
Total: 127181us
CPU Exec: 19521us
Percentage: 15.35%
", 2076, "
Step 7
Total: 127181us
Other: 2076us
Percentage: 1.63%
"], ["8", 99021, "
Step 8
Total: 123079us
Kernel: 99021us
Percentage: 80.45%
", 2975, "
Step 8
Total: 123079us
Memcpy: 2975us
Percentage: 2.42%
", 97, "
Step 8
Total: 123079us
Memset: 97us
Percentage: 0.08%
", 3544, "
Step 8
Total: 123079us
Runtime: 3544us
Percentage: 2.88%
", 0, "
Step 8
Total: 123079us
DataLoader: 0us
Percentage: 0.0%
", 15464, "
Step 8
Total: 123079us
CPU Exec: 15464us
Percentage: 12.56%
", 1978, "
Step 8
Total: 123079us
Other: 1978us
Percentage: 1.61%
"], ["9", 98791, "
Step 9
Total: 163461us
Kernel: 98791us
Percentage: 60.44%
", 3596, "
Step 9
Total: 163461us
Memcpy: 3596us
Percentage: 2.2%
", 97, "
Step 9
Total: 163461us
Memset: 97us
Percentage: 0.06%
", 8275, "
Step 9
Total: 163461us
Runtime: 8275us
Percentage: 5.06%
", 1370, "
Step 9
Total: 163461us
DataLoader: 1370us
Percentage: 0.84%
", 43905, "
Step 9
Total: 163461us
CPU Exec: 43905us
Percentage: 26.86%
", 7427, "
Step 9
Total: 163461us
Other: 7427us
Percentage: 4.54%
"], ["10", 98956, "
Step 10
Total: 124198us
Kernel: 98956us
Percentage: 79.68%
", 2885, "
Step 10
Total: 124198us
Memcpy: 2885us
Percentage: 2.32%
", 98, "
Step 10
Total: 124198us
Memset: 98us
Percentage: 0.08%
", 3714, "
Step 10
Total: 124198us
Runtime: 3714us
Percentage: 2.99%
", 1400, "
Step 10
Total: 124198us
DataLoader: 1400us
Percentage: 1.13%
", 13235, "
Step 10
Total: 124198us
CPU Exec: 13235us
Percentage: 10.66%
", 3910, "
Step 10
Total: 124198us
Other: 3910us
Percentage: 3.15%
"]]}, "performance": [{"name": "Average Step Time", "description": "", "value": 141068, "extra": 100, "children": [{"name": "Kernel", "description": "", "value": 99145, "extra": 70.28}, {"name": "Memcpy", "description": "", "value": 3160, "extra": 2.24}, {"name": "Memset", "description": "", "value": 98, "extra": 0.07}, {"name": "Runtime", "description": "", "value": 10564, "extra": 7.49}, {"name": "DataLoader", "description": "", "value": 1186, "extra": 0.84}, {"name": "CPU Exec", "description": "", "value": 22665, "extra": 16.07}, {"name": "Other", "description": "", "value": 4251, "extra": 3.01}]}], "recommendations": "
  • N/A
", "environments": [{"title": "Number of Worker(s)", "value": "1"}, {"title": "Device Type", "value": "GPU"}], "gpu_metrics": {"title": "GPU Summary", "data": [{"title": "GPU 0:", "value": ""}, {"title": "Name", "value": "Tesla V100-DGXS-32GB"}, {"title": "Memory", "value": "31.74 GB"}, {"title": "Compute Capability", "value": "7.0"}, {"title": "GPU Utilization", "value": "70.27 %"}, {"title": "Est. SM Efficiency", "value": "69.22 %"}, {"title": "Est. Achieved Occupancy", "value": "48.91 %"}], "tooltip": "The GPU usage metrics:\n\nGPU Utilization:\nGPU busy time / All steps time. GPU busy time is the time during which there is at least one GPU kernel running on it. All steps time is the total time of all profiler steps(or called as iterations).\n\nEst. SM Efficiency:\nEstimated Stream Multiprocessor Efficiency. Est. SM Efficiency of a kernel, SM_Eff_K = min(blocks of this kernel / SM number of this GPU, 100%). This overall number is the sum of all kernels' SM_Eff_K weighted by kernel's execution duration, divided by all steps time.\n\nEst. Achieved Occupancy:\nOccupancy is the ratio of active threads on an SM to the maximum number of active threads supported by the SM. The theoretical occupancy of a kernel is upper limit occupancy of this kernel, limited by multiple factors such as kernel shape, kernel used resource, and the GPU compute capability.Est. Achieved Occupancy of a kernel, OCC_K = min(threads of the kernel / SM number / max threads per SM, theoretical occupancy of the kernel). 
This overall number is the weighted sum of all kernels OCC_K using kernel's execution duration as weight."}} +{"device_total_time": {"title": "Device Total Time (us)", "columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["aten::cudnn_convolution_backward", 274794], ["CudnnConvolutionBackward", 274794], ["aten::cudnn_convolution_backward_weight", 141300], ["aten::cudnn_convolution_backward_input", 133494], ["aten::cudnn_convolution", 128683], ["aten::_convolution", 128683], ["aten::convolution", 128683], ["aten::conv2d", 128683], ["aten::cudnn_batch_norm_backward", 61899], ["CudnnBatchNormBackward", 61899], ["aten::cudnn_batch_norm", 34315], ["aten::_batch_norm_impl_index", 34315], ["aten::batch_norm", 34315], ["aten::threshold_backward", 27280], ["ReluBackward1", 27280], ["aten::add_", 24052], ["aten::to", 18959], ["aten::copy_", 18959], ["aten::clamp_min", 17862], ["aten::clamp_min_", 17862], ["aten::relu_", 17862], ["aten::add", 16026], ["aten::max_pool2d_with_indices_backward", 4695], ["MaxPool2DWithIndicesBackward", 4695], ["torch::autograd::AccumulateGrad", 3012], ["aten::mul_", 2395], ["aten::fill_", 1888], ["aten::zero_", 1882], ["aten::max_pool2d_with_indices", 1422], ["aten::max_pool2d", 1422], ["aten::mm", 274], ["AddmmBackward", 274], ["aten::mean", 210], ["aten::adaptive_avg_pool2d", 210], ["aten::addmm", 197], ["aten::linear", 197], ["aten::div", 145], ["MeanBackward1", 145], ["aten::cross_entropy_loss", 60], ["aten::_log_softmax_backward_data", 51], ["LogSoftmaxBackward", 51], ["aten::sum", 45], ["aten::_log_softmax", 42], ["aten::log_softmax", 42], ["aten::nll_loss_forward", 18], ["aten::nll_loss", 18], ["aten::nll_loss_nd", 18], ["aten::nll_loss_backward", 18], ["NllLossBackward", 18], ["aten::ones_like", 6]]}, "device_self_time": {"title": "Device Self Time (us)", "columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["aten::cudnn_convolution_backward_weight", 
141300], ["aten::cudnn_convolution_backward_input", 133494], ["aten::cudnn_convolution", 128683], ["aten::cudnn_batch_norm_backward", 61899], ["aten::cudnn_batch_norm", 34315], ["aten::threshold_backward", 27280], ["aten::add_", 24052], ["aten::copy_", 18959], ["aten::clamp_min", 17862], ["aten::add", 16026], ["aten::max_pool2d_with_indices_backward", 3838], ["aten::mul_", 2395], ["aten::fill_", 1888], ["aten::max_pool2d_with_indices", 1422], ["aten::mm", 274], ["aten::mean", 210], ["aten::addmm", 197], ["aten::div", 145], ["aten::_log_softmax_backward_data", 51], ["aten::sum", 45], ["aten::_log_softmax", 42], ["aten::nll_loss_forward", 18], ["aten::nll_loss_backward", 18]]}, "host_total_time": {"title": "Host Total Time (us)", "columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["CudnnConvolutionBackward", 119890], ["aten::cudnn_convolution_backward", 115797], ["aten::batch_norm", 105589], ["aten::add_", 97540], ["aten::_batch_norm_impl_index", 95925], ["aten::conv2d", 91000], ["aten::cudnn_batch_norm", 87823], ["aten::empty", 82024], ["aten::convolution", 81781], ["aten::_convolution", 74086], ["aten::cudnn_convolution", 64167], ["aten::cudnn_convolution_backward_weight", 60712], ["aten::to", 57776], ["aten::copy_", 56915], ["aten::cudnn_convolution_backward_input", 47359], ["CudnnBatchNormBackward", 41825], ["torch::autograd::AccumulateGrad", 37189], ["aten::cudnn_batch_norm_backward", 36641], ["aten::mul_", 35389], ["aten::relu_", 29432], ["aten::zero_", 28309], ["aten::add", 23831], ["aten::clamp_min_", 19059], ["aten::empty_like", 18591], ["aten::fill_", 17657], ["aten::resize_", 15019], ["ReluBackward1", 14944], ["aten::clamp_min", 12503], ["aten::threshold_backward", 12062], ["aten::view", 9046], ["AddmmBackward", 2026], ["aten::linear", 1463], ["aten::mm", 1424], ["aten::zeros", 1319], ["aten::cross_entropy_loss", 1225], ["aten::addmm", 1060], ["NllLossBackward", 889], ["aten::nll_loss_backward", 747], ["aten::t", 
725], ["MeanBackward1", 663], ["aten::max_pool2d", 599], ["MaxPool2DWithIndicesBackward", 590], ["aten::adaptive_avg_pool2d", 581], ["aten::log_softmax", 580], ["aten::nll_loss_nd", 507], ["LogSoftmaxBackward", 500], ["aten::max_pool2d_with_indices_backward", 493], ["aten::ones_like", 470], ["aten::div", 469], ["aten::mean", 454], ["aten::empty_strided", 453], ["aten::_log_softmax_backward_data", 424], ["aten::max_pool2d_with_indices", 422], ["aten::_log_softmax", 420], ["aten::nll_loss", 418], ["aten::transpose", 413], ["aten::sum", 411], ["aten::nll_loss_forward", 343], ["aten::detach_", 323], ["aten::as_strided", 244], ["aten::expand", 237], ["aten::set_", 221], ["AddBackward0", 200], ["aten::flatten", 163], ["detach_", 156], ["TBackward", 151], ["ViewBackward", 132], ["aten::reshape", 88], ["aten::conj", 15]]}, "host_self_time": {"title": "Host Self Time (us)", "columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["aten::empty", 82024], ["aten::add_", 62385], ["aten::cudnn_convolution", 35632], ["aten::cudnn_convolution_backward_input", 31902], ["aten::cudnn_convolution_backward_weight", 30672], ["aten::mul_", 24617], ["aten::cudnn_batch_norm", 23800], ["aten::add", 17808], ["aten::cudnn_batch_norm_backward", 15118], ["aten::resize_", 15019], ["aten::zero_", 10815], ["aten::relu_", 10373], ["aten::_convolution", 9919], ["aten::batch_norm", 9664], ["aten::fill_", 9660], ["aten::conv2d", 9219], ["aten::view", 9046], ["aten::clamp_min", 8409], ["aten::empty_like", 8385], ["aten::_batch_norm_impl_index", 8102], ["aten::threshold_backward", 7820], ["aten::cudnn_convolution_backward", 7726], ["aten::convolution", 7695], ["torch::autograd::AccumulateGrad", 7181], ["aten::clamp_min_", 6556], ["CudnnBatchNormBackward", 5184], ["CudnnConvolutionBackward", 4093], ["ReluBackward1", 2882], ["aten::mm", 1032], ["aten::zeros", 877], ["aten::addmm", 652], ["aten::to", 547], ["aten::nll_loss_backward", 463], ["aten::empty_strided", 
453], ["aten::div", 343], ["aten::max_pool2d_with_indices", 325], ["aten::t", 312], ["aten::nll_loss_forward", 264], ["aten::transpose", 254], ["aten::as_strided", 244], ["AddmmBackward", 244], ["aten::mean", 233], ["aten::copy_", 230], ["aten::set_", 221], ["aten::max_pool2d_with_indices_backward", 213], ["aten::sum", 201], ["AddBackward0", 200], ["aten::max_pool2d", 177], ["aten::_log_softmax", 168], ["aten::detach_", 167], ["detach_", 156], ["aten::expand", 152], ["NllLossBackward", 142], ["aten::_log_softmax_backward_data", 142], ["aten::linear", 139], ["aten::cross_entropy_loss", 138], ["aten::adaptive_avg_pool2d", 127], ["aten::log_softmax", 106], ["MaxPool2DWithIndicesBackward", 97], ["aten::ones_like", 96], ["MeanBackward1", 95], ["aten::nll_loss_nd", 89], ["aten::flatten", 88], ["LogSoftmaxBackward", 76], ["aten::nll_loss", 75], ["ViewBackward", 44], ["aten::reshape", 43], ["TBackward", 33], ["aten::conj", 15]]}} +[{"name": "aten::cudnn_convolution_backward_weight", "calls": 318, "device_self_duration": 141300, "device_total_duration": 141300, "host_self_duration": 30672, "host_total_duration": 60712, "has_call_stack": false}, {"name": "aten::cudnn_convolution_backward_input", "calls": 312, "device_self_duration": 133494, "device_total_duration": 133494, "host_self_duration": 31902, "host_total_duration": 47359, "has_call_stack": false}, {"name": "aten::cudnn_convolution", "calls": 318, "device_self_duration": 128683, "device_total_duration": 128683, "host_self_duration": 35632, "host_total_duration": 64167, "has_call_stack": true}, {"name": "aten::cudnn_batch_norm_backward", "calls": 318, "device_self_duration": 61899, "device_total_duration": 61899, "host_self_duration": 15118, "host_total_duration": 36641, "has_call_stack": false}, {"name": "aten::cudnn_batch_norm", "calls": 318, "device_self_duration": 34315, "device_total_duration": 34315, "host_self_duration": 23800, "host_total_duration": 87823, "has_call_stack": true}, {"name": 
"aten::threshold_backward", "calls": 294, "device_self_duration": 27280, "device_total_duration": 27280, "host_self_duration": 7820, "host_total_duration": 12062, "has_call_stack": false}, {"name": "aten::add_", "calls": 2994, "device_self_duration": 24052, "device_total_duration": 24052, "host_self_duration": 62385, "host_total_duration": 97540, "has_call_stack": true}, {"name": "aten::copy_", "calls": 12, "device_self_duration": 18959, "device_total_duration": 18959, "host_self_duration": 230, "host_total_duration": 56915, "has_call_stack": true}, {"name": "aten::clamp_min", "calls": 294, "device_self_duration": 17862, "device_total_duration": 17862, "host_self_duration": 8409, "host_total_duration": 12503, "has_call_stack": true}, {"name": "aten::add", "calls": 414, "device_self_duration": 16026, "device_total_duration": 16026, "host_self_duration": 17808, "host_total_duration": 23831, "has_call_stack": true}, {"name": "aten::max_pool2d_with_indices_backward", "calls": 6, "device_self_duration": 3838, "device_total_duration": 4695, "host_self_duration": 213, "host_total_duration": 493, "has_call_stack": false}, {"name": "aten::mul_", "calls": 966, "device_self_duration": 2395, "device_total_duration": 2395, "host_self_duration": 24617, "host_total_duration": 35389, "has_call_stack": true}, {"name": "aten::fill_", "calls": 978, "device_self_duration": 1888, "device_total_duration": 1888, "host_self_duration": 9660, "host_total_duration": 17657, "has_call_stack": true}, {"name": "aten::max_pool2d_with_indices", "calls": 6, "device_self_duration": 1422, "device_total_duration": 1422, "host_self_duration": 325, "host_total_duration": 422, "has_call_stack": true}, {"name": "aten::mm", "calls": 12, "device_self_duration": 274, "device_total_duration": 274, "host_self_duration": 1032, "host_total_duration": 1424, "has_call_stack": false}, {"name": "aten::mean", "calls": 6, "device_self_duration": 210, "device_total_duration": 210, "host_self_duration": 233, 
"host_total_duration": 454, "has_call_stack": true}, {"name": "aten::addmm", "calls": 6, "device_self_duration": 197, "device_total_duration": 197, "host_self_duration": 652, "host_total_duration": 1060, "has_call_stack": true}, {"name": "aten::div", "calls": 6, "device_self_duration": 145, "device_total_duration": 145, "host_self_duration": 343, "host_total_duration": 469, "has_call_stack": false}, {"name": "aten::_log_softmax_backward_data", "calls": 6, "device_self_duration": 51, "device_total_duration": 51, "host_self_duration": 142, "host_total_duration": 424, "has_call_stack": false}, {"name": "aten::sum", "calls": 6, "device_self_duration": 45, "device_total_duration": 45, "host_self_duration": 201, "host_total_duration": 411, "has_call_stack": false}, {"name": "aten::_log_softmax", "calls": 6, "device_self_duration": 42, "device_total_duration": 42, "host_self_duration": 168, "host_total_duration": 420, "has_call_stack": true}, {"name": "aten::nll_loss_forward", "calls": 6, "device_self_duration": 18, "device_total_duration": 18, "host_self_duration": 264, "host_total_duration": 343, "has_call_stack": true}, {"name": "aten::nll_loss_backward", "calls": 6, "device_self_duration": 18, "device_total_duration": 18, "host_self_duration": 463, "host_total_duration": 747, "has_call_stack": false}, {"name": "aten::empty", "calls": 4212, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 82024, "host_total_duration": 82024, "has_call_stack": true}, {"name": "aten::zero_", "calls": 996, "device_self_duration": 0, "device_total_duration": 1882, "host_self_duration": 10815, "host_total_duration": 28309, "has_call_stack": true}, {"name": "aten::zeros", "calls": 24, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 877, "host_total_duration": 1319, "has_call_stack": true}, {"name": "aten::to", "calls": 36, "device_self_duration": 0, "device_total_duration": 18959, "host_self_duration": 547, "host_total_duration": 
57776, "has_call_stack": true}, {"name": "detach_", "calls": 12, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 156, "host_total_duration": 156, "has_call_stack": true}, {"name": "aten::detach_", "calls": 12, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 167, "host_total_duration": 323, "has_call_stack": true}, {"name": "aten::set_", "calls": 12, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 221, "host_total_duration": 221, "has_call_stack": true}, {"name": "aten::empty_strided", "calls": 18, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 453, "host_total_duration": 453, "has_call_stack": true}, {"name": "aten::resize_", "calls": 1896, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 15019, "host_total_duration": 15019, "has_call_stack": true}, {"name": "aten::_convolution", "calls": 318, "device_self_duration": 0, "device_total_duration": 128683, "host_self_duration": 9919, "host_total_duration": 74086, "has_call_stack": true}, {"name": "aten::convolution", "calls": 318, "device_self_duration": 0, "device_total_duration": 128683, "host_self_duration": 7695, "host_total_duration": 81781, "has_call_stack": true}, {"name": "aten::conv2d", "calls": 318, "device_self_duration": 0, "device_total_duration": 128683, "host_self_duration": 9219, "host_total_duration": 91000, "has_call_stack": true}, {"name": "aten::empty_like", "calls": 336, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 8385, "host_total_duration": 18591, "has_call_stack": true}, {"name": "aten::view", "calls": 654, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 9046, "host_total_duration": 9046, "has_call_stack": true}, {"name": "aten::_batch_norm_impl_index", "calls": 318, "device_self_duration": 0, "device_total_duration": 34315, "host_self_duration": 8102, "host_total_duration": 95925, 
"has_call_stack": true}, {"name": "aten::batch_norm", "calls": 318, "device_self_duration": 0, "device_total_duration": 34315, "host_self_duration": 9664, "host_total_duration": 105589, "has_call_stack": true}, {"name": "aten::clamp_min_", "calls": 294, "device_self_duration": 0, "device_total_duration": 17862, "host_self_duration": 6556, "host_total_duration": 19059, "has_call_stack": true}, {"name": "aten::relu_", "calls": 294, "device_self_duration": 0, "device_total_duration": 17862, "host_self_duration": 10373, "host_total_duration": 29432, "has_call_stack": true}, {"name": "aten::max_pool2d", "calls": 6, "device_self_duration": 0, "device_total_duration": 1422, "host_self_duration": 177, "host_total_duration": 599, "has_call_stack": true}, {"name": "aten::adaptive_avg_pool2d", "calls": 6, "device_self_duration": 0, "device_total_duration": 210, "host_self_duration": 127, "host_total_duration": 581, "has_call_stack": true}, {"name": "aten::flatten", "calls": 6, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 88, "host_total_duration": 163, "has_call_stack": true}, {"name": "aten::as_strided", "calls": 42, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 244, "host_total_duration": 244, "has_call_stack": true}, {"name": "aten::transpose", "calls": 30, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 254, "host_total_duration": 413, "has_call_stack": true}, {"name": "aten::t", "calls": 30, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 312, "host_total_duration": 725, "has_call_stack": true}, {"name": "aten::expand", "calls": 12, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 152, "host_total_duration": 237, "has_call_stack": true}, {"name": "aten::linear", "calls": 6, "device_self_duration": 0, "device_total_duration": 197, "host_self_duration": 139, "host_total_duration": 1463, "has_call_stack": true}, {"name": 
"aten::log_softmax", "calls": 6, "device_self_duration": 0, "device_total_duration": 42, "host_self_duration": 106, "host_total_duration": 580, "has_call_stack": true}, {"name": "aten::nll_loss", "calls": 6, "device_self_duration": 0, "device_total_duration": 18, "host_self_duration": 75, "host_total_duration": 418, "has_call_stack": true}, {"name": "aten::nll_loss_nd", "calls": 6, "device_self_duration": 0, "device_total_duration": 18, "host_self_duration": 89, "host_total_duration": 507, "has_call_stack": true}, {"name": "aten::cross_entropy_loss", "calls": 6, "device_self_duration": 0, "device_total_duration": 60, "host_self_duration": 138, "host_total_duration": 1225, "has_call_stack": true}, {"name": "aten::ones_like", "calls": 6, "device_self_duration": 0, "device_total_duration": 6, "host_self_duration": 96, "host_total_duration": 470, "has_call_stack": true}, {"name": "NllLossBackward", "calls": 6, "device_self_duration": 0, "device_total_duration": 18, "host_self_duration": 142, "host_total_duration": 889, "has_call_stack": false}, {"name": "LogSoftmaxBackward", "calls": 6, "device_self_duration": 0, "device_total_duration": 51, "host_self_duration": 76, "host_total_duration": 500, "has_call_stack": false}, {"name": "aten::conj", "calls": 12, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 15, "host_total_duration": 15, "has_call_stack": false}, {"name": "AddmmBackward", "calls": 6, "device_self_duration": 0, "device_total_duration": 274, "host_self_duration": 244, "host_total_duration": 2026, "has_call_stack": false}, {"name": "torch::autograd::AccumulateGrad", "calls": 966, "device_self_duration": 0, "device_total_duration": 3012, "host_self_duration": 7181, "host_total_duration": 37189, "has_call_stack": false}, {"name": "TBackward", "calls": 6, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 33, "host_total_duration": 151, "has_call_stack": false}, {"name": "aten::reshape", "calls": 6, 
"device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 43, "host_total_duration": 88, "has_call_stack": false}, {"name": "ViewBackward", "calls": 6, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 44, "host_total_duration": 132, "has_call_stack": false}, {"name": "MeanBackward1", "calls": 6, "device_self_duration": 0, "device_total_duration": 145, "host_self_duration": 95, "host_total_duration": 663, "has_call_stack": false}, {"name": "ReluBackward1", "calls": 294, "device_self_duration": 0, "device_total_duration": 27280, "host_self_duration": 2882, "host_total_duration": 14944, "has_call_stack": false}, {"name": "AddBackward0", "calls": 96, "device_self_duration": 0, "device_total_duration": 0, "host_self_duration": 200, "host_total_duration": 200, "has_call_stack": false}, {"name": "CudnnBatchNormBackward", "calls": 318, "device_self_duration": 0, "device_total_duration": 61899, "host_self_duration": 5184, "host_total_duration": 41825, "has_call_stack": false}, {"name": "aten::cudnn_convolution_backward", "calls": 318, "device_self_duration": 0, "device_total_duration": 274794, "host_self_duration": 7726, "host_total_duration": 115797, "has_call_stack": false}, {"name": "CudnnConvolutionBackward", "calls": 318, "device_self_duration": 0, "device_total_duration": 274794, "host_self_duration": 4093, "host_total_duration": 119890, "has_call_stack": false}, {"name": "MaxPool2DWithIndicesBackward", "calls": 6, "device_self_duration": 0, "device_total_duration": 4695, "host_self_duration": 97, "host_total_duration": 590, "has_call_stack": false}] +{"data": {"columns": [{"type": "string", "name": "Name"}, {"type": "number", "name": "Calls"}, {"type": "number", "name": "Total Duration (us)"}, {"type": "number", "name": "Mean Duration (us)"}, {"type": "number", "name": "Max Duration (us)"}, {"type": "number", "name": "Min Duration (us)"}, {"type": "number", "name": "Mean Blocks Per SM", "tooltip": "Blocks Per 
SM:\nmin(blocks of this kernel / SM number of this GPU). If this number is less than 1, it indicates the GPU multiprocessors are not fully utilized."}, {"type": "number", "name": "Mean Est. Achieved Occupancy (%)", "tooltip": "Est. Achieved Occupancy:\nOccupancy is the ratio of active threads on an SM to the maximum number of active threads supported by the SM. The theoretical occupancy of a kernel is upper limit occupancy of this kernel, limited by multiple factors such as kernel shape, kernel used resource, and the GPU compute capability.Est. Achieved Occupancy of a kernel, OCC_K = min(threads of the kernel / SM number / max threads per SM, theoretical occupancy of the kernel). This overall number is the weighted sum of all kernels OCC_K using kernel's execution duration as weight."}], "rows": [["cask_cudnn::computeBOffsetsKernel(cask_cudnn::ComputeBOffsetsParams)", 54, 57, 1, 2, 1, 0.02, 0.0], ["cask_cudnn::computeOffsetsKernel(cask_cudnn::ComputeOffsetsParams)", 108, 216, 2, 5, 1, 0.16, 2.0], ["cask_cudnn::computeWgradBOffsetsKernel(cask_cudnn::ComputeWgradBOffsetsParams)", 132, 150, 1, 2, 1, 0.02, 0.0], ["cask_cudnn::computeWgradSplitKOffsetsKernel(cask_cudnn::ComputeSplitKOffsetsParams)", 132, 155, 1, 2, 1, 0.16, 1.83], ["void (anonymous namespace)::softmax_warp_backward(float*, float const*, float const*, int, int, int)", 6, 51, 8, 9, 8, 0.1, 1.0], ["void (anonymous namespace)::softmax_warp_forward(float*, float const*, int, int, int)", 6, 42, 7, 7, 7, 0.1, 1.0], ["void at::native::(anonymous namespace)::max_pool_backward_nchw(int, float const*, long const*, int, int, int, int, int, int, int, int, int, int, int, int, int, int, float*)", 6, 3838, 640, 643, 637, 1254.4, 100.0], ["void at::native::(anonymous namespace)::max_pool_forward_nchw(int, float const*, int, int, int, int, int, int, int, int, int, int, int, int, int, int, float*, long*)", 6, 1422, 237, 243, 234, 313.6, 100.0], ["void at::native::reduce_kernel<128, 4, 
at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4> >(at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4>)", 6, 45, 8, 8, 7, 0.02, 0.0], ["void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, float, 4> >(at::native::ReduceOp, unsigned int, float, 4>)", 6, 210, 35, 35, 35, 51.2, 100.0], ["void at::native::unrolled_elementwise_kernel, at::detail::Array, OffsetCalculator<1, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast>(int, at::native::MulScalarFunctor, at::detail::Array, OffsetCalculator<1, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast)", 6, 145, 24, 25, 24, 156.8, 100.0], ["void at::native::unrolled_elementwise_kernel, OffsetCalculator<1, unsigned int>, char*, at::native::memory::LoadWithoutCast, at::detail::Array::StoreWithoutCast>(int, at::native::copy_device_to_device(at::TensorIterator&, bool)::{lambda()#2}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array, OffsetCalculator<1, unsigned int>, char*, at::native::memory::LoadWithoutCast, at::detail::Array::StoreWithoutCast)", 6, 30, 5, 5, 5, 1.56, 5.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::(anonymous namespace)::clamp_min_scalar_kernel_impl(at::TensorIterator&, c10::Scalar)::{lambda()#1}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array >(int, at::native::(anonymous namespace)::clamp_min_scalar_kernel_impl(at::TensorIterator&, c10::Scalar)::{lambda()#1}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array)", 294, 17862, 61, 252, 5, 666.77, 100.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, 
at::detail::Array)", 3090, 39753, 13, 376, 1, 641.51, 92.35], ["void at::native::vectorized_elementwise_kernel<4, at::native::BUnaryFunctor >, at::detail::Array >(int, at::native::BUnaryFunctor >, at::detail::Array)", 318, 325, 1, 2, 1, 0.01, 0.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", 978, 1888, 2, 143, 0, 600.2, 86.95], ["void at::native::vectorized_elementwise_kernel<4, at::native::MulScalarFunctor, at::detail::Array >(int, at::native::MulScalarFunctor, at::detail::Array)", 966, 2395, 2, 25, 1, 44.01, 58.56], ["void at::native::vectorized_elementwise_kernel<4, at::native::threshold_kernel_impl(at::TensorIteratorBase&, float, float)::{lambda(float, float)#1}, at::detail::Array >(int, at::native::threshold_kernel_impl(at::TensorIteratorBase&, float, float)::{lambda(float, float)#1}, at::detail::Array)", 294, 27280, 93, 377, 13, 653.26, 100.0], ["void cudnn::bn_bw_1C11_kernel_new(float, float, float, float, cudnnTensorStruct, float const*, cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float*, float*, float const*, float const*, float)", 264, 59568, 226, 923, 45, 4.33, 67.92], ["void cudnn::bn_bw_1C11_singleread(float, float, float, float, cudnnTensorStruct, float const*, cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float*, float*, float const*, float const*, float, cudnn::reduced_divisor, int, cudnn::reduced_divisor, cudnn::bnBwPersistentState*, int, float, float, float, int, float, cudnnStatus_t*, bool)", 54, 2331, 43, 75, 19, 20.83, 75.0], ["void cudnn::bn_fw_tr_1C11_kernel_NCHW(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float)", 150, 27084, 181, 454, 53, 3.12, 64.02], ["void cudnn::bn_fw_tr_1C11_singleread(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, 
float, float*, float*, float*, float*, float, float, cudnn::reduced_divisor, int, cudnn::reduced_divisor, cudnn::bnFwPersistentState*, int, float, float, float, int, float, float, cudnnStatus_t*, bool)", 168, 7231, 43, 89, 11, 12.63, 75.0], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 12, 7068, 589, 990, 192, 85.38, 37.51], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 90, 43471, 483, 742, 363, 8.18, 38.0], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 6, 4038, 673, 691, 649, 6.4, 25.0], ["void cudnn::detail::dgrad_engine(int, int, int, float const*, int, float const*, int, float*, kernel_grad_params, unsigned long long, int, unsigned long long, int, float, int, int, int)", 180, 86855, 483, 1023, 323, 45.33, 30.04], ["void cudnn::ops::scalePackedTensor_kernel(cudnnTensor4dStruct, float*, float)", 180, 5901, 33, 142, 5, 525.02, 100.0], ["void cudnn::winograd::generateWinogradTilesKernel<0, float, float>(cudnn::winograd::GenerateWinogradTilesParams)", 36, 126, 4, 5, 2, 0.4, 3.0], ["void cudnn::winograd_nonfused::winogradForwardData4x4(cudnn::winograd_nonfused::WinogradDataParams)", 120, 4648, 39, 67, 17, 10.15, 50.0], ["void cudnn::winograd_nonfused::winogradForwardFilter4x4(cudnn::winograd_nonfused::WinogradFilterParams)", 120, 2632, 22, 67, 4, 8.75, 73.78], ["void cudnn::winograd_nonfused::winogradForwardOutput4x4(cudnn::winograd_nonfused::WinogradOutputParams)", 120, 5314, 44, 72, 20, 10.02, 50.0], ["void cudnn::winograd_nonfused::winogradWgradData4x4(cudnn::winograd_nonfused::WinogradDataParams)", 78, 4681, 60, 126, 20, 15.46, 38.0], ["void 
cudnn::winograd_nonfused::winogradWgradDelta4x4(cudnn::winograd_nonfused::WinogradDeltaParams)", 78, 4559, 58, 126, 17, 15.71, 50.0], ["void cudnn::winograd_nonfused::winogradWgradOutput4x4(cudnn::winograd_nonfused::WinogradWgradOutputParams)", 78, 1484, 19, 69, 3, 8.13, 41.71], ["void cunn_ClassNLLCriterion_updateGradInput_kernel(float*, float*, long*, float*, float*, int, int, int, int, long)", 6, 12, 2, 2, 2, 0.01, 0.0], ["void cunn_ClassNLLCriterion_updateOutput_kernel(float*, float*, float*, long*, float*, int, int, int, int, long)", 6, 18, 3, 3, 3, 0.01, 0.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 90, 37016, 411, 735, 346, 12.39, 50.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 12, 5221, 435, 440, 431, 9.8, 31.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 72, 35106, 488, 822, 350, 3.83, 41.64], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 12, 7939, 662, 733, 584, 7.54, 25.0], ["void nchwToNhwcKernel(int, int, int, int, float const*, float*, float, float)", 12, 383, 32, 34, 29, 71.72, 100.0], ["void nhwcToNchwKernel(int, int, int, int, float const*, float*, float, float)", 6, 54, 9, 10, 8, 12.8, 100.0], ["void splitKreduce_kernel(cublasSplitKParams, float const*, float const*, float*, float const*, float const*, float const*)", 12, 31, 3, 4, 2, 4.39, 27.74], ["volta_scudnn_128x128_stridedB_medium_nn_v1", 12, 3550, 296, 309, 286, 19.6, 25.0], 
["volta_scudnn_128x128_stridedB_small_nn_v1", 6, 3034, 506, 520, 491, 19.6, 25.0], ["volta_scudnn_128x128_stridedB_splitK_medium_nn_v1", 72, 25342, 352, 629, 323, 3.21, 25.0], ["volta_scudnn_128x128_stridedB_splitK_small_nn_v1", 48, 20473, 427, 681, 309, 6.82, 25.0], ["volta_scudnn_128x32_sliced1x4_ldg4_relu_exp_small_nhwc_tn_v1", 6, 3697, 616, 621, 614, 2.6, 25.0], ["volta_scudnn_128x64_relu_interior_nn_v1", 30, 7976, 266, 316, 92, 37.08, 25.0], ["volta_scudnn_128x64_relu_medium_nn_v1", 6, 3647, 608, 620, 602, 39.2, 25.0], ["volta_scudnn_128x64_relu_small_nn_v1", 12, 3273, 273, 286, 258, 9.8, 25.0], ["volta_scudnn_128x64_stridedB_interior_nn_v1", 30, 9559, 319, 508, 255, 12.91, 19.0], ["volta_scudnn_128x64_stridedB_small_nn_v1", 6, 582, 97, 99, 94, 9.8, 19.0], ["volta_scudnn_128x64_stridedB_splitK_xregs_large_nn_v1", 12, 7819, 652, 670, 634, 15.96, 19.0], ["volta_scudnn_winograd_128x128_ldg1_ldg4_relu_tile148t_nt_v1", 36, 12761, 354, 365, 344, 22.4, 25.0], ["volta_sgemm_128x32_nt", 24, 8658, 361, 479, 18, 0.97, 11.51], ["volta_sgemm_32x128_nn", 18, 3059, 170, 173, 167, 22.05, 50.0], ["volta_sgemm_32x128_nt", 18, 2837, 158, 159, 156, 22.05, 50.0], ["volta_sgemm_64x32_sliced1x4_nn", 6, 149, 25, 25, 24, 2.0, 25.0], ["volta_sgemm_64x32_sliced1x4_tn", 6, 148, 25, 25, 24, 1.0, 13.0], ["volta_sgemm_64x64_nn", 42, 8544, 203, 210, 197, 12.35, 24.14], ["volta_sgemm_64x64_nt", 102, 21125, 207, 281, 184, 10.28, 19.38]]}} +{"total": {"columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": [["cask_cudnn::computeBOffsetsKernel(cask_cudnn::ComputeBOffsetsParams)", 57.0], ["cask_cudnn::computeOffsetsKernel(cask_cudnn::ComputeOffsetsParams)", 216.0], ["cask_cudnn::computeWgradBOffsetsKernel(cask_cudnn::ComputeWgradBOffsetsParams)", 150.0], ["cask_cudnn::computeWgradSplitKOffsetsKernel(cask_cudnn::ComputeSplitKOffsetsParams)", 155.0], ["void (anonymous namespace)::softmax_warp_backward(float*, float const*, float const*, int, int, int)", 51.0], 
["void (anonymous namespace)::softmax_warp_forward(float*, float const*, int, int, int)", 42.0], ["void at::native::(anonymous namespace)::max_pool_backward_nchw(int, float const*, long const*, int, int, int, int, int, int, int, int, int, int, int, int, int, int, float*)", 3838.0], ["void at::native::(anonymous namespace)::max_pool_forward_nchw(int, float const*, int, int, int, int, int, int, int, int, int, int, int, int, int, int, float*, long*)", 1422.0], ["void at::native::reduce_kernel<128, 4, at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4> >(at::native::ReduceOp::operator()(at::TensorIterator&)::{lambda(float, float)#1}>, unsigned int, float, 4>)", 45.0], ["void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, float, 4> >(at::native::ReduceOp, unsigned int, float, 4>)", 210.0], ["void at::native::unrolled_elementwise_kernel, at::detail::Array, OffsetCalculator<1, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast>(int, at::native::MulScalarFunctor, at::detail::Array, OffsetCalculator<1, unsigned int>, OffsetCalculator<1, unsigned int>, at::native::memory::LoadWithoutCast, at::native::memory::StoreWithoutCast)", 145.0], ["void at::native::unrolled_elementwise_kernel, OffsetCalculator<1, unsigned int>, char*, at::native::memory::LoadWithoutCast, at::detail::Array::StoreWithoutCast>(int, at::native::copy_device_to_device(at::TensorIterator&, bool)::{lambda()#2}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array, OffsetCalculator<1, unsigned int>, char*, at::native::memory::LoadWithoutCast, at::detail::Array::StoreWithoutCast)", 30.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::(anonymous namespace)::clamp_min_scalar_kernel_impl(at::TensorIterator&, c10::Scalar)::{lambda()#1}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, 
at::detail::Array >(int, at::native::(anonymous namespace)::clamp_min_scalar_kernel_impl(at::TensorIterator&, c10::Scalar)::{lambda()#1}::operator()() const::{lambda()#8}::operator()() const::{lambda(float)#1}, at::detail::Array)", 17862.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::AddFunctor, at::detail::Array >(int, at::native::AddFunctor, at::detail::Array)", 39753.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::BUnaryFunctor >, at::detail::Array >(int, at::native::BUnaryFunctor >, at::detail::Array)", 325.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", 1888.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::MulScalarFunctor, at::detail::Array >(int, at::native::MulScalarFunctor, at::detail::Array)", 2395.0], ["void at::native::vectorized_elementwise_kernel<4, at::native::threshold_kernel_impl(at::TensorIteratorBase&, float, float)::{lambda(float, float)#1}, at::detail::Array >(int, at::native::threshold_kernel_impl(at::TensorIteratorBase&, float, float)::{lambda(float, float)#1}, at::detail::Array)", 27280.0], ["void cudnn::bn_bw_1C11_kernel_new(float, float, float, float, cudnnTensorStruct, float const*, cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float*, float*, float const*, float const*, float)", 59568.0], ["void cudnn::bn_bw_1C11_singleread(float, float, float, float, cudnnTensorStruct, float const*, cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float*, float*, float const*, float const*, float, cudnn::reduced_divisor, int, cudnn::reduced_divisor, cudnn::bnBwPersistentState*, int, float, float, float, int, float, cudnnStatus_t*, bool)", 2331.0], ["void cudnn::bn_fw_tr_1C11_kernel_NCHW(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float)", 
27084.0], ["void cudnn::bn_fw_tr_1C11_singleread(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float, cudnn::reduced_divisor, int, cudnn::reduced_divisor, cudnn::bnFwPersistentState*, int, float, float, float, int, float, float, cudnnStatus_t*, bool)", 7231.0], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 7068.0], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 43471.0], ["void cudnn::cnn::wgrad_alg0_engine(int, int, int, float const*, int, float*, float const*, kernel_grad_params, unsigned long long, int, float, int, int, int, int)", 4038.0], ["void cudnn::detail::dgrad_engine(int, int, int, float const*, int, float const*, int, float*, kernel_grad_params, unsigned long long, int, unsigned long long, int, float, int, int, int)", 86855.0], ["void cudnn::ops::scalePackedTensor_kernel(cudnnTensor4dStruct, float*, float)", 5901.0], ["void cudnn::winograd::generateWinogradTilesKernel<0, float, float>(cudnn::winograd::GenerateWinogradTilesParams)", 126.0], ["void cudnn::winograd_nonfused::winogradForwardData4x4(cudnn::winograd_nonfused::WinogradDataParams)", 4648.0], ["void cudnn::winograd_nonfused::winogradForwardFilter4x4(cudnn::winograd_nonfused::WinogradFilterParams)", 2632.0], ["void cudnn::winograd_nonfused::winogradForwardOutput4x4(cudnn::winograd_nonfused::WinogradOutputParams)", 5314.0], ["void cudnn::winograd_nonfused::winogradWgradData4x4(cudnn::winograd_nonfused::WinogradDataParams)", 4681.0], ["void cudnn::winograd_nonfused::winogradWgradDelta4x4(cudnn::winograd_nonfused::WinogradDeltaParams)", 4559.0], ["void cudnn::winograd_nonfused::winogradWgradOutput4x4(cudnn::winograd_nonfused::WinogradWgradOutputParams)", 1484.0], ["void 
cunn_ClassNLLCriterion_updateGradInput_kernel(float*, float*, long*, float*, float*, int, int, int, int, long)", 12.0], ["void cunn_ClassNLLCriterion_updateOutput_kernel(float*, float*, float*, long*, float*, int, int, int, int, long)", 18.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 37016.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 5221.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 35106.0], ["void implicit_convolve_sgemm(int, int, int, float const*, int, float*, float const*, kernel_conv_params, unsigned long long, int, float, float, int, float const*, float const*, bool, int, int)", 7939.0], ["void nchwToNhwcKernel(int, int, int, int, float const*, float*, float, float)", 383.0], ["void nhwcToNchwKernel(int, int, int, int, float const*, float*, float, float)", 54.0], ["void splitKreduce_kernel(cublasSplitKParams, float const*, float const*, float*, float const*, float const*, float const*)", 31.0], ["volta_scudnn_128x128_stridedB_medium_nn_v1", 3550.0], ["volta_scudnn_128x128_stridedB_small_nn_v1", 3034.0], ["volta_scudnn_128x128_stridedB_splitK_medium_nn_v1", 25342.0], ["volta_scudnn_128x128_stridedB_splitK_small_nn_v1", 20473.0], ["volta_scudnn_128x32_sliced1x4_ldg4_relu_exp_small_nhwc_tn_v1", 3697.0], ["volta_scudnn_128x64_relu_interior_nn_v1", 7976.0], ["volta_scudnn_128x64_relu_medium_nn_v1", 3647.0], ["volta_scudnn_128x64_relu_small_nn_v1", 3273.0], ["volta_scudnn_128x64_stridedB_interior_nn_v1", 9559.0], ["volta_scudnn_128x64_stridedB_small_nn_v1", 582.0], 
["volta_scudnn_128x64_stridedB_splitK_xregs_large_nn_v1", 7819.0], ["volta_scudnn_winograd_128x128_ldg1_ldg4_relu_tile148t_nt_v1", 12761.0], ["volta_sgemm_128x32_nt", 8658.0], ["volta_sgemm_32x128_nn", 3059.0], ["volta_sgemm_32x128_nt", 2837.0], ["volta_sgemm_64x32_sliced1x4_nn", 149.0], ["volta_sgemm_64x32_sliced1x4_tn", 148.0], ["volta_sgemm_64x64_nn", 8544.0], ["volta_sgemm_64x64_nt", 21125.0]]}} diff --git a/tb_plugin/test/test_compare_with_autograd.py b/tb_plugin/test/test_compare_with_autograd.py index 9b7cda063..3f862f717 100644 --- a/tb_plugin/test/test_compare_with_autograd.py +++ b/tb_plugin/test/test_compare_with_autograd.py @@ -1,7 +1,7 @@ import os -import pytest import time import unittest +import pytest import torch import torch.nn as nn import torch.backends.cudnn as cudnn @@ -10,6 +10,7 @@ import torchvision import torchvision.transforms as T import torchvision.models as models +import torch_tb_profiler.io as io from torch_tb_profiler.profiler import RunLoader def create_log_dir(): @@ -20,7 +21,7 @@ def create_log_dir(): raise RuntimeError("Can't create directory: " + log_dir_name) return log_dir_name -def get_autograd_result(p, worker_name): +def get_autograd_result(p, worker_name, record_shapes=False, with_stack=False): avgs = p.key_averages() sort_by = 'self_cuda_time_total' avgs = sorted( @@ -60,20 +61,111 @@ def get_type(evt): elif is_gpu and evt_type == "kernel": line = [avg.key, int(avg.count), int(avg.self_cuda_time_total)] result_dict[worker_name + "#kernel"].append(line) + if record_shapes: + result_dict[worker_name + "#operator#input_shape"] = list() + avgs = p.key_averages(True) + sort_by = 'self_cuda_time_total' + avgs = sorted( + avgs, key=lambda evt: getattr(evt, sort_by), reverse=True + ) + for avg in avgs: + evt_type = get_type(avg) + if evt_type == "operator": + line = [avg.key, str(avg.input_shapes), int(avg.count)] + if is_gpu: + line.extend([int(avg.self_cuda_time_total), int(avg.cuda_time_total)]) + 
line.extend([int(avg.self_cpu_time_total), int(avg.cpu_time_total)]) + result_dict[worker_name + "#operator#input_shape"].append(line) + # The call stack for legacy and kineto profiler is different for now, + # The legacy profiler has stack for backward while kineto not + # So, just disable call stack compare for the moment + if False and with_stack: + result_dict[worker_name + "#operator#stack"] = list() + avgs = p.key_averages(False, 100) + sort_by = 'self_cuda_time_total' + avgs = sorted( + avgs, key=lambda evt: getattr(evt, sort_by), reverse=True + ) + for avg in avgs: + evt_type = get_type(avg) + if evt_type == "operator" and avg.stack: + line = [avg.key, int(avg.count)] + if is_gpu: + line.extend([int(avg.self_cuda_time_total), int(avg.cuda_time_total)]) + line.extend([int(avg.self_cpu_time_total), int(avg.cpu_time_total), ''.join(avg.stack)]) + result_dict[worker_name + "#operator#stack"].append(line) + + result_dict[worker_name + "#operator#stack#input_shape"] = list() + avgs = p.key_averages(True, 100) + sort_by = 'self_cuda_time_total' + avgs = sorted( + avgs, key=lambda evt: getattr(evt, sort_by), reverse=True + ) + for avg in avgs: + evt_type = get_type(avg) + if evt_type == "operator" and avg.stack: + line = [avg.key, str(avg.input_shapes), int(avg.count)] + if is_gpu: + line.extend([int(avg.self_cuda_time_total), int(avg.cuda_time_total)]) + line.extend([int(avg.self_cpu_time_total), int(avg.cpu_time_total), ''.join(avg.stack)]) + result_dict[worker_name + "#operator#stack#input_shape"].append(line) + return result_dict -def get_plugin_result(run): +def generate_plugin_result_row(data): + row = list() + row.append(data['name']) + if 'input_shape' in data: + row.append(data['input_shape']) + row.append(data['calls']) + if 'device_self_duration' in data: + row.append(data['device_self_duration']) + row.append(data['device_total_duration']) + row.extend([data['host_self_duration'], data['host_total_duration']]) + if 'call_stack' in data: + 
row.append(data['call_stack']) + return row + +def get_plugin_result(run, record_shapes=False, with_stack=False): result_dict = dict() - for worker_name, profile in run.profiles.items(): + for (worker_name, span), profile in run.profiles.items(): worker_name = worker_name.split('.')[0] - if profile.operation_table_by_name is not None: - rows = profile.operation_table_by_name["data"]["rows"] - result_dict[worker_name + "#operator"] = rows - if profile.kernel_table is not None: - rows = profile.kernel_table["data"]["rows"] - result_dict[worker_name + "#kernel"] = list() - for row in rows: - result_dict[worker_name + "#kernel"].append(row[:3]) + assert profile.operation_table_by_name is not None + result_dict[worker_name + "#operator"] = list() + for data in profile.operation_table_by_name: + row = generate_plugin_result_row(data) + result_dict[worker_name + "#operator"].append(row) + if profile.kernel_table is not None: + rows = profile.kernel_table["data"]["rows"] + result_dict[worker_name + "#kernel"] = list() + for row in rows: + result_dict[worker_name + "#kernel"].append(row[:3]) + if record_shapes: + assert profile.operation_table_by_name_input is not None + result_dict[worker_name + "#operator#input_shape"] = list() + for data in profile.operation_table_by_name_input: + row = generate_plugin_result_row(data) + result_dict[worker_name + "#operator#input_shape"].append(row) + # The call stack for legacy and kineto profiler is different for now, + # The legacy profiler has stack for backward while kineto not + # So, just disable call stack compare for the moment + if False and with_stack: + assert profile.operation_stack_by_name is not None + assert profile.operation_stack_by_name_input is not None + result_dict[worker_name + "#operator#stack"] = list() + op_stack_dict = profile.operation_stack_by_name + for k,datalist in op_stack_dict.items(): + for data in datalist: + row = generate_plugin_result_row(data) + result_dict[worker_name + 
"#operator#stack"].append(row) + if record_shapes: + result_dict[worker_name + "#operator#stack#input_shape"] = list() + op_stack_dict = profile.operation_stack_by_name_input + for k,datalist in op_stack_dict.items(): + for data in datalist: + row = generate_plugin_result_row(data) + result_dict[worker_name + "#operator#stack#input_shape"].append(row) + return result_dict def get_train_func(use_gpu=True): @@ -128,13 +220,14 @@ def output_fn(p): class TestCompareWithAutogradResult(unittest.TestCase): - def compare_results(self, log_dir, profilers_dict, use_gpu=True): - loader = RunLoader(os.path.split(log_dir)[-1], log_dir) + def compare_results(self, log_dir, profilers_dict, use_gpu=True, record_shapes=False, with_stack=False): + cache = io.Cache() + loader = RunLoader(os.path.split(log_dir)[-1], log_dir, cache) run = loader.load() - plugin_result = get_plugin_result(run) + plugin_result = get_plugin_result(run, record_shapes, with_stack) count = 0 for worker_name, p in profilers_dict.items(): - autograd_result = get_autograd_result(p, worker_name) + autograd_result = get_autograd_result(p, worker_name, record_shapes, with_stack) for key in autograd_result.keys(): count += 1 self.assertTrue(key in plugin_result.keys()) @@ -173,7 +266,7 @@ def base_profiler_api(self, use_gpu, record_shapes, profile_memory, with_stack): with_stack=with_stack ) as p: get_train_func(use_gpu)(13, p) - self.compare_results(log_dir, profilers_dict, use_gpu) + self.compare_results(log_dir, profilers_dict, use_gpu, record_shapes, with_stack) @pytest.mark.skipif('CI' in os.environ, reason="") def test_profiler_api_without_gpu(self): diff --git a/tb_plugin/test/test_profiler.py b/tb_plugin/test/test_profiler.py index 445ee37a1..f816e7d99 100644 --- a/tb_plugin/test/test_profiler.py +++ b/tb_plugin/test/test_profiler.py @@ -1,8 +1,11 @@ import json +import gzip import unittest import torch_tb_profiler.profiler.trace as trace from torch_tb_profiler.profiler.data import RunProfileData +from 
torch_tb_profiler.profiler.overall_parser import ProfileRole +from torch_tb_profiler.run import RunProfile SCHEMA_VERSION = 1 WORKER_NAME = "worker0" @@ -10,11 +13,12 @@ def parse_json_trace(json_content): trace_json = json.loads(json_content) + trace_json = {"schemaVersion": 1, "traceEvents": trace_json} profile = RunProfileData(WORKER_NAME) - parser = trace.get_event_parser(SCHEMA_VERSION) + profile.trace_json = trace_json profile.events = [] - for data in trace_json: - event = parser.parse(data) + for data in trace_json["traceEvents"]: + event = trace.create_event(data) if event is not None: profile.events.append(event) return profile @@ -36,25 +40,25 @@ def test_all_categories(self): "ph": "X", "cat": "Operator", "name": "enumerate(DataLoader)#_SingleProcessDataLoaderIter.__next__", "pid": 13721, "tid": "123", "ts": 100, "dur": 180, - "args": {"Input dims": [], "External id": 2} + "args": {"Input Dims": [], "External id": 2} }, { "ph": "X", "cat": "Operator", "name": "aten::to", "pid": 13721, "tid": "123", "ts": 200, "dur": 60, - "args": {"Input dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 3} + "args": {"Input Dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 3} }, { "ph": "X", "cat": "Operator", "name": "aten::nll_loss_backward", "pid": 13721, "tid": "456", "ts": 340, "dur": 70, - "args": {"Input dims": [[], [32, 1000], [32], [], [], [], []], "External id": 4} + "args": {"Input Dims": [[], [32, 1000], [32], [], [], [], []], "External id": 4} }, { "ph": "X", "cat": "Operator", "name": "ProfilerStep#1", "pid": 13721, "tid": "123", "ts": 50, "dur": 400, - "args": {"Input dims": [], "External id": 1} + "args": {"Input Dims": [], "External id": 1} }, { "ph": "X", "cat": "Memcpy", @@ -84,7 +88,7 @@ def test_all_categories(self): "ph": "X", "cat": "Kernel", "name": "void cunn_ClassNLLCriterion_updateGradInput_kernel", "pid": 0, "tid": "stream 7", "ts": 430, "dur": 15, - "args": {"correlation": 40348, "external id": 4} + "args": 
{"correlation": 40348, "external id": 4, "device": 0} }, { "ph": "X", "cat": "Runtime", @@ -100,13 +104,13 @@ def test_all_categories(self): self.assertTrue(profile.has_kernel) self.assertTrue(profile.has_memcpy_or_memset) step = profile.steps_costs[0] - self.assertEqual(step.kernel_cost, 15) - self.assertEqual(step.memcpy_cost, 10) - self.assertEqual(step.memset_cost, 5) - self.assertEqual(step.runtime_cost, 30) - self.assertEqual(step.dataloader_cost, 180) - self.assertEqual(step.cpuop_cost, 35) - self.assertEqual(step.other_cost, 125) + self.assertEqual(step.costs[ProfileRole.Kernel], 15) + self.assertEqual(step.costs[ProfileRole.Memcpy], 10) + self.assertEqual(step.costs[ProfileRole.Memset], 5) + self.assertEqual(step.costs[ProfileRole.Runtime], 30) + self.assertEqual(step.costs[ProfileRole.DataLoader], 180) + self.assertEqual(step.costs[ProfileRole.CpuOp], 35) + self.assertEqual(step.costs[ProfileRole.Other], 125) self.assertEqual(len(profile.op_list_groupby_name), 2) self.assertEqual(len(profile.op_list_groupby_name_input), 2) @@ -116,7 +120,8 @@ def test_op_list(op_list): for op_agg in op_list: if op_agg.name == "aten::to": op_count += 1 - self.assertEqual(op_agg.input_shape, "[[2, 8, 5], [], [], [], [], [], [], []]") + self.assertEqual(op_agg.input_shape, + "[[2, 8, 5], [], [], [], [], [], [], []]") self.assertEqual(op_agg.calls, 1) self.assertEqual(op_agg.host_duration, 60) self.assertEqual(op_agg.device_duration, 0) @@ -124,11 +129,13 @@ def test_op_list(op_list): self.assertEqual(op_agg.self_device_duration, 0) if op_agg.name == "aten::nll_loss_backward": op_count += 1 - self.assertEqual(op_agg.input_shape, "[[], [32, 1000], [32], [], [], [], []]") + self.assertEqual(op_agg.input_shape, + "[[], [32, 1000], [32], [], [], [], []]") self.assertEqual(op_agg.calls, 1) self.assertEqual(op_agg.host_duration, 70) self.assertEqual(op_agg.device_duration, 30) - self.assertEqual(op_agg.self_host_duration, 70 - 20 - 10 - 5) + self.assertEqual( + 
op_agg.self_host_duration, 70 - 20 - 10 - 5) self.assertEqual(op_agg.self_device_duration, 30) self.assertEqual(op_count, 2) @@ -139,11 +146,15 @@ def test_op_list(op_list): self.assertEqual(profile.kernel_stat.shape[0], 1) self.assertEqual(profile.kernel_list_groupby_name_op[0].name, "void cunn_ClassNLLCriterion_updateGradInput_kernel") - self.assertEqual(profile.kernel_list_groupby_name_op[0].op_name, "aten::nll_loss_backward") + self.assertEqual( + profile.kernel_list_groupby_name_op[0].op_name, "aten::nll_loss_backward") self.assertEqual(profile.kernel_list_groupby_name_op[0].calls, 1) - self.assertEqual(profile.kernel_list_groupby_name_op[0].total_duration, 15) - self.assertEqual(profile.kernel_list_groupby_name_op[0].min_duration, 15) - self.assertEqual(profile.kernel_list_groupby_name_op[0].max_duration, 15) + self.assertEqual( + profile.kernel_list_groupby_name_op[0].total_duration, 15) + self.assertEqual( + profile.kernel_list_groupby_name_op[0].min_duration, 15) + self.assertEqual( + profile.kernel_list_groupby_name_op[0].max_duration, 15) self.assertEqual(profile.kernel_stat.iloc[0]["count"], 1) self.assertEqual(profile.kernel_stat.iloc[0]["sum"], 15) self.assertEqual(profile.kernel_stat.iloc[0]["mean"], 15) @@ -160,19 +171,19 @@ def test_external_id(self): "ph": "X", "cat": "Operator", "name": "aten::mat_mul", "pid": 13721, "tid": "456", "ts": 100, "dur": 100, - "args": {"Input dims": [], "External id": 2} + "args": {"Input Dims": [], "External id": 2} }, { "ph": "X", "cat": "Operator", "name": "aten::mm", "pid": 13721, "tid": "456", "ts": 120, "dur": 70, - "args": {"Input dims": [], "External id": 4} + "args": {"Input Dims": [], "External id": 4} }, { "ph": "X", "cat": "Kernel", "name": "void cunn_ClassNLLCriterion_updateGradInput_kernel", "pid": 0, "tid": "stream 7", "ts": 130, "dur": 5, - "args": {"correlation": 334, "external id": 4} + "args": {"correlation": 334, "external id": 4, "device": 0} }, { "ph": "X", "cat": "Runtime", @@ -184,7 +195,7 @@ 
def test_external_id(self): "ph": "X", "cat": "Kernel", "name": "void cunn_ClassNLLCriterion_updateGradInput_kernel", "pid": 0, "tid": "stream 7", "ts": 130, "dur": 6, - "args": {"correlation": 335, "external id": 2} + "args": {"correlation": 335, "external id": 2, "device": 0} }, { "ph": "X", "cat": "Runtime", @@ -196,7 +207,7 @@ def test_external_id(self): "ph": "X", "cat": "Kernel", "name": "void cunn_ClassNLLCriterion_updateGradInput_kernel", "pid": 0, "tid": "stream 7", "ts": 130, "dur": 7, - "args": {"correlation": 336, "external id": 4} + "args": {"correlation": 336, "external id": 4, "device": 0} }, { "ph": "X", "cat": "Runtime", @@ -208,7 +219,7 @@ def test_external_id(self): "ph": "X", "cat": "Kernel", "name": "void cunn_ClassNLLCriterion_updateGradInput_kernel", "pid": 0, "tid": "stream 7", "ts": 130, "dur": 8, - "args": {"correlation": 337, "external id": 2} + "args": {"correlation": 337, "external id": 2, "device": 0} }, { "ph": "X", "cat": "Runtime", @@ -240,13 +251,13 @@ def test_operator_relation(self): "ph": "X", "cat": "Operator", "name": "aten::mat_mul", "pid": 13721, "tid": "456", "ts": 100, "dur": 100, - "args": {"Input dims": [], "External id": 2} + "args": {"Input Dims": [], "External id": 2} }, { "ph": "X", "cat": "Operator", "name": "aten::mm", "pid": 13721, "tid": "456", "ts": 100, "dur": 70, - "args": {"Input dims": [], "External id": 4} + "args": {"Input Dims": [], "External id": 4} }] """ profile = parse_json_trace(json_content) @@ -267,13 +278,13 @@ def test_operator_relation(self): "ph": "X", "cat": "Operator", "name": "aten::mat_mul", "pid": 13721, "tid": "456", "ts": 100, "dur": 100, - "args": {"Input dims": [], "External id": 2} + "args": {"Input Dims": [], "External id": 2} }, { "ph": "X", "cat": "Operator", "name": "aten::mm", "pid": 13721, "tid": "456", "ts": 130, "dur": 70, - "args": {"Input dims": [], "External id": 4} + "args": {"Input Dims": [], "External id": 4} }] """ profile = parse_json_trace(json_content) @@ -298,19 
+309,19 @@ def test_remove_dup_nodes(self): "ph": "X", "cat": "Operator", "name": "aten::mm", "pid": 13721, "tid": "456", "ts": 100, "dur": 100, - "args": {"Input dims": [], "External id": 2} + "args": {"Input Dims": [], "External id": 2} }, { "ph": "X", "cat": "Operator", "name": "aten::mm", "pid": 13721, "tid": "456", "ts": 110, "dur": 80, - "args": {"Input dims": [], "External id": 3} + "args": {"Input Dims": [], "External id": 3} }, { "ph": "X", "cat": "Operator", "name": "aten::mm", "pid": 13721, "tid": "456", "ts": 120, "dur": 60, - "args": {"Input dims": [], "External id": 4} + "args": {"Input Dims": [], "External id": 4} }, { "ph": "X", "cat": "Runtime", @@ -322,13 +333,14 @@ def test_remove_dup_nodes(self): "ph": "X", "cat": "Kernel", "name": "void gemmSN_TN_kernel_64addr", "pid": 0, "tid": "stream 7", "ts": 220, "dur": 8, - "args": {"correlation": 335, "external id": 4} + "args": {"correlation": 335, "external id": 4, "device": 0} }] """ profile = parse_json_trace(json_content) profile.process() self.assertEqual(len(profile.op_list_groupby_name), 1) - self.assertEqual(profile.op_list_groupby_name[0].self_device_duration, 8) + self.assertEqual( + profile.op_list_groupby_name[0].self_device_duration, 8) # Test Runtime with "external id" 0. 
# This kind of Runtime should not be attached to any operator, @@ -340,7 +352,7 @@ def test_top_level_runtime(self): "ph": "X", "cat": "Operator", "name": "aten::mm", "pid": 13721, "tid": "123", "ts": 100, "dur": 100, - "args": {"Input dims": [], "External id": 2} + "args": {"Input Dims": [], "External id": 2} }, { "ph": "X", "cat": "Runtime", @@ -352,13 +364,14 @@ def test_top_level_runtime(self): "ph": "X", "cat": "Kernel", "name": "void gemmSN_TN_kernel_64addr", "pid": 0, "tid": "stream 7", "ts": 220, "dur": 8, - "args": {"correlation": 335, "external id": 0} + "args": {"correlation": 335, "external id": 0, "device": 0} }] """ profile = parse_json_trace(json_content) profile.process() self.assertEqual(profile.op_list_groupby_name[0].device_duration, 0) - self.assertEqual(profile.op_list_groupby_name[0].self_device_duration, 0) + self.assertEqual( + profile.op_list_groupby_name[0].self_device_duration, 0) self.assertEqual(profile.kernel_stat.iloc[0]["count"], 1) # Test Runtime directly called in ProfilerStep, not inside any operator. 
@@ -368,7 +381,7 @@ def test_runtime_called_by_profilerstep(self): "ph": "X", "cat": "Operator", "name": "ProfilerStep#1", "pid": 13721, "tid": "456", "ts": 100, "dur": 300, - "args": {"Input dims": [], "External id": 2} + "args": {"Input Dims": [], "External id": 2} }, { "ph": "X", "cat": "Runtime", @@ -380,17 +393,18 @@ def test_runtime_called_by_profilerstep(self): "ph": "X", "cat": "Kernel", "name": "void gemmSN_TN_kernel_64addr", "pid": 0, "tid": "stream 7", "ts": 220, "dur": 8, - "args": {"correlation": 335, "external id": 2} + "args": {"correlation": 335, "external id": 2, "device": 0} }] """ profile = parse_json_trace(json_content) profile.process() step = profile.steps_costs[0] - self.assertEqual(step.kernel_cost, 8) - self.assertEqual(step.runtime_cost, 20) - self.assertEqual(step.cpuop_cost, 0) - self.assertEqual(step.other_cost, 300 - 8 - 20) - self.assertEqual(len(profile.op_list_groupby_name), 0) # ProfilerStep is not regarded as an operator. + self.assertEqual(step.costs[ProfileRole.Kernel], 8) + self.assertEqual(step.costs[ProfileRole.Runtime], 20) + self.assertEqual(step.costs[ProfileRole.CpuOp], 0) + self.assertEqual(step.costs[ProfileRole.Other], 300 - 8 - 20) + # ProfilerStep is not regarded as an operator. 
+ self.assertEqual(len(profile.op_list_groupby_name), 0) self.assertEqual(len(profile.op_list_groupby_name_input), 0) self.assertEqual(profile.kernel_stat.iloc[0]["count"], 1) self.assertEqual(len(profile.kernel_list_groupby_name_op), 1) @@ -405,7 +419,7 @@ def test_runtime_launch_multipe_kernels(self): "ph": "X", "cat": "Operator", "name": "Broadcast", "pid": 13721, "tid": "456", "ts": 100, "dur": 300, - "args": {"Input dims": [], "External id": 2} + "args": {"Input Dims": [], "External id": 2} }, { "ph": "X", "cat": "Runtime", @@ -418,19 +432,20 @@ def test_runtime_launch_multipe_kernels(self): "name": "ncclBroadcastRingLLKernel_copy_i8(ncclColl)", "pid": 0, "tid": "stream 13", "ts": 160, "dur": 120318, "args": {"device": 0, "context": 1, "stream": 13, - "correlation": 335, "external id": 2} + "correlation": 335, "external id": 2, "device": 0} }, { "ph": "X", "cat": "Kernel", "name": "ncclBroadcastRingLLKernel_copy_i8(ncclColl)", "pid": 0, "tid": "stream 22", "ts": 170, "dur": 132800, - "args": {"device": 1, "context": 2, "stream": 22, + "args": {"device": 0, "context": 2, "stream": 22, "correlation": 335, "external id": 2} }] """ profile = parse_json_trace(json_content) profile.process() - self.assertEqual(profile.op_list_groupby_name[0].device_duration, 120318 + 132800) + self.assertEqual( + profile.op_list_groupby_name[0].device_duration, 120318 + 132800) self.assertEqual(profile.kernel_stat.iloc[0]["count"], 2) self.assertEqual(len(profile.kernel_list_groupby_name_op), 1) @@ -441,19 +456,19 @@ def test_no_profilerstep(self): "ph": "X", "cat": "Operator", "name": "aten::to", "pid": 13721, "tid": "123", "ts": 100, "dur": 60, - "args": {"Input dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 3} + "args": {"Input Dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 3} }, { "ph": "X", "cat": "Operator", "name": "aten::nll_loss_backward", "pid": 13721, "tid": "456", "ts": 300, "dur": 70, - "args": {"Input dims": [[], [32, 1000], [32], [], 
[], [], []], "External id": 4} + "args": {"Input Dims": [[], [32, 1000], [32], [], [], [], []], "External id": 4} }, { "ph": "X", "cat": "Kernel", "name": "void cunn_ClassNLLCriterion_updateGradInput_kernel", "pid": 0, "tid": "stream 7", "ts": 320, "dur": 100, - "args": {"correlation": 40348, "external id": 4} + "args": {"correlation": 40348, "external id": 4, "device": 0} }, { "ph": "X", "cat": "Runtime", @@ -470,15 +485,16 @@ def test_no_profilerstep(self): self.assertTrue(not profile.has_memcpy_or_memset) self.assertEqual(len(profile.steps_costs), 1) step = profile.steps_costs[0] - self.assertEqual(step.kernel_cost, 100) - self.assertEqual(step.memcpy_cost, 0) - self.assertEqual(step.memset_cost, 0) - self.assertEqual(step.runtime_cost, 320 - 310) - self.assertEqual(step.dataloader_cost, 0) - self.assertEqual(step.cpuop_cost, 60 + (310 - 300)) + + self.assertEqual(step.costs[ProfileRole.Kernel], 100) + self.assertEqual(step.costs[ProfileRole.Memcpy], 0) + self.assertEqual(step.costs[ProfileRole.Memset], 0) + self.assertEqual(step.costs[ProfileRole.Runtime], 320 - 310) + self.assertEqual(step.costs[ProfileRole.DataLoader], 0) + self.assertEqual(step.costs[ProfileRole.CpuOp], 60 + (310 - 300)) # If no ProfilerStep, all events will be regarded as a step. 
- self.assertEqual(step.other_cost, 300 - (100 + 60)) - self.assertEqual(step.step_total_cost, (320 + 100) - 100) + self.assertEqual(step.costs[ProfileRole.Other], 300 - (100 + 60)) + self.assertEqual(step.costs[ProfileRole.Total], (320 + 100) - 100) self.assertEqual(len(profile.op_list_groupby_name), 2) self.assertEqual(len(profile.op_list_groupby_name_input), 2) self.assertEqual(profile.kernel_stat.iloc[0]["count"], 1) @@ -489,7 +505,8 @@ def test_op_list(op_list): for op_agg in op_list: if op_agg.name == "aten::to": op_count += 1 - self.assertEqual(op_agg.input_shape, "[[2, 8, 5], [], [], [], [], [], [], []]") + self.assertEqual(op_agg.input_shape, + "[[2, 8, 5], [], [], [], [], [], [], []]") self.assertEqual(op_agg.calls, 1) self.assertEqual(op_agg.host_duration, 60) self.assertEqual(op_agg.device_duration, 0) @@ -497,7 +514,8 @@ def test_op_list(op_list): self.assertEqual(op_agg.self_device_duration, 0) if op_agg.name == "aten::nll_loss_backward": op_count += 1 - self.assertEqual(op_agg.input_shape, "[[], [32, 1000], [32], [], [], [], []]") + self.assertEqual(op_agg.input_shape, + "[[], [32, 1000], [32], [], [], [], []]") self.assertEqual(op_agg.calls, 1) self.assertEqual(op_agg.host_duration, 70) self.assertEqual(op_agg.device_duration, 100) @@ -510,11 +528,15 @@ def test_op_list(op_list): self.assertEqual(profile.kernel_list_groupby_name_op[0].name, "void cunn_ClassNLLCriterion_updateGradInput_kernel") - self.assertEqual(profile.kernel_list_groupby_name_op[0].op_name, "aten::nll_loss_backward") + self.assertEqual( + profile.kernel_list_groupby_name_op[0].op_name, "aten::nll_loss_backward") self.assertEqual(profile.kernel_list_groupby_name_op[0].calls, 1) - self.assertEqual(profile.kernel_list_groupby_name_op[0].total_duration, 100) - self.assertEqual(profile.kernel_list_groupby_name_op[0].min_duration, 100) - self.assertEqual(profile.kernel_list_groupby_name_op[0].max_duration, 100) + self.assertEqual( + profile.kernel_list_groupby_name_op[0].total_duration, 
100) + self.assertEqual( + profile.kernel_list_groupby_name_op[0].min_duration, 100) + self.assertEqual( + profile.kernel_list_groupby_name_op[0].max_duration, 100) self.assertEqual(profile.kernel_stat.iloc[0]["count"], 1) self.assertEqual(profile.kernel_stat.iloc[0]["sum"], 100) self.assertEqual(profile.kernel_stat.iloc[0]["mean"], 100) @@ -528,25 +550,25 @@ def test_multiple_profilersteps_no_overlap(self): "ph": "X", "cat": "Operator", "name": "ProfilerStep#1", "pid": 13721, "tid": "123", "ts": 100, "dur": 200, - "args": {"Input dims": [], "External id": 1} + "args": {"Input Dims": [], "External id": 1} }, { "ph": "X", "cat": "Operator", "name": "aten::to", "pid": 13721, "tid": "123", "ts": 200, "dur": 60, - "args": {"Input dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 2} + "args": {"Input Dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 2} }, { "ph": "X", "cat": "Operator", "name": "ProfilerStep#2", "pid": 13721, "tid": "123", "ts": 350, "dur": 150, - "args": {"Input dims": [], "External id": 3} + "args": {"Input Dims": [], "External id": 3} }, { "ph": "X", "cat": "Operator", "name": "aten::mm", "pid": 13721, "tid": "123", "ts": 360, "dur": 50, - "args": {"Input dims": [], "External id": 4} + "args": {"Input Dims": [], "External id": 4} }, { "ph": "X", "cat": "Memcpy", @@ -564,7 +586,7 @@ def test_multiple_profilersteps_no_overlap(self): "ph": "X", "cat": "Kernel", "name": "void cunn_ClassNLLCriterion_updateGradInput_kernel", "pid": 0, "tid": "stream 7", "ts": 410, "dur": 200, - "args": {"correlation": 40348, "external id": 4} + "args": {"correlation": 40348, "external id": 4, "device": 0} }, { "ph": "X", "cat": "Runtime", @@ -581,24 +603,27 @@ def test_multiple_profilersteps_no_overlap(self): self.assertTrue(profile.has_memcpy_or_memset) self.assertEqual(len(profile.steps_costs), 2) step = profile.steps_costs[0] - self.assertEqual(step.kernel_cost, 0) - self.assertEqual(step.memcpy_cost, 40) - self.assertEqual(step.memset_cost, 0) 
- self.assertEqual(step.runtime_cost, 5) - self.assertEqual(step.dataloader_cost, 0) - self.assertEqual(step.cpuop_cost, 60 - 5) - self.assertEqual(step.other_cost, 200 - 60 - 20) - self.assertEqual(step.step_total_cost, 320 - 100) # Device side takes effect. + self.assertEqual(step.costs[ProfileRole.Kernel], 0) + self.assertEqual(step.costs[ProfileRole.Memcpy], 40) + self.assertEqual(step.costs[ProfileRole.Memset], 0) + self.assertEqual(step.costs[ProfileRole.Runtime], 5) + self.assertEqual(step.costs[ProfileRole.DataLoader], 0) + self.assertEqual(step.costs[ProfileRole.CpuOp], 60 - 5) + self.assertEqual(step.costs[ProfileRole.Other], 200 - 60 - 20) + # Device side takes effect. + self.assertEqual(step.costs[ProfileRole.Total], 320 - 100) step = profile.steps_costs[1] - self.assertEqual(step.kernel_cost, 200) - self.assertEqual(step.memcpy_cost, 0) - self.assertEqual(step.memset_cost, 0) - self.assertEqual(step.runtime_cost, 5) - self.assertEqual(step.dataloader_cost, 0) - self.assertEqual(step.cpuop_cost, 50 - 5) - self.assertEqual(step.other_cost, 360 - 350) - self.assertEqual(step.step_total_cost, 610 - 350) # Device side takes effect. - self.assertEqual(profile.avg_costs.step_total_cost, ((320 - 100) + (610 - 350)) / 2) + self.assertEqual(step.costs[ProfileRole.Kernel], 200) + self.assertEqual(step.costs[ProfileRole.Memcpy], 0) + self.assertEqual(step.costs[ProfileRole.Memset], 0) + self.assertEqual(step.costs[ProfileRole.Runtime], 5) + self.assertEqual(step.costs[ProfileRole.DataLoader], 0) + self.assertEqual(step.costs[ProfileRole.CpuOp], 50 - 5) + self.assertEqual(step.costs[ProfileRole.Other], 360 - 350) + # Device side takes effect. 
+ self.assertEqual(step.costs[ProfileRole.Total], 610 - 350) + self.assertEqual( + profile.avg_costs.costs[ProfileRole.Total], ((320 - 100) + (610 - 350)) / 2) self.assertEqual(len(profile.op_list_groupby_name), 2) self.assertEqual(len(profile.op_list_groupby_name_input), 2) @@ -608,7 +633,8 @@ def test_op_list(op_list): for op_agg in op_list: if op_agg.name == "aten::to": op_count += 1 - self.assertEqual(op_agg.input_shape, "[[2, 8, 5], [], [], [], [], [], [], []]") + self.assertEqual(op_agg.input_shape, + "[[2, 8, 5], [], [], [], [], [], [], []]") self.assertEqual(op_agg.calls, 1) self.assertEqual(op_agg.host_duration, 60) self.assertEqual(op_agg.device_duration, 40) @@ -631,11 +657,15 @@ def test_op_list(op_list): self.assertEqual(profile.kernel_stat.shape[0], 1) self.assertEqual(profile.kernel_list_groupby_name_op[0].name, "void cunn_ClassNLLCriterion_updateGradInput_kernel") - self.assertEqual(profile.kernel_list_groupby_name_op[0].op_name, "aten::mm") + self.assertEqual( + profile.kernel_list_groupby_name_op[0].op_name, "aten::mm") self.assertEqual(profile.kernel_list_groupby_name_op[0].calls, 1) - self.assertEqual(profile.kernel_list_groupby_name_op[0].total_duration, 200) - self.assertEqual(profile.kernel_list_groupby_name_op[0].min_duration, 200) - self.assertEqual(profile.kernel_list_groupby_name_op[0].max_duration, 200) + self.assertEqual( + profile.kernel_list_groupby_name_op[0].total_duration, 200) + self.assertEqual( + profile.kernel_list_groupby_name_op[0].min_duration, 200) + self.assertEqual( + profile.kernel_list_groupby_name_op[0].max_duration, 200) self.assertEqual(profile.kernel_stat.iloc[0]["count"], 1) self.assertEqual(profile.kernel_stat.iloc[0]["sum"], 200) self.assertEqual(profile.kernel_stat.iloc[0]["mean"], 200) @@ -650,19 +680,19 @@ def test_external_id(self): "ph": "X", "cat": "Operator", "name": "aten::mat_mul", "pid": 13721, "tid": "456", "ts": 100, "dur": 100, - "args": {"Input dims": [], "External id": 2} + "args": {"Input Dims": 
[], "External id": 2} }, { "ph": "X", "cat": "Operator", "name": "aten::mm", "pid": 13721, "tid": "456", "ts": 120, "dur": 40, - "args": {"Input dims": [], "External id": 4} + "args": {"Input Dims": [], "External id": 4} }, { "ph": "X", "cat": "Kernel", "name": "void cunn_ClassNLLCriterion_updateGradInput_kernel", "pid": 0, "tid": "stream 7", "ts": 155, "dur": 20, - "args": {"correlation": 334, "external id": 4} + "args": {"correlation": 334, "external id": 4, "device": 0} }, { "ph": "X", "cat": "Runtime", @@ -674,7 +704,7 @@ def test_external_id(self): "ph": "X", "cat": "Kernel", "name": "void cunn_ClassNLLCriterion_updateGradInput_kernel", "pid": 0, "tid": "stream 7", "ts": 210, "dur": 16, - "args": {"correlation": 335, "external id": 2} + "args": {"correlation": 335, "external id": 2, "device": 0} }, { "ph": "X", "cat": "Runtime", @@ -715,31 +745,31 @@ def test_multiple_profilersteps_with_overlap(self): "ph": "X", "cat": "Operator", "name": "ProfilerStep#1", "pid": 13721, "tid": "123", "ts": 100, "dur": 200, - "args": {"Input dims": [], "External id": 1} + "args": {"Input Dims": [], "External id": 1} }, { "ph": "X", "cat": "Operator", "name": "aten::to", "pid": 13721, "tid": "123", "ts": 200, "dur": 60, - "args": {"Input dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 2} + "args": {"Input Dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 2} }, { "ph": "X", "cat": "Operator", "name": "ProfilerStep#2", "pid": 13721, "tid": "123", "ts": 350, "dur": 150, - "args": {"Input dims": [], "External id": 3} + "args": {"Input Dims": [], "External id": 3} }, { "ph": "X", "cat": "Operator", "name": "aten::mm", "pid": 13721, "tid": "123", "ts": 360, "dur": 50, - "args": {"Input dims": [], "External id": 4} + "args": {"Input Dims": [], "External id": 4} }, { "ph": "X", "cat": "Kernel", "name": "void cunn_ClassNLLCriterion_updateGradInput_kernel", "pid": 0, "tid": "stream 7", "ts": 150, "dur": 90, - "args": {"correlation": 123, "external id": 0} + 
"args": {"correlation": 123, "external id": 0, "device": 0} }, { "ph": "X", "cat": "Memcpy", @@ -757,7 +787,7 @@ def test_multiple_profilersteps_with_overlap(self): "ph": "X", "cat": "Kernel", "name": "void cunn_ClassNLLCriterion_updateGradInput_kernel", "pid": 0, "tid": "stream 7", "ts": 410, "dur": 200, - "args": {"correlation": 40348, "external id": 4} + "args": {"correlation": 40348, "external id": 4, "device": 0} }, { "ph": "X", "cat": "Runtime", @@ -774,23 +804,28 @@ def test_multiple_profilersteps_with_overlap(self): self.assertTrue(profile.has_memcpy_or_memset) self.assertEqual(len(profile.steps_costs), 2) step = profile.steps_costs[0] - self.assertEqual(step.kernel_cost, 0) - self.assertEqual(step.memcpy_cost, 100) - self.assertEqual(step.memset_cost, 0) - self.assertEqual(step.runtime_cost, 5) - self.assertEqual(step.dataloader_cost, 0) - self.assertEqual(step.cpuop_cost, (200 + 60) - (150 + 90) - 5) - self.assertEqual(step.other_cost, 280 - (200 + 60)) - self.assertEqual(step.step_total_cost, (280 + 100) - (150 + 90)) # Device side takes effect. + self.assertEqual(step.costs[ProfileRole.Kernel], 0) + self.assertEqual(step.costs[ProfileRole.Memcpy], 100) + self.assertEqual(step.costs[ProfileRole.Memset], 0) + self.assertEqual(step.costs[ProfileRole.Runtime], 5) + self.assertEqual(step.costs[ProfileRole.DataLoader], 0) + self.assertEqual(step.costs[ProfileRole.CpuOp], + (200 + 60) - (150 + 90) - 5) + self.assertEqual(step.costs[ProfileRole.Other], 280 - (200 + 60)) + # Device side takes effect. 
+ self.assertEqual(step.costs[ProfileRole.Total], + (280 + 100) - (150 + 90)) step = profile.steps_costs[1] - self.assertEqual(step.kernel_cost, 200) - self.assertEqual(step.memcpy_cost, 0) - self.assertEqual(step.memset_cost, 0) - self.assertEqual(step.runtime_cost, 5) - self.assertEqual(step.dataloader_cost, 0) - self.assertEqual(step.cpuop_cost, (280 + 100) - 360 + (410 - 405)) - self.assertEqual(step.other_cost, 0) - self.assertEqual(step.step_total_cost, 610 - (280 + 100)) # Device side takes effect. + self.assertEqual(step.costs[ProfileRole.Kernel], 200) + self.assertEqual(step.costs[ProfileRole.Memcpy], 0) + self.assertEqual(step.costs[ProfileRole.Memset], 0) + self.assertEqual(step.costs[ProfileRole.Runtime], 5) + self.assertEqual(step.costs[ProfileRole.DataLoader], 0) + self.assertEqual(step.costs[ProfileRole.CpuOp], + (280 + 100) - 360 + (410 - 405)) + self.assertEqual(step.costs[ProfileRole.Other], 0) + # Device side takes effect. + self.assertEqual(step.costs[ProfileRole.Total], 610 - (280 + 100)) # Test whether step time is calculated correctly when the last 2 steps have no kernels launched. 
def test_last_steps_no_kernel(self): @@ -799,31 +834,31 @@ def test_last_steps_no_kernel(self): "ph": "X", "cat": "Operator", "name": "ProfilerStep#1", "pid": 13721, "tid": "123", "ts": 100, "dur": 200, - "args": {"Input dims": [], "External id": 1} + "args": {"Input Dims": [], "External id": 1} }, { "ph": "X", "cat": "Operator", "name": "aten::to", "pid": 13721, "tid": "123", "ts": 120, "dur": 10, - "args": {"Input dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 2} + "args": {"Input Dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 2} }, { "ph": "X", "cat": "Operator", "name": "ProfilerStep#2", "pid": 13721, "tid": "123", "ts": 300, "dur": 100, - "args": {"Input dims": [], "External id": 3} + "args": {"Input Dims": [], "External id": 3} }, { "ph": "X", "cat": "Operator", "name": "ProfilerStep#3", "pid": 13721, "tid": "123", "ts": 400, "dur": 50, - "args": {"Input dims": [], "External id": 4} + "args": {"Input Dims": [], "External id": 4} }, { "ph": "X", "cat": "Kernel", "name": "void cunn_ClassNLLCriterion_updateGradInput_kernel", "pid": 0, "tid": "stream 7", "ts": 90, "dur": 20, - "args": {"correlation": 123, "external id": 0} + "args": {"correlation": 123, "external id": 0, "device": 0} }, { "ph": "X", "cat": "Runtime", @@ -835,15 +870,17 @@ def test_last_steps_no_kernel(self): "ph": "X", "cat": "Kernel", "name": "void cunn_ClassNLLCriterion_updateGradInput_kernel", "pid": 0, "tid": "stream 7", "ts": 150, "dur": 180, - "args": {"correlation": 334, "external id": 2} + "args": {"correlation": 334, "external id": 2, "device": 0} }] """ profile = parse_json_trace(json_content) profile.process() - self.assertEqual(len(profile.steps_costs), 1) # The last 2 steps without kernels are removed from overall view. + # The last 2 steps without kernels are removed from overall view. 
+ self.assertEqual(len(profile.steps_costs), 1) step = profile.steps_costs[0] - self.assertEqual(step.step_total_cost, (150 + 180) - (90 + 20)) + self.assertEqual( + step.costs[ProfileRole.Total], (150 + 180) - (90 + 20)) def test_pure_cpu(self): json_content = """ @@ -851,25 +888,25 @@ def test_pure_cpu(self): "ph": "X", "cat": "Operator", "name": "ProfilerStep#1", "pid": 13721, "tid": "123", "ts": 100, "dur": 200, - "args": {"Input dims": [], "External id": 1} + "args": {"Input Dims": [], "External id": 1} }, { "ph": "X", "cat": "Operator", "name": "aten::to", "pid": 13721, "tid": "123", "ts": 120, "dur": 10, - "args": {"Input dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 2} + "args": {"Input Dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 2} }, { "ph": "X", "cat": "Operator", "name": "ProfilerStep#2", "pid": 13721, "tid": "123", "ts": 300, "dur": 100, - "args": {"Input dims": [], "External id": 3} + "args": {"Input Dims": [], "External id": 3} }, { "ph": "X", "cat": "Operator", "name": "aten::mm", "pid": 13721, "tid": "123", "ts": 350, "dur": 40, - "args": {"Input dims": [], "External id": 4} + "args": {"Input Dims": [], "External id": 4} }] """ profile = parse_json_trace(json_content) @@ -877,24 +914,646 @@ def test_pure_cpu(self): self.assertEqual(len(profile.steps_costs), 2) step = profile.steps_costs[0] - self.assertEqual(step.kernel_cost, 0) - self.assertEqual(step.memcpy_cost, 0) - self.assertEqual(step.memset_cost, 0) - self.assertEqual(step.runtime_cost, 0) - self.assertEqual(step.dataloader_cost, 0) - self.assertEqual(step.cpuop_cost, 10) - self.assertEqual(step.other_cost, 200 - 10) - self.assertEqual(step.step_total_cost, 200) + self.assertEqual(step.costs[ProfileRole.Kernel], 0) + self.assertEqual(step.costs[ProfileRole.Memcpy], 0) + self.assertEqual(step.costs[ProfileRole.Memset], 0) + self.assertEqual(step.costs[ProfileRole.Runtime], 0) + self.assertEqual(step.costs[ProfileRole.DataLoader], 0) + 
self.assertEqual(step.costs[ProfileRole.CpuOp], 10) + self.assertEqual(step.costs[ProfileRole.Other], 200 - 10) + self.assertEqual(step.costs[ProfileRole.Total], 200) step = profile.steps_costs[1] - self.assertEqual(step.kernel_cost, 0) - self.assertEqual(step.memcpy_cost, 0) - self.assertEqual(step.memset_cost, 0) - self.assertEqual(step.runtime_cost, 0) - self.assertEqual(step.dataloader_cost, 0) - self.assertEqual(step.cpuop_cost, 40) - self.assertEqual(step.other_cost, 100 - 40) - self.assertEqual(step.step_total_cost, 100) + self.assertEqual(step.costs[ProfileRole.Kernel], 0) + self.assertEqual(step.costs[ProfileRole.Memcpy], 0) + self.assertEqual(step.costs[ProfileRole.Memset], 0) + self.assertEqual(step.costs[ProfileRole.Runtime], 0) + self.assertEqual(step.costs[ProfileRole.DataLoader], 0) + self.assertEqual(step.costs[ProfileRole.CpuOp], 40) + self.assertEqual(step.costs[ProfileRole.Other], 100 - 40) + self.assertEqual(step.costs[ProfileRole.Total], 100) + + # Test GPU utilization, est. SM efficiency, and occupancy. + def test_gpu_utilization(self): + json_content = """ + [{ + "ph": "X", "cat": "Operator", + "name": "aten::mat_mul", "pid": 13721, "tid": "456", + "ts": 100, "dur": 100, + "args": {"Input Dims": [], "External id": 2} + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::mm", "pid": 13721, "tid": "456", + "ts": 120, "dur": 70, + "args": {"Input Dims": [], "External id": 4} + }, + { + "ph": "X", "cat": "Kernel", + "name": "void cunn_ClassNLLCriterion_updateGradInput_kernel", "pid": 1, "tid": "stream 7", + "ts": 130, "dur": 10, + "args": {"correlation": 334, "external id": 4, "device": 1, + "blocks per SM": 0.5, "est. 
achieved occupancy %": 0.6} + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 13721, "tid": "456", + "ts": 120, "dur": 0, + "args": {"correlation": 334, "external id": 4} + }, + { + "ph": "X", "cat": "Kernel", + "name": "void gemmSN_TN_kernel_64addr", "pid": 1, "tid": "stream 8", + "ts": 135, "dur": 15, + "args": {"correlation": 335, "external id": 2, "device": 1, + "blocks per SM": 0.6, "est. achieved occupancy %": 0.1} + }, + { + "ph": "X", "cat": "Kernel", + "name": "void gemmSN_TN_kernel_64addr", "pid": 1, "tid": "stream 8", + "ts": 150, "dur": 0, + "args": {"correlation": 335, "external id": 2, "device": 1, + "blocks per SM": 0.3, "est. achieved occupancy %": 0.2} + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 13721, "tid": "456", + "ts": 120, "dur": 0, + "args": {"correlation": 335, "external id": 2} + }, + { + "ph": "X", "cat": "Kernel", + "name": "void cunn_ClassNLLCriterion_updateGradInput_kernel", "pid": 1, "tid": "stream 7", + "ts": 145, "dur": 25, + "args": {"correlation": 336, "external id": 4, "device": 1, + "blocks per SM": 0.3, "est. achieved occupancy %": 1.0} + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 13721, "tid": "456", + "ts": 125, "dur": 3, + "args": {"correlation": 336, "external id": 4} + }, + { + "ph": "X", "cat": "Kernel", + "name": "void cunn_ClassNLLCriterion_updateGradInput_kernel", "pid": 1, "tid": "stream 7", + "ts": 200, "dur": 20, + "args": {"correlation": 337, "external id": 2, "device": 1, + "blocks per SM": 10.5, "est. 
achieved occupancy %": 0.3} + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 13721, "tid": "456", + "ts": 195, "dur": 1, + "args": {"correlation": 337, "external id": 2} + }] + """ + profile = parse_json_trace(json_content) + profile.process() + + self.assertEqual(len(profile.gpu_ids), 1) + self.assertAlmostEqual(profile.gpu_utilization[1], (40 + 20) / 120) + self.assertAlmostEqual(profile.sm_efficency[1], + (0.5 * (135 - 130) + + 1.0 * (140 - 135) + + 0.6 * (145 - 140) + + 0.9 * (150 - 145) + + 0.3 * (170 - 150) + + 1.0 * (220 - 200)) / (220 - 100)) + self.assertAlmostEqual(profile.occupancy[1], + (0.6 * 10 + 0.1 * 15 + 1.0 * 25 + 0.3 * 20) / (10 + 15 + 25 + 20)) + + gpu_util_expected = [(100, 0), (110, 0), (120, 0), (130, 1.0), (140, 1.0), (150, 1.0), (160, 1.0), + (170, 0), (180, 0), (190, 0), (200, 1.0), (210, 1.0), (220, 0)] + for gpu_id in profile.gpu_ids: + buckets = profile.gpu_util_buckets[gpu_id] + gpu_util_id = 0 + for b in buckets: + self.assertEqual(b[0], gpu_util_expected[gpu_util_id][0]) + self.assertAlmostEqual(b[1], gpu_util_expected[gpu_util_id][1]) + gpu_util_id += 1 + self.assertEqual(gpu_util_id, len(gpu_util_expected)) + + sm_efficiency_expected = [(130, 0.5), (135, 0), (135, 1.0), (140, 0), (140, 0.6), (145, 0), (145, 0.9), + (150, 0), (150, 0.3), (170, 0), (170, 0), (200, 0), (200, 1.0), (220, 0)] + for gpu_id in profile.gpu_ids: + ranges = profile.approximated_sm_efficency_ranges[gpu_id] + sm_efficiency_id = 0 + for r in ranges: + self.assertEqual( + r[0], sm_efficiency_expected[sm_efficiency_id][0]) + self.assertAlmostEqual( + r[2], sm_efficiency_expected[sm_efficiency_id][1]) + sm_efficiency_id += 1 + self.assertEqual( + r[1], sm_efficiency_expected[sm_efficiency_id][0]) + self.assertAlmostEqual( + 0, sm_efficiency_expected[sm_efficiency_id][1]) + sm_efficiency_id += 1 + self.assertEqual(sm_efficiency_id, len(sm_efficiency_expected)) + + count = 0 + for agg_by_op in profile.kernel_list_groupby_name_op: + if 
agg_by_op.name == "void gemmSN_TN_kernel_64addr" and agg_by_op.op_name == "aten::mat_mul": + self.assertAlmostEqual(agg_by_op.avg_blocks_per_sm, 0.6) + self.assertAlmostEqual(agg_by_op.avg_occupancy, 0.1) + count += 1 + if agg_by_op.name == "void cunn_ClassNLLCriterion_updateGradInput_kernel" and \ + agg_by_op.op_name == "aten::mm": + self.assertAlmostEqual( + agg_by_op.avg_blocks_per_sm, (0.5 * 10 + 0.3 * 25) / (10 + 25)) + self.assertAlmostEqual( + agg_by_op.avg_occupancy, (0.6 * 10 + 1.0 * 25) / (10 + 25)) + count += 1 + if agg_by_op.name == "void cunn_ClassNLLCriterion_updateGradInput_kernel" and \ + agg_by_op.op_name == "aten::mat_mul": + self.assertAlmostEqual(agg_by_op.avg_blocks_per_sm, 10.5) + self.assertAlmostEqual(agg_by_op.avg_occupancy, 0.3) + count += 1 + self.assertEqual(count, 3) + + count = 0 + for _id, (name, row) in enumerate(profile.kernel_stat.iterrows()): + # The kernel with zero "dur" should be ignored. + if name == "void gemmSN_TN_kernel_64addr": + self.assertAlmostEqual(row["blocks_per_sm"], 0.6) + self.assertAlmostEqual(row["occupancy"], 0.1) + count += 1 + if name == "void cunn_ClassNLLCriterion_updateGradInput_kernel": + self.assertAlmostEqual( + row["blocks_per_sm"], (0.5 * 10 + 0.3 * 25 + 10.5 * 20) / (10 + 25 + 20)) + self.assertAlmostEqual( + row["occupancy"], (0.6 * 10 + 1.0 * 25 + 0.3 * 20) / (10 + 25 + 20)) + count += 1 + self.assertEqual(count, 2) + + def test_dump_gpu_metrics(self): + profile = RunProfile("test_dump_gpu_metrics", None) + # Faked data for easy to see in UI. Real data values are 1/100 of these. 
+ profile.gpu_util_buckets = [[(1621401187223005, 0.0), (1621401187224005, 0.0), + (1621401187225005, 0.6), (1621401187226005, 0.5), + (1621401187227005, 0.6), (1621401187228005, 0.2), + (1621401187229005, 0.6), (1621401187230005, 0.1), + (1621401187231005, 0.5), (1621401187232005, 0.2), + (1621401187233005, 0.3), (1621401187234005, 0.4), + (1621401187235005, 0.4219409282700422), + (1621401187236901, 0)]] + # Faked data for easy to see in UI. Real data values are 1/10 of these. + profile.approximated_sm_efficency_ranges = \ + [[(1621401187225275, 1621401187225278, 0.25), (1621401187225530, 1621401187225532, 0.125), + (1621401187225820, 1621401187225821, 0.125), (1621401187226325, 1621401187226327, 0.25), + (1621401187226575, 1621401187226577, 0.125), (1621401187226912, 1621401187226913, 0.125), + (1621401187227092, 1621401187227094, 0.125), (1621401187227619, 1621401187227620, 0.125), + (1621401187227745, 1621401187227746, 0.125), (1621401187227859, 1621401187227860, 0.125), + (1621401187227973, 1621401187227974, 0.125), (1621401187228279, 1621401187228280, 0.125), + (1621401187228962, 1621401187228963, 0.125), (1621401187229153, 1621401187229155, 0.125), + (1621401187229711, 1621401187229715, 0.125), (1621401187230162, 1621401187230163, 0.125), + (1621401187231100, 1621401187231103, 0.125), (1621401187231692, 1621401187231694, 0.5), + (1621401187232603, 1621401187232604, 0.125), (1621401187232921, 1621401187232922, 0.125), + (1621401187233342, 1621401187233343, 0.125), (1621401187233770, 1621401187233772, 0.125), + (1621401187234156, 1621401187234159, 0.125), (1621401187234445, 1621401187234446, 0.125), + (1621401187235025, 1621401187235028, 0.125), (1621401187235555, 1621401187235556, 0.125), + (1621401187236158, 1621401187236159, 0.125), (1621401187236278, 1621401187236279, 0.125), + (1621401187236390, 1621401187236391, 0.125), (1621401187236501, 1621401187236502, 0.125)]] + + trace_json_flat_path = "gpu_metrics_input.json" + with open(trace_json_flat_path, 
"rb") as file: + raw_data = file.read() + data_with_gpu_metrics_compressed = profile.append_gpu_metrics(raw_data) + data_with_gpu_metrics_flat = gzip.decompress( + data_with_gpu_metrics_compressed) + + trace_json_expected_path = "gpu_metrics_expected.json" + with open(trace_json_expected_path, "rb") as file: + data_expected = file.read() + + # Parse to json in order to ignore text format difference. + data_with_gpu_metrics_json = json.loads( + data_with_gpu_metrics_flat.decode("utf8")) + data_expected_json = json.loads(data_expected.decode("utf8")) + data_with_gpu_metrics_str = json.dumps( + data_with_gpu_metrics_json, sort_keys=True) + data_expected_str = json.dumps(data_expected_json, sort_keys=True) + + self.assertEqual(data_with_gpu_metrics_str, data_expected_str) + + try: + data = json.loads(data_with_gpu_metrics_flat.decode("utf8")) + except: + self.assertTrue( + False, "The string fails to be parsed by json after appending gpu metrics.") + + def test_memory_view(self): + json_content = """[ + { + "ph": "X", "cat": "Operator", + "name": "aten::to", "pid": 13721, "tid": "123", + "ts": 10, "dur": 10, + "args": {"Input Dims": [], "External id": 2} + }, + { + "ph": "X", "cat": "Operator", + "name": "enumerate(DataLoader)#_SingleProcessDataLoaderIter.__next__", "pid": 13721, "tid": "123", + "ts": 100, "dur": 180, + "args": {"Input Dims": [], "External id": 2} + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::to", "pid": 13721, "tid": "123", + "ts": 200, "dur": 60, + "args": {"Input Dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 3} + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::nll_loss_backward", "pid": 13721, "tid": "123", + "ts": 340, "dur": 70, + "args": {"Input Dims": [[], [32, 1000], [32], [], [], [], []], "External id": 4} + }, + { + "ph": "X", "cat": "Operator", + "name": "ProfilerStep#1", "pid": 13721, "tid": "123", + "ts": 50, "dur": 400, + "args": {"Input Dims": [], "External id": 1} + }, + { + "ph": "X", "cat": 
"Operator", + "name": "ProfilerStep#2", "pid": 13721, "tid": "123", + "ts": 500, "dur": 500, + "args": {"Input Dims": [], "External id": 1} + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::to", "pid": 13721, "tid": "123", + "ts": 510, "dur": 150, + "args": {"Input Dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 3} + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::copy_", "pid": 13721, "tid": "123", + "ts": 520, "dur": 100, + "args": {"Input Dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 3} + }, + + { + "ph": "X", "cat": "Operator", + "name": "aten::liner", "pid": 13721, "tid": "123", + "ts": 700, "dur": 100, + "args": {"Input Dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 3} + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::t", "pid": 13721, "tid": "123", + "ts": 705, "dur": 40, + "args": {"Input Dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 3} + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::transpose", "pid": 13721, "tid": "123", + "ts": 710, "dur": 30, + "args": {"Input Dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 3} + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::tranas_stride", "pid": 13721, "tid": "123", + "ts": 720, "dur": 10, + "args": {"Input Dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 3} + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::addmm", "pid": 13721, "tid": "123", + "ts": 750, "dur": 40, + "args": {"Input Dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 3} + }, + { + "ph": "X", "cat": "Operator", + "name": "aten::to", "pid": 13721, "tid": "123", + "ts": 900, "dur": 100, + "args": {"Input Dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 3} + }, + { + "ph": "X", "cat": "Memcpy", + "name": "Memcpy HtoD (Pageable -> Device)", "pid": 0, "tid": "stream 7", + "ts": 405, "dur": 10, + "args": {"stream": 7, "correlation": 334, "external id": 4} + }, + { + "ph": "X", "cat": 
"Runtime", + "name": "cudaMemcpyAsync", "pid": 13721, "tid": "456", + "ts": 360, "dur": 20, + "args": {"correlation": 334, "external id": 4} + }, + { + "ph": "X", "cat": "Memset", + "name": "Memset (Device)", "pid": 0, "tid": "stream 7", + "ts": 420, "dur": 5, + "args": {"stream": 7, "correlation": 40344, "external id": 4} + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaMemsetAsync", "pid": 13721, "tid": "456", + "ts": 390, "dur": 10, + "args": {"correlation": 40344, "external id": 4} + }, + { + "ph": "X", "cat": "Kernel", + "name": "void cunn_ClassNLLCriterion_updateGradInput_kernel", "pid": 0, "tid": "stream 7", + "ts": 430, "dur": 15, + "args": {"correlation": 40348, "external id": 4, "device": 0} + }, + { + "ph": "X", "cat": "Runtime", + "name": "cudaLaunchKernel", "pid": 13721, "tid": "456", + "ts": 405, "dur": 5, + "args": {"correlation": 40348, "external id": 4} + }, + + + { + "ph": "i", "s": "t", "name": "[memory] ignored by test before start time", + "pid": 13721, "tid": 123, + "ts": 90, + "args": { + "Device Type": 0, "Device Id": -1, "Bytes": 4 + } + }, + { + "ph": "i", "s": "t", "name": "[memory] ignored in ProfilerStep", + "pid": 13721, "tid": 123, + "ts": 150, + "args": { + "Device Type": 0, "Device Id": -1, "Bytes": 4 + } + }, + { + "ph": "i", "s": "t", "name": "[memory] belongs to aten::to", + "pid": 13721, "tid": 123, + "ts": 200, + "args": { + "Device Type": 0, "Device Id": -1, "Bytes": 4 + } + }, + { + "ph": "i", "s": "t", "name": "[memory] aten::to", + "pid": 13721, "tid": 123, + "ts": 210, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": 4 + } + }, + { + "ph": "i", "s": "t", "name": "[memory] ignored since beyond aten::to and before nll_loss_backward", + "pid": 13721, "tid": 123, + "ts": 265, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": 4 + } + }, + { + "ph": "i", "s": "t", "name": "[memory] ignored", + "pid": 13721, "tid": 123, + "ts": 300, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": 4 + } + }, + { + 
"ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 350, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": 10 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 360, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": -10 + } + }, + { + "ph": "i", "s": "t", "name": "[memory] ignored", + "pid": 13721, "tid": 123, + "ts": 450, + "args": { + "Device Type": 0, "Device Id": -1, "Bytes": 1000000 + } + }, + + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 515, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": 100 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 520, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": 100 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 600, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": -100 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]ignored", + "pid": 13721, "tid": 123, + "ts": 690, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": 100 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 701, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": 100 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 795, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": -100 + } + }, + + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 708, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": 100 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 742, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": -100 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 715, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": 50 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 
735, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": -50 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 725, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": 50 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 728, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": -50 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 729, + "args": { + "Device Type": 0, "Device Id": -1, "Bytes": 50 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 746, + "args": { + "Device Type": 0, "Device Id": -1, "Bytes": 100 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 747, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": 20 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 749, + "args": { + "Device Type": 0, "Device Id": -1, "Bytes": -100 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 760, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": 30 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 780, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": -30 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 795, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": 10 + } + }, + { + "ph": "i", "s": "t", "name": "[memory]", + "pid": 13721, "tid": 123, + "ts": 799, + "args": { + "Device Type": 1, "Device Id": 0, "Bytes": -10 + } + } + ] + """ + import logging + from torch_tb_profiler.utils import get_logger + logger = get_logger() + logger.addHandler(logging.StreamHandler()) + + profile = parse_json_trace(json_content) + profile.process() + + self.assertEqual(len(profile.memory_stats), 2) + self.assertEqual("GPU0" in profile.memory_stats, True) + + # validation + 
gpu_expected_data = { + # self increase size, self allocation size, self allocation count, increase size, allocation size, allocation count, + 'aten::to': [104, 104, 2, 104, 204, 3, 4], + 'aten::nll_loss_backward': [0, 10, 1, 0, 10, 1 ,1], + 'aten::copy_': [0, 100, 1, 0, 100, 1, 1], + 'aten::addmm': [0, 30, 1, 0, 30, 1, 1], + 'aten::tranas_stride': [0, 50, 1, 0, 50, 1, 1], + 'aten::transpose': [0, 50, 1, 0, 100, 2, 1], + 'aten::t': [0, 100, 1, 0, 200, 3, 1], + 'aten::liner': [20, 130, 3, 20, 360, 7, 1] + } + + cpu_expected_data = { + 'aten::to': [4, 4, 1, 4, 4, 1, 4], + 'aten::liner': [0, 100, 1, 50, 150, 2, 1], + 'aten::tranas_stride': [50, 50, 1, 50, 50, 1, 1], + 'aten::transpose': [0, 0, 0, 50, 50, 1, 1], + 'aten::t': [0, 0, 0, 50, 50, 1, 1] + } + validate_data = [ + (profile.memory_stats["CPU"], cpu_expected_data), + (profile.memory_stats["GPU0"], gpu_expected_data) + ] + for (mem_stat, expected_data) in validate_data: + for name, values in expected_data.items(): + # self increase size + self.assertEqual(mem_stat[name][0], values[0]) + # self allocation size + self.assertEqual(mem_stat[name][1], values[1]) + # self allocation count + self.assertEqual(mem_stat[name][2], values[2]) + self.assertEqual(mem_stat[name][3], values[3]) # increase size + # allocation size + self.assertEqual(mem_stat[name][4], values[4]) + # allocation count + self.assertEqual(mem_stat[name][5], values[5]) + # op calls + self.assertEqual(mem_stat[name][6], values[6]) if __name__ == '__main__': unittest.main() diff --git a/tb_plugin/test/test_tensorboard_end2end.py b/tb_plugin/test/test_tensorboard_end2end.py index cef0de93a..1f7597f43 100644 --- a/tb_plugin/test/test_tensorboard_end2end.py +++ b/tb_plugin/test/test_tensorboard_end2end.py @@ -1,24 +1,76 @@ +import json import os import socket import time +import unittest import urllib import urllib.request -import unittest from subprocess import Popen +from urllib.error import HTTPError class TestEnd2End(unittest.TestCase): + #def 
test_tensorboard_gs(self): + # test_folder = 'gs://pe-tests-public/tb_samples/' + # expected_runs = b'["resnet50_profiler_api_num_workers_0", "resnet50_profiler_api_num_workers_4"]' + # self._test_tensorboard_with_arguments(test_folder, expected_runs, {'TORCH_PROFILER_START_METHOD':'spawn'}) + def test_tensorboard_end2end(self): test_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)),'../samples') - tb = Popen(['tensorboard', '--logdir='+test_folder]) + expected_runs = b'["resnet50_num_workers_0", "resnet50_num_workers_4"]' + + print("starting spawn mode testing...") + self._test_tensorboard_with_arguments(test_folder, expected_runs, {'TORCH_PROFILER_START_METHOD':'spawn'}) + + def test_tensorboard_fork(self): + test_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)),'../samples') + expected_runs = b'["resnet50_num_workers_0", "resnet50_num_workers_4"]' + + print("starting fork mode testing") + self._test_tensorboard_with_arguments(test_folder, expected_runs) - run_link = "http://localhost:6006/data/plugin/pytorch_profiler/runs" + def test_tensorboard_with_path_prefix(self): + test_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)),'../samples') expected_runs = b'["resnet50_num_workers_0", "resnet50_num_workers_4"]' + self._test_tensorboard_with_arguments(test_folder, expected_runs, path_prefix='/tensorboard/viewer/') + + def _test_tensorboard_with_arguments(self, test_folder, expected_runs, env=None, path_prefix=None): host='localhost' - port=6006 + port=7007 - timeout = 60 + try: + if env: + env_copy = os.environ.copy() + env_copy.update(env) + env = env_copy + if not path_prefix: + tb = Popen(['tensorboard', '--logdir='+test_folder, '--port='+str(port)], env=env) + else: + tb = Popen(['tensorboard', '--logdir='+test_folder, '--port='+str(port), '--path_prefix='+path_prefix], env=env) + self._test_tensorboard(host, port, expected_runs, path_prefix) + finally: + pid = tb.pid + print("tensorboard process {} is 
terminating.".format(pid)) + tb.terminate() + + def _test_tensorboard(self, host, port, expected_runs, path_prefix): + if not path_prefix: + link_prefix = 'http://{}:{}/data/plugin/pytorch_profiler/'.format(host, port) + else: + path_prefix = path_prefix.strip('/') + link_prefix = 'http://{}:{}/{}/data/plugin/pytorch_profiler/'.format(host, port, path_prefix) + run_link = link_prefix + 'runs' + + expected_links_format=[ + link_prefix + 'overview?run={}&worker=worker0&span=1&view=Overview', + link_prefix + 'operation?run={}&worker=worker0&span=1&view=Operator&group_by=Operation', + link_prefix + 'operation/table?run={}&worker=worker0&span=1&view=Operator&group_by=Operation', + link_prefix + 'kernel/table?run={}&worker=worker0&span=1&view=Kernel&group_by=Kernel', + link_prefix + 'kernel?run={}&worker=worker0&span=1&view=Kernel&group_by=Kernel' + ] + + retry_times = 60 while True: try: socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port)) @@ -26,48 +78,47 @@ def test_tensorboard_end2end(self): break except socket.error: time.sleep(2) - timeout -= 1 - if timeout < 0: - tb.kill() - raise RuntimeError("tensorboard start timeout") + retry_times -= 1 + if retry_times < 0: + self.fail("tensorboard start timeout") continue - timeout = 60 + retry_times = 60 + while True: try: response = urllib.request.urlopen(run_link) - if response.read()==expected_runs: + data = response.read() + if data == expected_runs: break + if retry_times % 10 == 0: + print("receive mismatched data, retrying", data) time.sleep(2) - timeout -= 1 - if timeout<0: - tb.kill() - raise RuntimeError("Load run timeout") + retry_times -= 1 + if retry_times<0: + self.fail("Load run timeout") except Exception: - continue + if retry_times > 0: + continue + else: + raise - link_prefix = 'http://localhost:6006/data/plugin/pytorch_profiler/' - expected_links_format=[] - expected_links_format.append(link_prefix + 'overview?run={}&worker=worker0&view=Overview') - 
expected_links_format.append(link_prefix + 'operation?run={}&worker=worker0&view=Operator&group_by=Operation') - expected_links_format.append(link_prefix + 'operation/table?run={}&worker=worker0&view=Operator&group_by=Operation') - expected_links_format.append(link_prefix + 'kernel/table?run={}&worker=worker0&view=Kernel&group_by=Kernel') - expected_links_format.append(link_prefix + 'kernel?run={}&worker=worker0&view=Kernel&group_by=Kernel') links=[] - for run in ["resnet50_num_workers_0", - "resnet50_num_workers_4"]: + for run in json.loads(expected_runs): for expected_link in expected_links_format: links.append(expected_link.format(run)) - try: - with open('result_check_file.txt', 'r') as f: - lines=f.readlines() - i = 0 - for link in links: + with open('result_check_file.txt', 'r') as f: + lines=f.readlines() + i = 0 + print("starting testing...") + for link in links: + try: response = urllib.request.urlopen(link) self.assertEqual(response.read(), lines[i].strip().encode(encoding="utf-8")) i = i + 1 - self.assertEqual(i, 10) - finally: - tb.kill() + except HTTPError as e: + self.fail(e) + self.assertEqual(i, 10) + print("ending testing...") diff --git a/tb_plugin/torch_tb_profiler/__init__.py b/tb_plugin/torch_tb_profiler/__init__.py index 51b365adf..2f2b6acc4 100644 --- a/tb_plugin/torch_tb_profiler/__init__.py +++ b/tb_plugin/torch_tb_profiler/__init__.py @@ -3,3 +3,5 @@ # -------------------------------------------------------------------------- # Entry point for Pytorch TensorBoard plugin package. + +__version__ = "0.2.0" diff --git a/tb_plugin/torch_tb_profiler/consts.py b/tb_plugin/torch_tb_profiler/consts.py index 11a0804dc..1dbfc9fcf 100644 --- a/tb_plugin/torch_tb_profiler/consts.py +++ b/tb_plugin/torch_tb_profiler/consts.py @@ -1,22 +1,51 @@ # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# -------------------------------------------------------------------------- - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - +import re from collections import namedtuple PLUGIN_NAME = "pytorch_profiler" -TRACE_FILE_SUFFIX = ".pt.trace.json" -TRACE_GZIP_FILE_SUFFIX = ".pt.trace.json.gz" +WORKER_PATTERN = re.compile(r"""^(.*?) # worker name + (\.\d+)? # optional timestamp like 1619499959628 used as span name + \.pt\.trace\.json # the ending suffix + (?:\.gz)?$""", re.X) # optional .gz extension +NODE_PROCESS_PATTERN = re.compile(r"""^(.*)_(\d+)""") MONITOR_RUN_REFRESH_INTERNAL_IN_SECONDS = 10 +MAX_GPU_PER_NODE = 64 View = namedtuple("View", "id, name, display_name") OVERALL_VIEW = View(1, "overall", "Overview") OP_VIEW = View(2, "operator", "Operator") KERNEL_VIEW = View(3, "kernel", "Kernel") TRACE_VIEW = View(4, "trace", "Trace") +DISTRIBUTED_VIEW = View(5, "distributed", "Distributed") +MEMORY_VIEW = View(6, "memory", "Memory") + +TOOLTIP_GPU_UTIL = \ + "GPU Utilization:\n" \ + "GPU busy time / All steps time. " \ + "GPU busy time is the time during which there is at least one GPU kernel running on it. " \ + "All steps time is the total time of all profiler steps(or called as iterations).\n" +TOOLTIP_SM_EFFICIENCY = \ + "Est. SM Efficiency:\n" \ + "Estimated Stream Multiprocessor Efficiency. " \ + "Est. SM Efficiency of a kernel, SM_Eff_K = min(blocks of this kernel / SM number of this GPU, 100%). " \ + "This overall number is the sum of all kernels' SM_Eff_K weighted by kernel's execution duration, " \ + "divided by all steps time.\n" +TOOLTIP_OCCUPANCY = \ + "Est. Achieved Occupancy:\n" \ + "Occupancy is the ratio of active threads on an SM " \ + "to the maximum number of active threads supported by the SM. 
" \ + "The theoretical occupancy of a kernel is upper limit occupancy of this kernel, " \ + "limited by multiple factors such as kernel shape, kernel used resource, " \ + "and the GPU compute capability." \ + "Est. Achieved Occupancy of a kernel, OCC_K = " \ + "min(threads of the kernel / SM number / max threads per SM, theoretical occupancy of the kernel). " \ + "This overall number is the weighted sum of all kernels OCC_K " \ + "using kernel's execution duration as weight." +TOOLTIP_BLOCKS_PER_SM = \ + "Blocks Per SM:\n" \ + "min(blocks of this kernel / SM number of this GPU). " \ + "If this number is less than 1, it indicates the GPU multiprocessors are not fully utilized." diff --git a/tb_plugin/torch_tb_profiler/io/__init__.py b/tb_plugin/torch_tb_profiler/io/__init__.py new file mode 100644 index 000000000..a3807634f --- /dev/null +++ b/tb_plugin/torch_tb_profiler/io/__init__.py @@ -0,0 +1,4 @@ +from .cache import Cache +from .file import (BaseFileSystem, StatData, abspath, basename, download_file, + exists, get_filesystem, glob, isdir, join, listdir, + makedirs, read, register_filesystem, relpath, walk) diff --git a/tb_plugin/torch_tb_profiler/io/azureblob.py b/tb_plugin/torch_tb_profiler/io/azureblob.py new file mode 100644 index 000000000..4a61a4170 --- /dev/null +++ b/tb_plugin/torch_tb_profiler/io/azureblob.py @@ -0,0 +1,190 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# ------------------------------------------------------------------------- +import os +import tempfile + +from azure.storage.blob import ContainerClient + +from .. 
import utils +from .base import BaseFileSystem, RemotePath, StatData +from .utils import as_bytes, as_text, parse_blob_url + +logger = utils.get_logger() + + +class AzureBlobSystem(RemotePath, BaseFileSystem): + """Provides filesystem access to S3.""" + + def __init__(self): + if not ContainerClient: + raise ImportError("azure-storage-blob must be installed for Azure Blob support.") + self.connection_string = os.environ.get("AZURE_STORAGE_CONNECTION_STRING", None) + + def exists(self, dirname): + """Returns whether the path is a directory or not.""" + basename, parts = self.split_blob_path(dirname) + if basename is None or parts is None: + return False + if basename == "": + # root container case + return True + else: + return basename == parts[0] + + def read(self, filename, binary_mode=False, size=None, continue_from=None): + """Reads contents of a file to a string.""" + logger.info("azure blob: starting reading file %s" % filename) + account, container, path = self.container_and_path(filename) + client = self.create_container_client(account, container) + blob_client = client.get_blob_client(path) + if not blob_client.exists(): + raise FileNotFoundError("file %s doesn't exist!" 
% path) + + downloader = blob_client.download_blob(offset=continue_from, length=size) + if continue_from is not None: + continuation_token = continue_from + downloader.size + else: + continuation_token = downloader.size + + data = downloader.readall() + logger.info("azure blob: file %s download is done, size is %d" % (filename, len(data))) + if binary_mode: + return as_bytes(data), continuation_token + else: + return as_text(data), continuation_token + + def write(self, filename, file_content, binary_mode=False): + """Writes string file contents to a file.""" + account, container, path = self.container_and_path(filename) + client = self.create_container_client(account, container) + + if binary_mode: + if not isinstance(file_content, bytes): + raise TypeError("File content type must be bytes") + else: + file_content = as_bytes(file_content) + client.upload_blob(path, file_content) + + def download_file(self, filename): + fp = tempfile.NamedTemporaryFile('w+t', suffix='.%s' % self.basename(filename), delete=False) + fp.close() + + logger.info("azure blob: starting downloading file %s as %s" % (filename, fp.name)) + account, container, path = self.container_and_path(filename) + client = self.create_container_client(account, container) + blob_client = client.get_blob_client(path) + if not blob_client.exists(): + raise FileNotFoundError("file %s doesn't exist!" % path) + + downloader = blob_client.download_blob() + with open(fp.name, 'wb') as downloaded_file: + data = downloader.readall() + downloaded_file.write(data) + logger.info("azure blob: file %s is downloaded as %s, size is %d" % (filename, fp.name, len(data))) + return fp.name + + def glob(self, filename): + """Returns a list of files that match the given pattern(s).""" + # Only support prefix with * at the end and no ? 
in the string + star_i = filename.find("*") + quest_i = filename.find("?") + if quest_i >= 0: + raise NotImplementedError( + "{} not supported by compat glob".format(filename) + ) + if star_i != len(filename) - 1: + return [] + + filename = filename[:-1] + + account, container, path = self.container_and_path(filename) + client = self.create_container_client(account, container) + blobs = client.list_blobs(name_starts_with=path) + return [blob.name for blob in blobs] + + def isdir(self, dirname): + """Returns whether the path is a directory or not.""" + basename, parts = self.split_blob_path(dirname) + if basename is None or parts is None: + return False + if basename == "": + # root container case + return True + else: + return basename == parts[0] and len(parts) > 1 + + def listdir(self, dirname): + """Returns a list of entries contained within a directory.""" + account, container, path = self.container_and_path(dirname) + client = self.create_container_client(account, container) + blob_iter = client.list_blobs(name_starts_with=path) + items = [] + for blob in blob_iter: + item = self.relpath(blob.name, path) + if items not in items: + items.append(item) + return items + + def makedirs(self, dirname): + """No need create directory since the upload blob will automatically create""" + pass + + def stat(self, filename): + """Returns file statistics for a given path.""" + account, container, path = self.container_and_path(filename) + client = self.create_container_client(account, container) + blob_client = client.get_blob_client(path) + props = blob_client.get_blob_properties() + return StatData(props.size) + + def walk(self, top, topdown=True, onerror=None): + account, container, path = self.container_and_path(top) + client = self.create_container_client(account, container) + blobs = client.list_blobs(name_starts_with=path) + results = {} + for blob in blobs: + dirname, basename = self.split(blob.name) + dirname = "https://{}/{}/{}".format(account, container, dirname) 
+ results.setdefault(dirname, []).append(basename) + for key, value in results.items(): + yield key, None, value + + def split_blob_path(self, blob_path): + """ Find the first blob start with blob_path, then get the relative path starting from dirname(blob_path). Finally, split the relative path. + return (basename(blob_path), [relative splitted paths]) + If blob_path doesn't exist, return (None, None) + For example, + For blob https://trainingdaemon.blob.core.windows.net/tests/test1/test2/test.txt + * If the blob_path is '', return ('', [test1, test2, test.txt]) + * If the blob_path is test1, return (test1, [test2, test.txt]) + * If the blob_path is test1/test2, return (test2, [test2, test.txt]) + * If the blob_path is test1/test2/test.txt, return (test.txt, [test.txt]) + """ + account, container, path = self.container_and_path(blob_path) + client = self.create_container_client(account, container) + blobs = client.list_blobs(name_starts_with=path, maxresults=1) + + for blob in blobs: + dir_path, basename = self.split(path) + if dir_path: + rel_path = blob.name[len(dir_path):] + parts = rel_path.lstrip('/').split('/') + else: + parts = blob.name.split('/') + return (basename, parts) + return (None, None) + + def container_and_path(self, url): + """Split an Azure blob -prefixed URL into container and blob path.""" + root, parts = parse_blob_url(url) + if len(parts) != 2: + raise ValueError("Invalid azure blob url %s" % url) + return root, parts[0], parts[1] + + def create_container_client(self, account, container): + if self.connection_string: + client = ContainerClient.from_connection_string(self.connection_string, container) + else: + client = ContainerClient.from_container_url("https://{}/{}".format(account, container)) + return client diff --git a/tb_plugin/torch_tb_profiler/io/base.py b/tb_plugin/torch_tb_profiler/io/base.py new file mode 100644 index 000000000..2190522c4 --- /dev/null +++ b/tb_plugin/torch_tb_profiler/io/base.py @@ -0,0 +1,109 @@ +# 
------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# ------------------------------------------------------------------------- +import os +from abc import ABC, abstractmethod +from collections import namedtuple + +# Data returned from the Stat call. +StatData = namedtuple("StatData", ["length"]) + + +class BaseFileSystem(ABC): + def support_append(self): + return False + + def append(self, filename, file_content, binary_mode=False): + pass + + def download_file(self, filename): + return filename + + @abstractmethod + def exists(self, filename): + raise NotImplementedError + + @abstractmethod + def read(self, file, binary_mode=False, size=None, continue_from=None): + raise NotImplementedError + + @abstractmethod + def write(self, filename, file_content, binary_mode=False): + raise NotImplementedError + + @abstractmethod + def glob(self, filename): + raise NotImplementedError + + @abstractmethod + def isdir(self, dirname): + raise NotImplementedError + + @abstractmethod + def listdir(self, dirname): + raise NotImplementedError + + @abstractmethod + def makedirs(self, path): + raise NotImplementedError + + @abstractmethod + def stat(self, filename): + raise NotImplementedError + +class BasePath(ABC): + @abstractmethod + def join(self, path, *paths): + pass + + @abstractmethod + def abspath(self, path): + pass + + @abstractmethod + def basename(self, path): + pass + + @abstractmethod + def relpath(self, path, start): + pass + +class LocalPath(BasePath): + def abspath(self, path): + return os.path.abspath(os.path.expanduser(os.path.expandvars(path))) + + def basename(self, path): + return os.path.basename(path) + + def relpath(self, path, start): + return os.path.relpath(path, start) + + def join(self, path, *paths): + return os.path.join(path, *paths) + +class RemotePath(BasePath): + def split(self, path): + """Split a pathname. 
Returns tuple "(head, tail)" where "tail" is + everything after the final slash. Either part may be empty.""" + sep = '/' + i = path.rfind(sep) + 1 + head, tail = path[:i], path[i:] + head = head.rstrip(sep) + return (head, tail) + + def join(self, path, *paths): + """Join paths with a slash.""" + return "/".join((path,) + paths) + + def abspath(self, path): + return path + + def basename(self, path): + return path.split('/')[-1] + + def relpath(self, path, start): + if not path.startswith(start): + return path + start = start.rstrip('/') + begin = len(start) + 1 # include the ending slash '/' + return path[begin:] diff --git a/tb_plugin/torch_tb_profiler/io/cache.py b/tb_plugin/torch_tb_profiler/io/cache.py new file mode 100644 index 000000000..a4297075d --- /dev/null +++ b/tb_plugin/torch_tb_profiler/io/cache.py @@ -0,0 +1,76 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# ------------------------------------------------------------------------- +import multiprocessing as mp +import os + +from .. import utils +from .file import download_file, read + +logger = utils.get_logger() + +class Cache: + def __init__(self): + self._lock = mp.Lock() + self._manager = mp.Manager() + self._cache_dict = self._manager.dict() + + def __getstate__(self): + '''The multiprocessing module can start one of three ways: spawn, fork, or forkserver. + The default mode is fork in Unix and spawn on Windows and macOS. + Therefore, the __getstate__ and __setstate__ are used to pickle/unpickle the state in spawn mode. + ''' + data = self.__dict__.copy() + # remove the _manager to bypass the following pickle error + # TypeError: cannot pickle 'weakref' object + if hasattr(self, '_manager'): + del data['_manager'] + logger.debug("Cache.__getstate__: %s " % data) + return data + + def __setstate__(self, state): + '''The default logging level in new process is warning. 
Only warning and error log can be written to + streams. + So, we need call use_absl_handler in the new process. + ''' + from absl import logging + logging.use_absl_handler() + logger.debug("Cache.__setstate__ %s " % state) + self.__dict__.update(state) + + def read(self, filename): + local_file = self.get_remote_cache(filename) + return read(local_file) + + def get_remote_cache(self, filename): + '''Try to get the local file in the cache. download it to local if it cannot be found in cache.''' + local_file = self.get_file(filename) + if local_file is None: + local_file = download_file(filename) + # skip the cache for local files + if local_file != filename: + self.add_file(filename, local_file) + + return local_file + + def get_file(self, filename): + return self._cache_dict.get(filename) + + def add_file(self, source_file, local_file): + with self._lock: + logger.debug("add local cache %s for file %s" % (local_file, source_file)) + self._cache_dict[source_file] = local_file + + def close(self): + for key, value in self._cache_dict.items(): + if key != value: + logger.info("remove temporary file %s" % value) + os.remove(value) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + self._manager.__exit__(exc_type, exc_value, traceback) + diff --git a/tb_plugin/torch_tb_profiler/io/file.py b/tb_plugin/torch_tb_profiler/io/file.py new file mode 100644 index 000000000..83d16e702 --- /dev/null +++ b/tb_plugin/torch_tb_profiler/io/file.py @@ -0,0 +1,602 @@ +''' +This file is forked from https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/tensorflow_stub/io/gfile.py. +The following functionalities are added after forking: +* Check Azure Blob & Google Cloud available or not +* get_filesystem changes to support Azure Blobs +* add BaseFileSystem and PathBase abstracted class for the filesystem. +* add download_file for each file system to cache the remote file to local temporary folder. 
+* add AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY for S3 file system which is not supported by tensorboard. +* add Azure blob file system +* add Google Cloud file system +* add specialized walk for Local file system, Azure Blob and Google Cloud to improve the walk performance. +* add global wrapper for abspath, basename, join, download_file. +* change the global walk wrapper to support specialized walk. +''' +import glob as py_glob +import os +import tempfile + +from .. import utils +from .base import BaseFileSystem, LocalPath, RemotePath, StatData +from .utils import as_bytes, as_text, parse_blob_url + +logger = utils.get_logger() + +try: + import boto3 + import botocore.exceptions + + S3_ENABLED = True +except ImportError: + S3_ENABLED = False + +try: + from azure.storage.blob import ContainerClient + BLOB_ENABLED = True +except ImportError: + BLOB_ENABLED = False + +try: + # Imports the Google Cloud client library + from google.cloud import storage + GS_ENABLED = True +except ImportError: + GS_ENABLED = False + + +_DEFAULT_BLOCK_SIZE = 16 * 1024 * 1024 + +# Registry of filesystems by prefix. +# +# Currently supports: +# * "s3://" URLs for S3 based on boto3 +# * "https://.blob.core.windows.net" for Azure Blob based on azure-storage-blob +# * "gs://" URLs for Google Cloud based on google-cloud-storage +# * Local filesystem when not match any prefix. 
+_REGISTERED_FILESYSTEMS = {} + + +def register_filesystem(prefix, filesystem): + if ":" in prefix: + raise ValueError("Filesystem prefix cannot contain a :") + _REGISTERED_FILESYSTEMS[prefix] = filesystem + + +def get_filesystem(filename): + """Return the registered filesystem for the given file.""" + prefix = "" + index = filename.find("://") + if index >= 0: + prefix = filename[:index] + if prefix.upper() in ('HTTP', 'HTTPS'): + root, _ = parse_blob_url(filename) + if root.lower().endswith('.blob.core.windows.net'): + fs = _REGISTERED_FILESYSTEMS.get('blob', None) + else: + raise ValueError("Not supported file system for prefix %s" % root) + else: + fs = _REGISTERED_FILESYSTEMS.get(prefix, None) + if fs is None: + raise ValueError("No recognized filesystem for prefix %s" % prefix) + return fs + + +class LocalFileSystem(LocalPath, BaseFileSystem): + def __init__(self): + pass + + def exists(self, filename): + return os.path.exists(filename) + + def read(self, filename, binary_mode=False, size=None, continue_from=None): + mode = "rb" if binary_mode else "r" + encoding = None if binary_mode else "utf8" + if not self.exists(filename): + raise FileNotFoundError(filename) + + offset = None + if continue_from is not None: + offset = continue_from.get("opaque_offset", None) + with open(filename, mode, encoding=encoding) as f: + if offset is not None: + f.seek(offset) + data = f.read(size) + # The new offset may not be `offset + len(data)`, due to decoding + # and newline translation. + # So, just measure it in whatever terms the underlying stream uses. + continuation_token = {"opaque_offset": f.tell()} + return (data, continuation_token) + + def write(self, filename, file_content, binary_mode=False): + """Writes string file contents to a file, overwriting any existing contents. 
+ """ + self._write(filename, file_content, "wb" if binary_mode else "w") + + def support_append(self): + return True + + def append(self, filename, file_content, binary_mode=False): + """Append string file contents to a file. + """ + self._write(filename, file_content, "ab" if binary_mode else "a") + + def _write(self, filename, file_content, mode): + encoding = None if "b" in mode else "utf8" + with open(filename, mode, encoding=encoding) as f: + compatify = as_bytes if "b" in mode else as_text + f.write(compatify(file_content)) + + def glob(self, filename): + """Returns a list of files that match the given pattern(s).""" + if isinstance(filename, str): + return [ + matching_filename + for matching_filename in py_glob.glob(filename) + ] + else: + return [ + matching_filename + for single_filename in filename + for matching_filename in py_glob.glob(single_filename) + ] + + def isdir(self, dirname): + return os.path.isdir(dirname) + + def listdir(self, dirname): + entries = os.listdir(dirname) + entries = [item for item in entries] + return entries + + def makedirs(self, path): + os.makedirs(path, exist_ok=True) + + def stat(self, filename): + """Returns file statistics for a given path.""" + # NOTE: Size of the file is given by .st_size as returned from + # os.stat(), but we convert to .length + file_length = os.stat(filename).st_size + return StatData(file_length) + + def walk(self, top, topdown=True, onerror=None): + yield from os.walk(top, topdown, onerror) + +class S3FileSystem(RemotePath, BaseFileSystem): + """Provides filesystem access to S3.""" + + def __init__(self): + if not boto3: + raise ImportError("boto3 must be installed for S3 support.") + self._s3_endpoint = os.environ.get("S3_ENDPOINT", None) + access_key = os.environ.get("AWS_ACCESS_KEY_ID") + secret_key = os.environ.get("AWS_SECRET_ACCESS_KEY") + if access_key and secret_key: + boto3.setup_default_session( + aws_access_key_id=access_key, aws_secret_access_key=secret_key) + + def 
bucket_and_path(self, url): + """Split an S3-prefixed URL into bucket and path.""" + if url.startswith("s3://"): + url = url[len("s3://"):] + idx = url.index("/") + bucket = url[:idx] + path = url[(idx + 1):] + return bucket, path + + def exists(self, filename): + """Determines whether a path exists or not.""" + client = boto3.client("s3", endpoint_url=self._s3_endpoint) + bucket, path = self.bucket_and_path(filename) + r = client.list_objects(Bucket=bucket, Prefix=path, Delimiter="/") + if r.get("Contents") or r.get("CommonPrefixes"): + return True + return False + + def read(self, filename, binary_mode=False, size=None, continue_from=None): + """Reads contents of a file to a string.""" + s3 = boto3.resource("s3", endpoint_url=self._s3_endpoint) + bucket, path = self.bucket_and_path(filename) + args = {} + + # S3 use continuation tokens of the form: {byte_offset: number} + offset = 0 + if continue_from is not None: + offset = continue_from.get("byte_offset", 0) + + endpoint = "" + if size is not None: + endpoint = offset + size + + if offset != 0 or endpoint != "": + args["Range"] = "bytes={}-{}".format(offset, endpoint) + + logger.info("s3: starting reading file %s" % filename) + try: + stream = s3.Object(bucket, path).get(**args)["Body"].read() + except botocore.exceptions.ClientError as exc: + if exc.response["Error"]["Code"] in ["416", "InvalidRange"]: + if size is not None: + # Asked for too much, so request just to the end. Do this + # in a second request so we don't check length in all cases. 
+ client = boto3.client("s3", endpoint_url=self._s3_endpoint) + obj = client.head_object(Bucket=bucket, Key=path) + content_length = obj["ContentLength"] + endpoint = min(content_length, offset + size) + if offset == endpoint: + # Asked for no bytes, so just return empty + stream = b"" + else: + args["Range"] = "bytes={}-{}".format(offset, endpoint) + stream = s3.Object(bucket, path).get(**args)["Body"].read() + else: + raise + + logger.info("s3: file %s download is done, size is %d" % + (filename, len(stream))) + # `stream` should contain raw bytes here (i.e., there has been neither decoding nor newline translation), + # so the byte offset increases by the expected amount. + continuation_token = {"byte_offset": (offset + len(stream))} + if binary_mode: + return (bytes(stream), continuation_token) + else: + return (stream.decode("utf-8"), continuation_token) + + def write(self, filename, file_content, binary_mode=False): + """Writes string file contents to a file.""" + client = boto3.client("s3", endpoint_url=self._s3_endpoint) + bucket, path = self.bucket_and_path(filename) + if binary_mode: + if not isinstance(file_content, bytes): + raise TypeError("File content type must be bytes") + else: + file_content = as_bytes(file_content) + client.put_object(Body=file_content, Bucket=bucket, Key=path) + + def download_file(self, filename): + fp = tempfile.NamedTemporaryFile( + 'w+t', suffix='.%s' % self.basename(filename), delete=False) + fp.close() + + logger.info("s3: starting downloading file %s as %s" % + (filename, fp.name)) + # Use boto3.resource instead of boto3.client('s3') to support minio. 
+ # https://docs.min.io/docs/how-to-use-aws-sdk-for-python-with-minio-server.html + # To support minio, the S3_ENDPOINT need to be set like: S3_ENDPOINT=http://localhost:9000 + s3 = boto3.resource("s3", endpoint_url=self._s3_endpoint) + bucket, path = self.bucket_and_path(filename) + s3.Bucket(bucket).download_file(path, fp.name) + logger.info("s3: file %s is downloaded as %s" % + (filename, fp.name)) + return fp.name + + def glob(self, filename): + """Returns a list of files that match the given pattern(s).""" + # Only support prefix with * at the end and no ? in the string + star_i = filename.find("*") + quest_i = filename.find("?") + if quest_i >= 0: + raise NotImplementedError("{} not supported".format(filename)) + if star_i != len(filename) - 1: + return [] + + filename = filename[:-1] + client = boto3.client("s3", endpoint_url=self._s3_endpoint) + bucket, path = self.bucket_and_path(filename) + p = client.get_paginator("list_objects") + keys = [] + for r in p.paginate(Bucket=bucket, Prefix=path): + for o in r.get("Contents", []): + key = o["Key"][len(path):] + if key: + keys.append(filename + key) + return keys + + def isdir(self, dirname): + """Returns whether the path is a directory or not.""" + client = boto3.client("s3", endpoint_url=self._s3_endpoint) + bucket, path = self.bucket_and_path(dirname) + if not path.endswith("/"): + path += "/" + r = client.list_objects(Bucket=bucket, Prefix=path, Delimiter="/") + if r.get("Contents") or r.get("CommonPrefixes"): + return True + return False + + def listdir(self, dirname): + """Returns a list of entries contained within a directory.""" + client = boto3.client("s3", endpoint_url=self._s3_endpoint) + bucket, path = self.bucket_and_path(dirname) + p = client.get_paginator("list_objects") + if not path.endswith("/"): + path += "/" + keys = [] + for r in p.paginate(Bucket=bucket, Prefix=path, Delimiter="/"): + keys.extend( + o["Prefix"][len(path): -1] for o in r.get("CommonPrefixes", []) + ) + for o in 
r.get("Contents", []): + key = o["Key"][len(path):] + if key: + keys.append(key) + return keys + + def makedirs(self, dirname): + """Creates a directory and all parent/intermediate directories.""" + if not self.exists(dirname): + client = boto3.client("s3", endpoint_url=self._s3_endpoint) + bucket, path = self.bucket_and_path(dirname) + if not path.endswith("/"): + path += "/" + client.put_object(Body="", Bucket=bucket, Key=path) + + def stat(self, filename): + """Returns file statistics for a given path.""" + # Size of the file is given by ContentLength from S3 + client = boto3.client("s3", endpoint_url=self._s3_endpoint) + bucket, path = self.bucket_and_path(filename) + + obj = client.head_object(Bucket=bucket, Key=path) + return StatData(obj["ContentLength"]) + + +register_filesystem("", LocalFileSystem()) +if S3_ENABLED: + register_filesystem("s3", S3FileSystem()) + +if BLOB_ENABLED: + from .azureblob import AzureBlobSystem + register_filesystem("blob", AzureBlobSystem()) + +if GS_ENABLED: + from .gs import GoogleBlobSystem + register_filesystem("gs", GoogleBlobSystem()) + +class File(object): + def __init__(self, filename, mode): + if mode not in ("r", "rb", "br", "w", "wb", "bw"): + raise ValueError("mode {} not supported by File".format(mode)) + self.filename = filename + self.fs = get_filesystem(self.filename) + self.fs_supports_append = self.fs.support_append() + self.buff = None + self.buff_chunk_size = _DEFAULT_BLOCK_SIZE + self.buff_offset = 0 + self.continuation_token = None + self.write_temp = None + self.write_started = False + self.binary_mode = "b" in mode + self.write_mode = "w" in mode + self.closed = False + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + self.buff = None + self.buff_offset = 0 + self.continuation_token = None + + def __iter__(self): + return self + + def _read_buffer_to_offset(self, new_buff_offset): + old_buff_offset = self.buff_offset + read_size = min(len(self.buff), new_buff_offset) - 
old_buff_offset + self.buff_offset += read_size + return self.buff[old_buff_offset : old_buff_offset + read_size] + + def read(self, n=None): + """Reads contents of file to a string. + + Args: + n: int, number of bytes or characters to read, otherwise + read all the contents of the file + + Returns: + Subset of the contents of the file as a string or bytes. + """ + if self.write_mode: + raise OSError("File not opened in read mode") + + result = None + if self.buff and len(self.buff) > self.buff_offset: + # read from local buffer + if n is not None: + chunk = self._read_buffer_to_offset(self.buff_offset + n) + if len(chunk) == n: + return chunk + result = chunk + n -= len(chunk) + else: + # add all local buffer and update offsets + result = self._read_buffer_to_offset(len(self.buff)) + + # read from filesystem + read_size = max(self.buff_chunk_size, n) if n is not None else None + (self.buff, self.continuation_token) = self.fs.read(self.filename, self.binary_mode, read_size, self.continuation_token) + self.buff_offset = 0 + + # add from filesystem + if n is not None: + chunk = self._read_buffer_to_offset(n) + else: + # add all local buffer and update offsets + chunk = self._read_buffer_to_offset(len(self.buff)) + result = result + chunk if result else chunk + + return result + + def write(self, file_content): + """Writes string file contents to file, clearing contents of the file + on first write and then appending on subsequent calls. 
+ """ + if not self.write_mode: + raise OSError("File not opened in write mode") + + if self.closed: + raise OSError("File already closed") + + if self.fs_supports_append: + if not self.write_started: + # write the first chunk to truncate file if it already exists + self.fs.write(self.filename, file_content, self.binary_mode) + self.write_started = True + else: + # append the later chunks + self.fs.append(self.filename, file_content, self.binary_mode) + else: + # add to temp file, but wait for flush to write to final filesystem + if self.write_temp is None: + mode = "w+b" if self.binary_mode else "w+" + self.write_temp = tempfile.TemporaryFile(mode) + + compatify = as_bytes if self.binary_mode else as_text + self.write_temp.write(compatify(file_content)) + + def __next__(self): + line = None + while True: + if not self.buff: + # read one unit into the buffer + line = self.read(1) + if line and (line[-1] == "\n" or not self.buff): + return line + if not self.buff: + raise StopIteration() + else: + index = self.buff.find("\n", self.buff_offset) + if index != -1: + # include line until now plus newline + chunk = self.read(index + 1 - self.buff_offset) + line = line + chunk if line else chunk + return line + + # read one unit past end of buffer + chunk = self.read(len(self.buff) + 1 - self.buff_offset) + line = line + chunk if line else chunk + if line and (line[-1] == "\n" or not self.buff): + return line + if not self.buff: + raise StopIteration() + + def next(self): + return self.__next__() + + def flush(self): + if self.closed: + raise OSError("File already closed") + + if not self.fs_supports_append: + if self.write_temp is not None: + # read temp file from the beginning + self.write_temp.flush() + self.write_temp.seek(0) + chunk = self.write_temp.read() + if chunk is not None: + # write full contents and keep in temp file + self.fs.write(self.filename, chunk, self.binary_mode) + self.write_temp.seek(len(chunk)) + + def close(self): + self.flush() + if 
def exists(filename):
    """True if the given path exists on its filesystem."""
    return get_filesystem(filename).exists(filename)

def abspath(path):
    """Absolute form of `path`, resolved by its filesystem."""
    return get_filesystem(path).abspath(path)

def basename(path):
    """Final component of `path`."""
    return get_filesystem(path).basename(path)

def relpath(path, start):
    """`path` expressed relative to `start`."""
    return get_filesystem(path).relpath(path, start)

def join(path, *paths):
    """Join path components using the filesystem of the first component."""
    return get_filesystem(path).join(path, *paths)

def download_file(filename):
    """Downloads the file, returning a temporary path to the file after finishing."""
    return get_filesystem(filename).download_file(filename)

def glob(filename):
    """Returns a list of files that match the given pattern(s)."""
    return get_filesystem(filename).glob(filename)

def isdir(dirname):
    """Returns whether the path is a directory or not."""
    return get_filesystem(dirname).isdir(dirname)

def listdir(dirname):
    """Returns a list of entries contained within a directory.

    The list is in arbitrary order. It does not contain the special entries "."
    and "..".
    """
    return get_filesystem(dirname).listdir(dirname)

def makedirs(path):
    """Creates a directory and all parent/intermediate directories."""
    return get_filesystem(path).makedirs(path)

def walk(top, topdown=True, onerror=None):
    """Recursive directory tree generator for directories.

    Args:
        top: string, a Directory name
        topdown: bool, Traverse pre order if True, post order if False.
        onerror: optional handler for errors. Should be a function, it will be
            called with the error as argument. Rethrowing the error aborts the walk.

    Errors that happen while listing directories are ignored.

    Yields:
        Each yield is a 3-tuple: the pathname of a directory, followed by lists
        of all its subdirectories and leaf files.
        (dirname, [subdirname, subdirname, ...], [filename, filename, ...])
        as strings
    """
    fs = get_filesystem(top)
    # Filesystems with a specialized walk (local, blob, gs) handle it themselves.
    if hasattr(fs, "walk"):
        yield from fs.walk(top, topdown, onerror)
        return

    top = fs.abspath(top)
    dir_names = []
    file_names = []
    for entry in fs.listdir(top):
        if fs.isdir(fs.join(top, entry)):
            dir_names.append(entry)
        else:
            file_names.append(entry)

    current = (top, dir_names, file_names)
    if topdown:
        yield current
    for dir_name in dir_names:
        yield from walk(fs.join(top, dir_name), topdown, onerror=onerror)
    if not topdown:
        yield current

def stat(filename):
    """Returns file statistics for a given path."""
    return get_filesystem(filename).stat(filename)

def read(file):
    """Read the full raw contents of `file` as bytes."""
    with File(file, 'rb') as f:
        return f.read()
import tempfile

from google.cloud import storage

from .. import utils
from .base import BaseFileSystem, RemotePath, StatData

logger = utils.get_logger()


class GoogleBlobSystem(RemotePath, BaseFileSystem):
    """Provides filesystem access to Google Cloud Storage (gs:// URLs)."""

    def __init__(self):
        if not storage:
            raise ImportError("google-cloud-storage must be installed for Google Cloud Blob support.")

    def exists(self, dirname):
        """Determines whether a path exists or not."""
        bucket_name, path = self.bucket_and_path(dirname)
        client = self.create_google_cloud_client()
        bucket = client.bucket(bucket_name)
        return bucket.blob(path).exists()

    def read(self, filename, binary_mode=False, size=None, continue_from=None):
        raise NotImplementedError

    def write(self, filename, file_content, binary_mode=False):
        raise NotImplementedError

    def glob(self, filename):
        raise NotImplementedError

    def download_file(self, filename):
        """Download the blob to a local temp file and return its path.

        The caller owns (and must eventually remove) the temp file.
        """
        fp = tempfile.NamedTemporaryFile('w+t', suffix='.%s' % self.basename(filename), delete=False)
        fp.close()
        bucket_name, path = self.bucket_and_path(filename)
        client = self.create_google_cloud_client()
        bucket = client.bucket(bucket_name)
        blob = bucket.blob(path)
        blob.download_to_filename(fp.name)
        return fp.name

    def isdir(self, dirname):
        """Returns whether the path is a directory or not."""
        basename, parts = self.split_blob_path(dirname)
        if basename is None or parts is None:
            return False
        if basename == "":
            # root container case
            return True
        else:
            # A directory: some blob's path continues past this component.
            return basename == parts[0] and len(parts) > 1

    def listdir(self, dirname):
        """Returns a list of entries contained within a directory."""
        bucket_name, path = self.bucket_and_path(dirname)
        client = self.create_google_cloud_client()
        blobs = client.list_blobs(bucket_name, prefix=path)
        items = []
        for blob in blobs:
            item = self.relpath(blob.name, path)
            # Bug fix: compare the entry itself (was `items not in items`,
            # which is always true, so duplicates were never filtered).
            if item not in items:
                items.append(item)
        return items

    def makedirs(self, dirname):
        """No-op: uploading a blob creates any needed "directories" implicitly."""
        pass

    def stat(self, filename):
        """Returns file statistics for a given path.

        Raises:
            FileNotFoundError: if no blob exists at `filename`.
        """
        bucket_name, path = self.bucket_and_path(filename)
        client = self.create_google_cloud_client()
        bucket = client.bucket(bucket_name)
        blob = bucket.get_blob(path)
        if blob is None:
            # get_blob returns None for a missing object; raise a clear error
            # instead of AttributeError on `None.size`.
            raise FileNotFoundError(filename)
        return StatData(blob.size)

    def walk(self, top, topdown=True, onerror=None):
        """Specialized flat walk: one listing call, grouped by blob directory.

        Note: the subdirectory slot of each yielded 3-tuple is None, since
        every blob is listed recursively in a single pass.
        """
        bucket_name, path = self.bucket_and_path(top)
        client = self.create_google_cloud_client()
        blobs = client.list_blobs(bucket_name, prefix=path)
        results = {}
        for blob in blobs:
            dirname, basename = self.split(blob.name)
            dirname = "gs://{}/{}".format(bucket_name, dirname)
            results.setdefault(dirname, []).append(basename)
        for key, value in results.items():
            yield key, None, value

    def split_blob_path(self, blob_path):
        """ Find the first blob start with blob_path, then get the relative path starting from dirname(blob_path). Finally, split the relative path.
        return (basename(blob_path), [relative split paths])
        If blob_path doesn't exist, return (None, None)
        For example,
            For blob gs://tests/test1/test2/test.txt
            * If the blob_path is '', return ('', [test1, test2, test.txt])
            * If the blob_path is test1, return (test1, [test2, test.txt])
            * If the blob_path is test1/test2, return (test2, [test2, test.txt])
            * If the blob_path is test1/test2/test.txt, return (test.txt, [test.txt])
        """
        bucket_name, path = self.bucket_and_path(blob_path)
        client = self.create_google_cloud_client()
        # max_results=1: only need one blob to learn the path's structure.
        blobs = client.list_blobs(bucket_name, prefix=path, delimiter=None, max_results=1)

        for blob in blobs:
            dir_path, basename = self.split(path)
            if dir_path:
                rel_path = blob.name[len(dir_path):]
                parts = rel_path.lstrip('/').split('/')
            else:
                parts = blob.name.split('/')
            return (basename, parts)
        return (None, None)

    def bucket_and_path(self, url):
        """Split a gs://-prefixed URL into bucket and path."""
        if url.startswith("gs://"):
            url = url[len("gs://"):]
        idx = url.index("/")
        bucket = url[:idx]
        path = url[(idx + 1):]
        return bucket, path

    def create_google_cloud_client(self):
        # TODO: support client with credential?
        client = storage.Client.create_anonymous_client()
        return client
def as_str_any(value):
    """Converts `value` to `str` with `str(value)`; `bytes` are decoded
    via `as_str` instead.

    Args:
        value: A object that can be converted to `str`.

    Returns:
        A `str` object.
    """
    if isinstance(value, bytes):
        return as_str(value)
    return str(value)

def as_text(bytes_or_text, encoding="utf-8"):
    """Returns the given argument as a unicode string, decoding `bytes`
    with `encoding` when necessary.

    Args:
        bytes_or_text: A `bytes`, `str`, or `unicode` object.
        encoding: A string indicating the charset for decoding unicode.

    Returns:
        A `str` (Python 3) object.

    Raises:
        TypeError: If `bytes_or_text` is not a binary or unicode string.
    """
    if isinstance(bytes_or_text, bytes):
        return bytes_or_text.decode(encoding)
    if isinstance(bytes_or_text, str):
        return bytes_or_text
    raise TypeError(
        "Expected binary or unicode string, got %r" % bytes_or_text
    )


# In Python 3 every string is unicode, so `as_str` is simply `as_text`.
as_str = as_text

def as_bytes(bytes_or_text, encoding="utf-8"):
    """Converts either bytes or unicode to `bytes`, encoding text with
    `encoding`.

    Args:
        bytes_or_text: A `bytes`, `str`, or `unicode` object.
        encoding: A string indicating the charset for encoding unicode.

    Returns:
        A `bytes` object.

    Raises:
        TypeError: If `bytes_or_text` is not a binary or unicode string.
    """
    if isinstance(bytes_or_text, bytes):
        return bytes_or_text
    if isinstance(bytes_or_text, str):
        return bytes_or_text.encode(encoding)
    raise TypeError(
        "Expected binary or unicode string, got %r" % (bytes_or_text,)
    )

def parse_blob_url(url):
    """Splits a blob URL into (netloc, path_parts) where path_parts is the
    URL path split into at most (container, remainder)."""
    from urllib import parse
    parsed = parse.urlparse(url)
    return parsed.netloc, tuple(parsed.path.lstrip('/').split('/', 1))
""" super(TorchProfilerPlugin, self).__init__(context) - self.logdir = os.path.abspath(context.logdir) + start_method = os.getenv('TORCH_PROFILER_START_METHOD') + if start_method: + mp.set_start_method(start_method, force=True) + self.logdir = io.abspath(context.logdir.rstrip('/')) self._is_active = None self._is_active_initialized_event = threading.Event() @@ -44,13 +48,24 @@ def __init__(self, context): self._runs = OrderedDict() self._runs_lock = threading.Lock() - self._queue = multiprocessing.Queue() - monitor_runs = threading.Thread(target=self.monitor_runs, name="monitor_runs", daemon=True) + self._cache = io.Cache() + self._queue = Queue() + self._gpu_metrics_file_dict = {} + monitor_runs = threading.Thread(target=self._monitor_runs, name="monitor_runs", daemon=True) monitor_runs.start() - receive_runs = threading.Thread(target=self.receive_runs, name="receive_runs", daemon=True) + receive_runs = threading.Thread(target=self._receive_runs, name="receive_runs", daemon=True) receive_runs.start() + def clean(): + logger.debug("starting cleanup...") + self._cache.__exit__(*sys.exc_info()) + for temp_file in self._gpu_metrics_file_dict.values(): + logger.info("remove temporary file %s with gpu metrics" % temp_file) + os.remove(temp_file) + + atexit.register(clean) + def is_active(self): """Returns whether there is relevant data for the plugin to process. 
""" @@ -60,96 +75,30 @@ def is_active(self): def get_plugin_apps(self): return { "/index.js": self.static_file_route, - "/main.js": self.static_file_route, "/index.html": self.static_file_route, - "/overall.html": self.static_file_route, "/trace_viewer_full.html": self.static_file_route, "/trace_embedding.html": self.static_file_route, - "/operator.html": self.static_file_route, - "/kernel.html": self.static_file_route, "/runs": self.runs_route, "/views": self.views_route, "/workers": self.workers_route, + "/spans": self.spans_route, "/overview": self.overview_route, "/operation": self.operation_pie_route, "/operation/table": self.operation_table_route, + "/operation/stack": self.operation_stack_route, "/kernel": self.kernel_pie_route, "/kernel/table": self.kernel_table_route, - "/trace": self.trace_route + "/trace": self.trace_route, + "/distributed/gpuinfo": self.dist_gpu_info_route, + "/distributed/overlap": self.comm_overlap_route, + "/distributed/waittime": self.comm_wait_route, + "/distributed/commops": self.comm_ops_route, + "/memory": self.memory_route, } def frontend_metadata(self): return base_plugin.FrontendMetadata(es_module_path="/index.js") - def monitor_runs(self): - logger.info("Monitor runs begin") - - # Set _is_active quickly based on file pattern match, don't wait for data loading - self._is_active = any(self._get_run_dirs()) - self._is_active_initialized_event.set() - - touched = set() - while True: - try: - logger.debug("Scan run dir") - run_dirs = self._get_run_dirs() - - # Assume no deletion on run directories, trigger async load if find a new run - for name, run_dir in run_dirs: - if name not in touched: - logger.info("Find run %s under %s", name, run_dir) - touched.add(name) - # Use multiprocessing to avoid UI stall and reduce data parsing time - process = multiprocessing.Process(target=_load_run, args=(self._queue, name, run_dir)) - process.daemon = True - process.start() - except Exception as ex: - logger.warning("Failed to scan runs. 
Exception=%s", ex, exc_info=True) - - time.sleep(consts.MONITOR_RUN_REFRESH_INTERNAL_IN_SECONDS) - - def receive_runs(self): - while True: - run = self._queue.get() - if run is None: - continue - - logger.info("Add run %s", run.name) - with self._runs_lock: - is_new = run.name not in self._runs - self._runs[run.name] = run - if is_new: - self._runs = OrderedDict(sorted(self._runs.items())) - - # Update is_active - if not self._is_active: - self._is_active = True - - def _get_run_dirs(self): - """Scan logdir, find PyTorch Profiler run directories. - A directory is considered to be a run if it contains 1 or more *.pt.trace.json[.gz]. - E.g. there are 2 runs: run1, run2 - /run1 - /[worker1].pt.trace.json.gz - /[worker2].pt.trace.json.gz - /run2 - /[worker1].pt.trace.json - """ - for root, _, files in os.walk(self.logdir): - for file in files: - if utils.is_chrome_trace_file(file): - run_dir = os.path.abspath(root) - if run_dir == self.logdir: - name = os.path.basename(run_dir) - else: - name = os.path.relpath(run_dir, self.logdir) - yield name, run_dir - break - - def get_run(self, name) -> Run: - with self._runs_lock: - return self._runs.get(name, None) - @wrappers.Request.application def runs_route(self, request): with self._runs_lock: @@ -159,8 +108,9 @@ def runs_route(self, request): @wrappers.Request.application def views_route(self, request): name = request.args.get("run") - run = self.get_run(name) - views = sorted(run.views, key=lambda x: x.id) + self._validate(run=name) + run = self._get_run(name) + views = run.views views_list = [] for view in views: views_list.append(view.display_name) @@ -169,28 +119,49 @@ def views_route(self, request): @wrappers.Request.application def workers_route(self, request): name = request.args.get("run") - run = self.get_run(name) - return self.respond_as_json(run.workers) + view = request.args.get("view") + self._validate(run=name, view=view) + run = self._get_run(name) + self._check_run(run, name) + workers = 
run.get_workers(view) + return self.respond_as_json(run.get_workers(view)) + + @wrappers.Request.application + def spans_route(self, request): + name = request.args.get("run") + worker = request.args.get("worker") + self._validate(run=name, worker=worker) + run = self._get_run(name) + self._check_run(run, name) + return self.respond_as_json(run.get_spans(worker)) @wrappers.Request.application def overview_route(self, request): name = request.args.get("run") worker = request.args.get("worker") - run = self.get_run(name) - profile = run.get_profile(worker) + span = request.args.get("span") + self._validate(run=name, worker=worker) + profile = self._get_profile(name, worker, span) + self._check_normal_profile(profile, name, worker) + run = self._get_run(name) data = profile.overview is_gpu_used = profile.has_runtime or profile.has_kernel or profile.has_memcpy_or_memset - data["environments"] = [{"title": "Number of Worker(s)", "value": str(len(run.workers))}, + normal_workers = [worker for worker in run.workers if worker != 'All'] + data["environments"] = [{"title": "Number of Worker(s)", "value": str(len(normal_workers))}, {"title": "Device Type", "value": "GPU" if is_gpu_used else "CPU"}] + if len(profile.gpu_ids) > 0: + gpu_metrics_data, gpu_metrics_tooltip = profile.get_gpu_metrics_data_tooltip() + data["gpu_metrics"] = {"title": "GPU Summary", + "data": gpu_metrics_data, + "tooltip": gpu_metrics_tooltip} + return self.respond_as_json(data) @wrappers.Request.application def operation_pie_route(self, request): - name = request.args.get("run") - worker = request.args.get("worker") + profile = self._get_profile_for_request(request) + group_by = request.args.get("group_by") - run = self.get_run(name) - profile = run.get_profile(worker) if group_by == "OperationAndInputShape": return self.respond_as_json(profile.operation_pie_by_name_input) else: @@ -198,31 +169,38 @@ def operation_pie_route(self, request): @wrappers.Request.application def operation_table_route(self, 
request): - name = request.args.get("run") - worker = request.args.get("worker") + profile = self._get_profile_for_request(request) + group_by = request.args.get("group_by") - run = self.get_run(name) - profile = run.get_profile(worker) if group_by == "OperationAndInputShape": return self.respond_as_json(profile.operation_table_by_name_input) else: return self.respond_as_json(profile.operation_table_by_name) + @wrappers.Request.application + def operation_stack_route(self, request): + profile = self._get_profile_for_request(request) + + op_name = request.args.get("op_name") + self._validate(op_name=op_name) + group_by = request.args.get("group_by") + input_shape = request.args.get("input_shape") + if group_by == "OperationAndInputShape": + return self.respond_as_json(profile.operation_stack_by_name_input[str(op_name)+"###"+str(input_shape)]) + else: + return self.respond_as_json(profile.operation_stack_by_name[str(op_name)]) + @wrappers.Request.application def kernel_pie_route(self, request): - name = request.args.get("run") - worker = request.args.get("worker") - run = self.get_run(name) - profile = run.get_profile(worker) + profile = self._get_profile_for_request(request) + return self.respond_as_json(profile.kernel_pie) @wrappers.Request.application def kernel_table_route(self, request): - name = request.args.get("run") - worker = request.args.get("worker") + profile = self._get_profile_for_request(request) + group_by = request.args.get("group_by") - run = self.get_run(name) - profile = run.get_profile(worker) if group_by == "Kernel": return self.respond_as_json(profile.kernel_table) else: @@ -230,20 +208,58 @@ def kernel_table_route(self, request): @wrappers.Request.application def trace_route(self, request): - name = request.args.get("run") - worker = request.args.get("worker") + profile = self._get_profile_for_request(request) - run = self.get_run(name) - profile = run.get_profile(worker) - fopen = open - with fopen(profile.trace_file_path, 'rb') as f: - 
raw_data = f.read() - if profile.trace_file_path.endswith('.gz'): - headers = [] - headers.append(('Content-Encoding', 'gzip')) - return werkzeug.Response(raw_data, content_type="application/json", headers=headers) + if not profile.has_kernel:# Pure CPU. + raw_data = self._cache.read(profile.trace_file_path) + if not profile.trace_file_path.endswith('.gz'): + raw_data = gzip.compress(raw_data, 1) else: - return werkzeug.Response(raw_data, content_type="application/json") + file_with_gpu_metrics = self._gpu_metrics_file_dict.get(profile.trace_file_path) + if file_with_gpu_metrics: + raw_data = io.read(file_with_gpu_metrics) + else: + raw_data = self._cache.read(profile.trace_file_path) + if profile.trace_file_path.endswith('.gz'): + raw_data = gzip.decompress(raw_data) + raw_data = profile.append_gpu_metrics(raw_data) + + # write the data to temp file + fp = tempfile.NamedTemporaryFile('w+b', suffix='.json.gz', delete=False) + fp.close() + # Already compressed, no need to gzip.open + with open(fp.name, mode='wb') as file: + file.write(raw_data) + self._gpu_metrics_file_dict[profile.trace_file_path] = fp.name + + headers = [('Content-Encoding', 'gzip')] + headers.extend(TorchProfilerPlugin.headers) + return werkzeug.Response(raw_data, content_type="application/json", headers=headers) + + @wrappers.Request.application + def dist_gpu_info_route(self, request): + profile = self._get_profile_for_request(request, True) + return self.respond_as_json(profile.gpu_info) + + @wrappers.Request.application + def comm_overlap_route(self, request): + profile = self._get_profile_for_request(request, True) + return self.respond_as_json(profile.steps_to_overlap) + + @wrappers.Request.application + def comm_wait_route(self, request): + profile = self._get_profile_for_request(request, True) + return self.respond_as_json(profile.steps_to_wait) + + @wrappers.Request.application + def comm_ops_route(self, request): + profile = self._get_profile_for_request(request, True) + return 
self.respond_as_json(profile.comm_ops) + + @wrappers.Request.application + def memory_route(self, request): + profile = self._get_profile_for_request(request) + return self.respond_as_json(profile.memory_view) @wrappers.Request.application def static_file_route(self, request): @@ -262,27 +278,141 @@ def static_file_route(self, request): with open(filepath, 'rb') as infile: contents = infile.read() except IOError: - return werkzeug.Response('404 Not Found', 'text/plain', code=404) + raise errors.NotFoundError("404 Not Found") return werkzeug.Response( - contents, content_type=mimetype + contents, content_type=mimetype, headers=TorchProfilerPlugin.headers ) @staticmethod def respond_as_json(obj): content = json.dumps(obj) - return werkzeug.Response(content, content_type="application/json") - - -def _load_run(queue, name, run_dir): - import absl.logging - absl.logging.use_absl_handler() - - try: - logger.info("Load run %s", name) - # Currently, assume run data is immutable, so just load once - loader = RunLoader(name, run_dir) - run = loader.load() - logger.info("Run %s loaded", name) - queue.put(run) - except Exception as ex: - logger.warning("Failed to load run %s. 
Exception=%s", ex, name, exc_info=True) + return werkzeug.Response(content, content_type="application/json", headers=TorchProfilerPlugin.headers) + + def _monitor_runs(self): + logger.info("Monitor runs begin") + + try: + touched = set() + while True: + try: + logger.debug("Scan run dir") + run_dirs = self._get_run_dirs() + + # Assume no deletion on run directories, trigger async load if find a new run + for run_dir in run_dirs: + # Set _is_active quickly based on file pattern match, don't wait for data loading + if not self._is_active: + self._is_active = True + self._is_active_initialized_event.set() + + if run_dir not in touched: + touched.add(run_dir) + logger.info("Find run directory %s", run_dir) + # Use threading to avoid UI stall and reduce data parsing time + t = threading.Thread(target=self._load_run, args=(run_dir,)) + t.start() + except Exception as ex: + logger.warning("Failed to scan runs. Exception=%s", ex, exc_info=True) + + time.sleep(consts.MONITOR_RUN_REFRESH_INTERNAL_IN_SECONDS) + except: + logger.exception("Failed to start monitor_runs") + + def _receive_runs(self): + while True: + run = self._queue.get() + if run is None: + continue + + logger.info("Add run %s", run.name) + with self._runs_lock: + is_new = run.name not in self._runs + self._runs[run.name] = run + if is_new: + self._runs = OrderedDict(sorted(self._runs.items())) + + # Update is_active + if not self._is_active: + self._is_active = True + self._is_active_initialized_event.set() + + def _get_run_dirs(self): + """Scan logdir, find PyTorch Profiler run directories. + A directory is considered to be a run if it contains 1 or more *.pt.trace.json[.gz]. + E.g. 
there are 2 runs: run1, run2 + /run1 + /[worker1].pt.trace.json.gz + /[worker2].pt.trace.json.gz + /run2 + /[worker1].pt.trace.json + """ + for root, _, files in io.walk(self.logdir): + for file in files: + if utils.is_chrome_trace_file(file): + yield root + break + + def _load_run(self, run_dir): + try: + name = self._get_run_name(run_dir) + logger.info("Load run %s", name) + # Currently, assume run data is immutable, so just load once + loader = RunLoader(name, run_dir, self._cache) + run = loader.load() + logger.info("Run %s loaded", name) + self._queue.put(run) + except Exception as ex: + logger.warning("Failed to load run %s. Exception=%s", ex, name, exc_info=True) + + def _get_run(self, name) -> Run: + with self._runs_lock: + return self._runs.get(name, None) + + def _get_run_name(self, run_dir): + logdir = io.abspath(self.logdir) + if run_dir == logdir: + name = io.basename(run_dir) + else: + name = io.relpath(run_dir, logdir) + return name + + def _get_profile_for_request(self, request, distributed=False): + name = request.args.get("run") + span = request.args.get("span") + if distributed: + self._validate(run=name) + profile = self._get_profile(name, 'All', span) + self._check_distributed_profile(profile, name) + else: + worker = request.args.get("worker") + self._validate(run=name, worker=worker) + profile = self._get_profile(name, worker, span) + self._check_normal_profile(profile, name, worker) + + return profile + + def _get_profile(self, name, worker, span): + run = self._get_run(name) + self._check_run(run, name) + profile = run.get_profile(worker, span) + if profile is None: + raise errors.NotFoundError("could not find the profile for %s/%s " %(name, worker)) + return profile + + def _check_run(self, run, name): + if run is None: + raise errors.NotFoundError("could not find the run for %s" %(name)) + + def _check_normal_profile(self, profile, name, worker): + if not isinstance(profile, RunProfile): + raise errors.InvalidArgumentError("Get an 
unexpected profile type %s for %s/%s" %(type(profile), name, worker)) + + def _check_distributed_profile(self, profile, name): + if not isinstance(profile, DistributedRunProfile): + raise errors.InvalidArgumentError("Get an unexpected distributed profile type %s for %s/%s" %(type(profile), name)) + + def _validate(self, **kwargs): + for name,v in kwargs.items(): + if v is None: + raise errors.InvalidArgumentError("Must specify %s in request url" %(name)) + diff --git a/tb_plugin/torch_tb_profiler/profiler/communication.py b/tb_plugin/torch_tb_profiler/profiler/communication.py new file mode 100644 index 000000000..89269446a --- /dev/null +++ b/tb_plugin/torch_tb_profiler/profiler/communication.py @@ -0,0 +1,69 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# ------------------------------------------------------------------------- +from .. import utils + +logger = utils.get_logger() + + +def generate_communication_nodes(communication_data, steps, steps_names): + comm_node_list = [] + + # Sort the communication node according the start time, this is for correlating communication node between workers + for comm_node in communication_data.values(): + comm_node.kernel_ranges.sort(key=lambda x: (x[0], -x[1])) + comm_node_list.append(comm_node) + comm_node_list.sort(key=lambda x: (x.start_time, -x.end_time)) + + # Find each communication node belong to which step + index = 0 + valid_steps = len(steps) + for comm_node in comm_node_list: + while index < valid_steps: + if comm_node.start_time >= steps[index][0] and comm_node.end_time <= steps[index][1]: + comm_node.step_name = steps_names[index] + break + elif comm_node.start_time >= steps[index][1]: + index += 1 + else: + logger.error("Found a communication op not belong to any step.") + break + if index >= valid_steps: + logger.error("Found communication ops not belong to any step. 
") + break + + return comm_node_list + + +def analyze_communication_nodes(comm_node_list): + step_comm_stats = {} + total_comm_stats = {} + + for comm_node in comm_node_list: + if comm_node.step_name not in step_comm_stats: + step_comm_stats[comm_node.step_name] = [0, 0] + step_comm_stats[comm_node.step_name][0] += comm_node.total_time + step_comm_stats[comm_node.step_name][1] += comm_node.real_time + if comm_node.name not in total_comm_stats: + total_comm_stats[comm_node.name] = [0, 0, 0, 0] + total_comm_stats[comm_node.name][0] += 1 + bytes_one_value = 0 + if comm_node.input_shape: + for i in range(len(comm_node.input_shape)): + if comm_node.input_type[i] == 'long int': + bytes_one_value = 8 + elif comm_node.input_type[i] == 'float': + bytes_one_value = 4 + elif comm_node.input_type[i] == 'int': + bytes_one_value = 4 + else: + logger.warning("Found an unknown tensor type: {}".format(comm_node.input_type[i])) + bytes_one_value = 0 + total_size = 1 + for size in comm_node.input_shape[i]: + total_size *= size + total_comm_stats[comm_node.name][1] += total_size * bytes_one_value + total_comm_stats[comm_node.name][2] += comm_node.total_time + total_comm_stats[comm_node.name][3] += comm_node.real_time + + return step_comm_stats, total_comm_stats diff --git a/tb_plugin/torch_tb_profiler/profiler/data.py b/tb_plugin/torch_tb_profiler/profiler/data.py index 64492b475..fdb5336cb 100644 --- a/tb_plugin/torch_tb_profiler/profiler/data.py +++ b/tb_plugin/torch_tb_profiler/profiler/data.py @@ -1,125 +1,185 @@ # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# -------------------------------------------------------------------------- - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - import gzip -import io +import io as sysio import json -import os import re import tempfile from collections import OrderedDict +from json.decoder import JSONDecodeError +from .. import io, utils from . import trace +from .communication import analyze_communication_nodes +from .event_parser import EventParser, ProfileRole +from .gpu_metrics_parser import GPUMetricsParser from .kernel_parser import KernelParser +from .memory_parser import MemoryParser from .module_parser import ModuleParser from .overall_parser import OverallParser -from .. import consts, utils logger = utils.get_logger() - -class RunData(object): - def __init__(self, name, run_dir): - self.name = name - self.run_dir = run_dir - self.profiles = OrderedDict() - - class RunProfileData(object): - def __init__(self, worker): + def __init__(self, worker, span=None): self.worker = worker + self.span = span self.data_schema_version = None + self.distributed_info = None + self.device_props = None + self.used_devices = [] + self.use_dp = False + self.use_ddp =False + self.use_nccl = False self.events = None self.trace_file_path = None self.has_runtime = False self.has_kernel = False + self.has_communication = False self.has_memcpy_or_memset = False self.steps_costs = None self.steps_names = None self.avg_costs = None + self.runtime_node_list = None + self.gpu_ids = None + self.gpu_utilization = None + self.sm_efficency = None + self.occupancy = None + self.gpu_util_buckets = None # Cached here. Will be processed to json on first trace view. + self.approximated_sm_efficency_ranges = None # Cached here. Will be processed to json on first trace view. 
+ self.blocks_per_sm_count = None + self.occupancy_count = None self.op_list_groupby_name = None self.op_list_groupby_name_input = None + self.stack_lists_group_by_name = None + self.stack_lists_group_by_name_input = None self.kernel_list_groupby_name_op = None self.kernel_stat = None self.recommendations = [] + self.comm_node_list = None + self.comm_overlap_costs = None + + # Memory stats + self.memory_stats = None + + @property + def has_memory_data(self): + if self.memory_stats: + for node_metrics in self.memory_stats.values(): + for metrics_values in node_metrics.values(): + if any(metrics_values): + return True + + return False @staticmethod - def parse(run_dir, worker): - logger.debug("Parse trace, run_dir=%s, worker=%s", run_dir, worker) + def parse(run_dir, worker, span, path, caches): + logger.debug("Parse trace, run_dir=%s, worker=%s", run_dir, path) + + trace_path, trace_json = RunProfileData._preprocess_file(caches, io.join(run_dir, path)) - trace_path = os.path.join(run_dir, "{}{}".format(worker, consts.TRACE_FILE_SUFFIX)) - fopen = open - if not os.path.isfile(trace_path): - trace_path += ".gz" - fopen = gzip.open + profile = RunProfileData(worker, span) + profile.trace_file_path = trace_path + if type(trace_json) is dict: + profile.data_schema_version = trace_json.get("schemaVersion", None) + profile.distributed_info = trace_json.get("distributedInfo", None) + profile.device_props = trace_json.get("deviceProperties", None) + trace_json = trace_json["traceEvents"] - if not os.path.isfile(trace_path): + profile.events = [] + for data in trace_json: + event = trace.create_event(data) + if event is not None: + profile.events.append(event) + + return profile + + @staticmethod + def _preprocess_file(caches, trace_path): + if not io.exists(trace_path): raise FileNotFoundError(trace_path) + local_file = caches.get_remote_cache(trace_path) + data = io.read(local_file) + if trace_path.endswith('.gz'): + data = gzip.decompress(data) + try: - with 
fopen(trace_path, 'r') as f: - trace_json = json.load(f) - except json.decoder.JSONDecodeError as e: + trace_json = json.loads(data) + except JSONDecodeError as e: # Kineto may export json file with non-ascii code. before this is fixed, use a workaround - # to handleJSONDecodeError, re-encode it and save to a temp file - with fopen(trace_path, 'rt') as f: - try: - trace_json = json.load(f, strict=False) - except json.decoder.JSONDecodeError: - # TODO: remove the workaround after the libkineto fix for N/A is merged into pytorch - f.seek(0) - with io.StringIO() as fout: - for line in f: - # only replace the N/A without surrounding double quote - fout.write(re.sub(r'(? 0.05: text = "This run has high time cost on input data loading. " \ "{}% of the step time is in DataLoader. You could " \ @@ -140,3 +201,77 @@ def analyze(self): "https://pytorch.org/docs/stable/data.html#single-and-multi-process-data-loading" ) self.recommendations.append(text) + + self._analyze_distributed_metrics() + self._analyze_gpu_metrics() + + def _analyze_distributed_metrics(self): + if self.use_dp and len(self.used_devices) > 1: + text = "It is recommended to use DistributedDataParallel, instead of DataParallel to do multi-GPU training." \ + "Reference: Use DistributedDataParallel instead of DataParallel".format( + "https://pytorch.org/docs/stable/notes/cuda.html#cuda-nn-ddp-instead" + ) + self.recommendations.append(text) + + if self.use_ddp and not self.use_nccl and self.device_props: + for device_prop in self.device_props: + major = device_prop.get("computeMajor") + minor = device_prop.get("computeMinor") + if major is None or minor is None: + continue + compute_capability = "{}.{}".format(major, minor) + if float(compute_capability) >= 3.5: + text = "Nccl backend is currently the fastest and highly recommended backend when using DDP for training." 
+ self.recommendations.append(text) + break + + communication_ratio = self.avg_costs.costs[ProfileRole.Communication] / self.avg_costs.costs[ProfileRole.Total] + if communication_ratio > 0.1: + text = "This run has high time cost on communication. " \ + "{}% of the step time is in communication. You could " \ + "try Gradient Accumulation or increase the batch size. " \ + "Note: Gradient accumulation will increase global effective batch size, which may hurt model convergence and accuracy. " \ + "For such case, you may want to evaluate LAMB optimizer".format( + round(communication_ratio * 100, 1), "https://nvidia.github.io/apex/optimizers.html#apex.optimizers.FusedLAMB") + self.recommendations.append(text) + + def _analyze_gpu_metrics(self): + def get_gpus_str(gpus): + gpu_list_str = str(gpus[0]) + for i in range(1, len(gpus)): + if i == len(gpus) - 1: + gpu_list_str += "and {}".format(gpus[i]) + else: + gpu_list_str += ", {}".format(gpus[i]) + has_str = "has" if len(gpu_list_str) == 1 else "have" + return gpu_list_str, has_str + + low_util_gpus = [] + for gpu_id in self.gpu_ids: + if self.gpu_utilization[gpu_id] < 0.5: + low_util_gpus.append(gpu_id) + if len(low_util_gpus) > 0: + gpu_list_str, has_str = get_gpus_str(low_util_gpus) + text = "GPU {} {} low utilization. You could try to " \ + "increase batch size to improve. 
Note: Increasing batch size " \ + "may affect the speed and stability of model convergence.".format( + gpu_list_str, has_str) + self.recommendations.append(text) + +class DistributedRunProfileData: + def __init__(self, run_profile_data): + self.worker = run_profile_data.worker + self.span = run_profile_data.span + self.steps_names = run_profile_data.steps_names + self.has_communication = run_profile_data.has_communication + self.comm_node_list = run_profile_data.comm_node_list + self.comm_overlap_costs = run_profile_data.comm_overlap_costs + self.used_devices = run_profile_data.used_devices + self.device_props = run_profile_data.device_props + self.distributed_info = run_profile_data.distributed_info + + self.total_comm_stats = None + self.step_comm_stats = None + + def communication_parse(self): + self.step_comm_stats, self.total_comm_stats = analyze_communication_nodes(self.comm_node_list) diff --git a/tb_plugin/torch_tb_profiler/profiler/event_parser.py b/tb_plugin/torch_tb_profiler/profiler/event_parser.py new file mode 100644 index 000000000..cfd1999c8 --- /dev/null +++ b/tb_plugin/torch_tb_profiler/profiler/event_parser.py @@ -0,0 +1,362 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# ------------------------------------------------------------------------- +import sys +from collections import defaultdict +from enum import IntEnum + +from .. 
import utils +from .communication import generate_communication_nodes +from .node import (CommunicationNode, DeviceNode, OperatorNode, + ProfilerStepNode, RuntimeNode) +from .range_utils import merge_ranges +from .trace import EventTypes + +logger = utils.get_logger() + +CommunicationOpNameSet = ['nccl:broadcast', 'nccl:reduce', 'nccl:all_reduce', 'nccl:all_gather', 'nccl:reduce_scatter'] +ProfileRole = IntEnum('ProfileRole', ['Kernel', 'Memcpy', 'Memset', 'Communication', 'Runtime', 'DataLoader', 'CpuOp', 'Other', 'Total'], start=0) + + +class NodeContext: + def __init__(self, tid2list, tid2zero_rt_list, corrid_to_device): + self.tid2list = tid2list + self.tid2zero_rt_list = tid2zero_rt_list + self.corrid_to_device = corrid_to_device + +class StepContext: + def __init__(self, prev_step_end_time, steps_device, steps_matched_device_nodes): + self.prev_step_end_time = prev_step_end_time + self.steps_device = steps_device + self.steps_matched_device_nodes = steps_matched_device_nodes + +class NodeParserMixin: + def __init__(self, *args, **kwargs): + '''Please refer to https://stackoverflow.com/questions/9575409/calling-parent-class-init-with-multiple-inheritance-whats-the-right-way + to see the reason why we need call super().__init__ like this way + ''' + super().__init__(*args, **kwargs) + + self.communication_data = {} + self.device_node_list = [] + self.runtime_node_list = [] + self.used_devices = set() + self.use_dp = False + self.use_ddp = False + self.use_nccl = False + + def parse_nodes(self, events): + # For OperatorNode and ProfilerStepNode: + # Use time interval containing relationship to build father-child correlation, + # which is consistent with autograd profiler. + # For RuntimeNode: + # Use external_id to build correlation with its father OperatorNode or ProfilerStepNode. 
+ # Because in the case when RuntimeNode has duration 0 and starts at same time as a OperatorNode, + # just use interval containing relationship can't tell it is child or brother of the OperatorNode. + tid2list = defaultdict(list) # value is a list of OperatorNode and ProfilerStepNode. Do not include RuntimeNode + tid2zero_rt_list = defaultdict(list) # value is a list of RuntimeNode with external_id=0. They will be attached to root nodes. + corrid_to_device = defaultdict(list) # value is a list of DeviceNode + + corrid_to_runtime = {} # value is a RuntimeNode + externalid_to_runtime = defaultdict(list) # value is a list of RuntimeNode + + for event in events: + if event.type == EventTypes.MEMORY: + continue + self._parse_node(event, corrid_to_device, corrid_to_runtime, externalid_to_runtime, tid2list, tid2zero_rt_list) + + for event in events: + if event.type == EventTypes.KERNEL: + self._update_communication_node(event) + + # associate CUDA Runtimes with CPU events + for _, op_list in tid2list.items(): + for op in op_list: + runtime_nodes = externalid_to_runtime.pop(op.external_id, []) + if runtime_nodes: + op.runtimes.extend(runtime_nodes) + for ext_id in externalid_to_runtime: + if ext_id != 0: + logger.warning("{} Runtime with external id {} don't correlate to any operator!".format( + len(externalid_to_runtime[ext_id]), ext_id)) + + return NodeContext(tid2list, tid2zero_rt_list, corrid_to_device) + + def _update_communication_node(self, event): + '''Update the communication node by using the TraceEvent instance''' + external_id = event.external_id + comm_node = self.communication_data.get(external_id) + if comm_node: + ts = event.ts + dur = event.duration + comm_node.kernel_ranges.append((ts, ts + dur)) + comm_node.total_time += dur + + return comm_node is not None + + def find_device_steps(self, steps): + '''return steps associated with device nodes. 
+ ''' + runtime_node_list = sorted(self.runtime_node_list, key=lambda x: x.start_time) + + # Use similar code with two-way merge to get all runtimes inside each host-side step span, + # then record each step's min kernel start time and max kernel end time: + steps_device = [(sys.maxsize, -sys.maxsize - 1)] * len(steps) + # where the steps associated with devcie node, if yes, the related array item is larger than 0. + steps_matched_device_nodes = [0] * len(steps) + + i_step = 0 + i_runtime = 0 + step_device_min_ts = sys.maxsize + step_device_max_ts = -sys.maxsize - 1 + matched_device_nodes = set() + + while i_step < len(steps) and i_runtime < len(runtime_node_list): + step_host_start_time = steps[i_step][0] + step_host_end_time = steps[i_step][1] + if runtime_node_list[i_runtime].start_time < step_host_start_time: + # This runtime is ahead of or intersects with this step span. Skip this runtime. + i_runtime += 1 + elif runtime_node_list[i_runtime].end_time <= step_host_end_time: + # and runtime_node_list[i_runtime].start_time >= step_host_start_time + # This runtime is inside this step span. Scan its device_nodes. + rt = runtime_node_list[i_runtime] + if rt.device_nodes is not None: + for device_node in rt.device_nodes: + step_device_min_ts = min(device_node.start_time, step_device_min_ts) + step_device_max_ts = max(device_node.end_time, step_device_max_ts) + matched_device_nodes.add(device_node) + steps_matched_device_nodes[i_step] += 1 + i_runtime += 1 + elif runtime_node_list[i_runtime].start_time < step_host_end_time: + # and runtime_node_list[i_runtime].end_time > step_host_end_time + # This runtime intersects with this step span. Skip this runtime. + i_runtime += 1 + else: + # runtime_node_list[i_runtime].start_time >= step_host_end_time + # This runtime starts after this step's end. Record and move forward this step. 
+ steps_device[i_step] = (step_device_min_ts, step_device_max_ts) + i_step += 1 + step_device_min_ts = sys.maxsize + step_device_max_ts = -sys.maxsize - 1 + + while i_step < len(steps): + # This step doesn't launch any device side event, just assign it as empty. + steps_device[i_step] = (step_device_min_ts, step_device_max_ts) + step_device_min_ts = sys.maxsize + step_device_max_ts = -sys.maxsize - 1 + i_step += 1 + + # If there are matched device, find the first step end time before steps_device[0][0] + prev_step_end_time = None + if len(matched_device_nodes) > 0: + prev_step_end_time = steps[0][0] + if steps_device[0][0] != sys.maxsize: # When step 0 has device event. + for device_node in self.device_node_list: + if device_node not in matched_device_nodes: + # Now this device_node is not launched inside any step span. + if device_node.end_time < steps_device[0][0]: + prev_step_end_time = max(prev_step_end_time, device_node.end_time) + + return StepContext(prev_step_end_time, steps_device, steps_matched_device_nodes) + + def _parse_node(self, event, corrid_to_device, corrid_to_runtime, externalid_to_runtime, tid2list, tid2zero_rt_list): + corrid = event.args.get("correlation", None) + tid = event.tid + if event.type in [EventTypes.KERNEL, EventTypes.MEMCPY, EventTypes.MEMSET]: + self.used_devices.add(event.pid) + device_node = DeviceNode.create(event) + if corrid in corrid_to_runtime: + rt_node = corrid_to_runtime[corrid] # Don't pop it because it may be used by next kernel. + if rt_node.device_nodes is None: + rt_node.device_nodes = [] + rt_node.device_nodes.append(device_node) + + # Check the external_id + if rt_node.external_id != device_node.external_id: + logger.warning("Runtime and Device-op have same correlation id %s but with different external id! 
(runtime external_id, device external_id): (%s, %s)" % + (corrid, rt_node.external_id, device_node.external_id)) + else: + corrid_to_device[corrid].append(device_node) + self.device_node_list.append(device_node) + elif event.type == EventTypes.RUNTIME: + device_nodes = corrid_to_device.pop(corrid, None) + rt_node = RuntimeNode.create(event, device_nodes) + corrid_to_runtime[corrid] = rt_node + externalid_to_runtime[rt_node.external_id].append(rt_node) + # Some runtimes has external_id 0, which will not be correlated to any operator. + # So get them and attach them to root node. + if rt_node.external_id == 0: + tid2zero_rt_list[tid].append(rt_node) + self.runtime_node_list.append(rt_node) + + # check the external_id + if device_nodes: + for device_node in device_nodes: + if rt_node.external_id != device_node.external_id: + logger.warning("Runtime and Device-op have same correlation id %s but with different external id! (rt external_id, device external_id): (%s, %s)" % + (corrid, rt_node.external_id, device_node.external_id)) + elif event.type in [EventTypes.PYTHON, EventTypes.OPERATOR, EventTypes.PROFILER_STEP]: + if event.type == EventTypes.PROFILER_STEP: + op_node = ProfilerStepNode.create(event, event.input_shape, event.input_type, None) + else: + op_node = OperatorNode.create(event, event.input_shape, event.input_type, event.callstack) + if event.name in CommunicationOpNameSet: + self.communication_data[op_node.external_id] = CommunicationNode.create(event, op_node.input_shape, op_node.input_type) + self.use_nccl = True + if event.name == "DataParallel.forward": + self.use_dp = True + if event.name == "DistributedDataParallel.forward": + self.use_ddp = True + tid2list[int(tid)].append(op_node) + + +class StepParser: + def __init__(self): + # we could not use [[]] * len here since they all point to same memory + # https://stackoverflow.com/questions/12791501/python-initializing-a-list-of-lists + # 
https://stackoverflow.com/questions/240178/list-of-lists-changes-reflected-across-sublists-unexpectedly + self.role_ranges = [[] for _ in range(ProfileRole.Total - 1)] + self.steps = [] + self.steps_names = [] + self.cpu_min_ts = sys.maxsize # Min time of CPU side events. + self.cpu_max_ts = -sys.maxsize - 1 # Max time of CPU side events. + self.global_min_ts = sys.maxsize # Min time of all events. + self.global_max_ts = -sys.maxsize - 1 # Max time of all events. + # The below two form time range for adding gpu utilization to trace view. + # Use "PyTorch Profiler (0)" as them. + # If not exists, assign global_min_ts and global_max_ts to them. + self.global_start_ts = sys.maxsize + self.global_end_ts = -sys.maxsize - 1 + + def parse_steps(self, events, comm_nodes): + for event in events: + if event.type == EventTypes.MEMORY: + continue + + self._parse_step(event, comm_nodes) + if event.type == EventTypes.TRACE and event.name == "PyTorch Profiler (0)": + self.global_start_ts = event.ts + self.global_end_ts = event.ts + event.duration + if self.global_start_ts == sys.maxsize: + self.global_start_ts = self.global_min_ts + if self.global_end_ts == -sys.maxsize - 1: + self.global_end_ts = self.global_max_ts + + if len(self.steps) == 0: + self.steps.append((self.cpu_min_ts, self.cpu_max_ts)) + self.steps_names.append("0") + + for i in range(len(self.role_ranges)): + self.role_ranges[i] = merge_ranges(self.role_ranges[i]) + + @property + def has_runtime(self): + return bool(self.role_ranges[ProfileRole.Runtime]) + + @property + def has_kernel(self): + return bool(self.role_ranges[ProfileRole.Kernel]) + + @property + def has_communication(self): + return bool(self.role_ranges[ProfileRole.Communication]) + + @property + def has_memcpy_or_memset(self): + return bool(self.role_ranges[ProfileRole.Memcpy] or self.role_ranges[ProfileRole.Memset]) + + def _parse_step(self, event, comm_nodes): + ts = event.ts + dur = event.duration + evt_type = event.type + if evt_type == 
EventTypes.KERNEL: + if event.external_id in comm_nodes: + self.role_ranges[ProfileRole.Communication].append((ts, ts + dur)) + else: + self.role_ranges[ProfileRole.Kernel].append((ts, ts + dur)) + elif evt_type == EventTypes.MEMCPY: + self.role_ranges[ProfileRole.Memcpy].append((ts, ts + dur)) + elif evt_type == EventTypes.MEMSET: + self.role_ranges[ProfileRole.Memset].append((ts, ts + dur)) + elif evt_type == EventTypes.RUNTIME: + self.role_ranges[ProfileRole.Runtime].append((ts, ts + dur)) + elif evt_type == EventTypes.OPERATOR and event.name.startswith("enumerate(DataLoader)#") \ + and event.name.endswith(".__next__"): + self.role_ranges[ProfileRole.DataLoader].append((ts, ts + dur)) + elif event.type == EventTypes.PROFILER_STEP: + self.steps.append((ts, ts + dur)) + self.steps_names.append(str(event.step)) + elif evt_type in [EventTypes.PYTHON, EventTypes.OPERATOR]: + self.role_ranges[ProfileRole.CpuOp].append((ts, ts + dur)) + + # Record host side min and max time. + if evt_type in [EventTypes.PYTHON, EventTypes.OPERATOR, EventTypes.PROFILER_STEP]: + self.cpu_min_ts = min(self.cpu_min_ts, ts) + self.cpu_max_ts = max(self.cpu_max_ts, ts + dur) + # Record global wise min and max time. + self.global_min_ts = min(self.global_min_ts, ts) + self.global_max_ts = max(self.global_max_ts, ts + dur) + + def update_steps_duration(self, context): + '''Update self.steps considering device side events launched by each host side step. + Update self.steps_names if some tail steps are removed.''' + + prev_step_end_time = context.prev_step_end_time + steps_device = context.steps_device + steps_matched_device_nodes = context.steps_matched_device_nodes + + # Change step time to device side on the condition that any step have device time. 
+ is_use_gpu = prev_step_end_time is not None + if is_use_gpu: + for i_step in range(len(self.steps)): + step_start_time = max(prev_step_end_time, self.steps[i_step][0]) + step_end_time = self.steps[i_step][1] + if steps_device[i_step][0] == sys.maxsize: # When step i_step has no device event. + # Assign to step_start_time when kernel is behind host step end. + step_end_time = max(step_end_time, step_start_time) + else: + step_end_time = max(step_end_time, steps_device[i_step][1]) + if step_end_time < step_start_time: + logger.warning( + "Abnormal step_end_time of step {}: [{}, {}]".format( + i_step, step_start_time, step_end_time)) + step_end_time = step_start_time + self.steps[i_step] = (step_start_time, step_end_time) # Update step time considering device side. + prev_step_end_time = step_end_time + + is_remove_tail_steps = True # TODO: Use tensorboard argument instead. + if is_use_gpu and len(self.steps) > 1 and is_remove_tail_steps: + i_step = len(self.steps) - 1 + while i_step >= 0: + if steps_matched_device_nodes[i_step] > 0: + break + i_step -= 1 + if i_step >= 0: + keep_steps = i_step + 1 + if i_step > 0 and steps_matched_device_nodes[i_step - 1] * 0.8 > steps_matched_device_nodes[i_step]: + keep_steps = i_step + if keep_steps < len(self.steps): + logger.warning( + "Remove the last {} steps from overview. 
" + "Because the profiler may fail to capture all the kernels launched by these steps.".format( + len(self.steps) - keep_steps + )) + self.steps = self.steps[:keep_steps] + self.steps_names = self.steps_names[:keep_steps] + +class EventParser(NodeParserMixin, StepParser): + def __init__(self): + super().__init__() + + def parse(self, events): + node_context = self.parse_nodes(events) + self.parse_steps(events, self.communication_data) + + # Move the interleaved logic out of each NodeParser and StepParser + steps_context = self.find_device_steps(self.steps) + self.update_steps_duration(steps_context) + return node_context + + def generate_communication_nodes(self): + return generate_communication_nodes(self.communication_data, self.steps, self.steps_names) diff --git a/tb_plugin/torch_tb_profiler/profiler/gpu_metrics_parser.py b/tb_plugin/torch_tb_profiler/profiler/gpu_metrics_parser.py new file mode 100644 index 000000000..7cf699727 --- /dev/null +++ b/tb_plugin/torch_tb_profiler/profiler/gpu_metrics_parser.py @@ -0,0 +1,178 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# -------------------------------------------------------------------------- +from .. import consts, utils +from .range_utils import (get_ranges_sum, intersection_ranges_lists, + intersection_ranges_lists_with_value, merge_ranges, + merge_ranges_with_value) +from .trace import EventTypes + +logger = utils.get_logger() + + +# For calculating GPU utilization, and approximated SM efficiency. +class GPUMetricsParser(object): + def __init__(self): + # All gpu ids that used by any kernel. + self.gpu_ids = set() + # For calculating GPU utilization. 
+ self.kernel_ranges_per_device = [[] for _ in range(consts.MAX_GPU_PER_NODE)] + self.gpu_utilization = [None] * consts.MAX_GPU_PER_NODE + self.gpu_util_timeline_unit_size = 0 + self.gpu_util_timeline_unit_name = "" + self.gpu_util_buckets = [[] for _ in range(consts.MAX_GPU_PER_NODE)] + # For calculating approximated SM efficiency. + self.blocks_per_sm_per_device = [[] for _ in range(consts.MAX_GPU_PER_NODE)] + self.avg_approximated_sm_efficency_per_device = [None] * consts.MAX_GPU_PER_NODE + self.approximated_sm_efficency_ranges = [[] for _ in range(consts.MAX_GPU_PER_NODE)] + self.gpu_sm_efficiency_json = None + self.blocks_per_sm_count = [0] * consts.MAX_GPU_PER_NODE + # For calculating averaged occupancy. + self.occupancy_per_device = [[] for _ in range(consts.MAX_GPU_PER_NODE)] + self.avg_occupancy_per_device = [None] * consts.MAX_GPU_PER_NODE + self.occupancy_count = [0] * consts.MAX_GPU_PER_NODE + + def calculate_gpu_utilization(self, global_start_time, global_end_time, steps_start_time, steps_end_time): + # Make bucket_size to 10-power's of us, and number of buckets to (10, 100]. + # 10-power's of us, in order to straight forward for user to understand. + # If number of buckets are too many, the value of gpu utilization will be either 0 or 1. + def get_bucket_info(range_micro_seconds): + max_buckets = 100 + bucket_size = 1 + while range_micro_seconds / bucket_size > max_buckets: + bucket_size *= 10 + buckets = int(range_micro_seconds / bucket_size) + unit = bucket_size + unit_str = "us" + if unit >= 1000: + unit /= 1000 + unit_str = "ms" + if unit >= 1000: + unit /= 1000 + unit_str = "s" + return int(bucket_size), int(buckets), int(unit), unit_str + + gpu_utilization_timeline = [[] for _ in range(consts.MAX_GPU_PER_NODE)] + for gpu_id in self.gpu_ids: + self.kernel_ranges_per_device[gpu_id] = merge_ranges(self.kernel_ranges_per_device[gpu_id]) + + # Top-level number still consider steps, to be consistent with overview's breakdown. 
+ kernel_ranges_all_steps = intersection_ranges_lists( + self.kernel_ranges_per_device[gpu_id], [(steps_start_time, steps_end_time)]) + ranges_sum = get_ranges_sum(kernel_ranges_all_steps) + self.gpu_utilization[gpu_id] = ranges_sum / (steps_end_time - steps_start_time) + + # The timeline will use "PyTorch Profiler (0)" as start, + # in order to draw previous step's kernels' gpu utilization. + bucket_size, buckets, self.gpu_util_timeline_unit_size, self.gpu_util_timeline_unit_name = \ + get_bucket_info(global_end_time - global_start_time) + buckets_ranges = [] + for i in range(buckets): + buckets_ranges.append((global_start_time + i * bucket_size, + global_start_time + (i + 1) * bucket_size if i < buckets - 1 + else global_end_time)) # The last bucket may be longer. + gpu_utilization_timeline[gpu_id] = [0] * buckets + if len(self.kernel_ranges_per_device[gpu_id]) > 0: + current_range_index = 0 + current_range = self.kernel_ranges_per_device[gpu_id][current_range_index] + current_bucket_index = 0 + current_bucket = buckets_ranges[0] + while current_range_index < len(self.kernel_ranges_per_device[gpu_id]) and current_bucket_index < buckets: + if current_bucket[1] <= current_range[0]: + current_bucket_index += 1 + current_bucket = buckets_ranges[current_bucket_index] if current_bucket_index < buckets \ + else None + elif current_bucket[0] >= current_range[1]: + current_range_index += 1 + if current_range_index < len(self.kernel_ranges_per_device[gpu_id]): + current_range = self.kernel_ranges_per_device[gpu_id][current_range_index] + else: + left_bound = max(current_range[0], current_bucket[0]) + right_bound = min(current_range[1], current_bucket[1]) + gpu_utilization_timeline[gpu_id][current_bucket_index] += (right_bound - left_bound) + if current_bucket[1] < current_range[1]: + current_bucket_index += 1 + current_bucket = buckets_ranges[current_bucket_index] if current_bucket_index < buckets \ + else None + else: + current_range_index += 1 + if current_range_index < 
len(self.kernel_ranges_per_device[gpu_id]): + current_range = self.kernel_ranges_per_device[gpu_id][current_range_index] + for i_bucket in range(buckets): + bucket_size = buckets_ranges[i_bucket][1] - buckets_ranges[i_bucket][0] + gpu_utilization_timeline[gpu_id][i_bucket] /= bucket_size + start_time = buckets_ranges[i_bucket][0] + self.gpu_util_buckets[gpu_id].append((start_time, gpu_utilization_timeline[gpu_id][i_bucket])) + start_time = buckets_ranges[-1][1] + self.gpu_util_buckets[gpu_id].append((start_time, 0)) + + self.kernel_ranges_per_device = None # Release memory. + + def calculate_approximated_sm_efficency(self, steps_start_time, steps_end_time): + def calculate_avg(approximated_sm_efficency_ranges, total_dur): + total_weighted_sm_efficiency = 0.0 + for r in approximated_sm_efficency_ranges: + dur = r[1] - r[0] + total_weighted_sm_efficiency += r[2] * dur + avg_approximated_sm_efficency = total_weighted_sm_efficiency / total_dur + return avg_approximated_sm_efficency + + total_dur = steps_end_time - steps_start_time + for gpu_id in self.gpu_ids: + blocks_per_sm_ranges = self.blocks_per_sm_per_device[gpu_id] + approximated_sm_efficency_ranges = merge_ranges_with_value(blocks_per_sm_ranges) + # To be consistent with GPU utilization, here it must also intersect with all steps, + # in order to remove the kernels out of steps range. + approximated_sm_efficency_ranges_all_steps = intersection_ranges_lists_with_value( + approximated_sm_efficency_ranges, [(steps_start_time, steps_end_time)]) + if len(approximated_sm_efficency_ranges_all_steps) > 0: + avg_approximated_sm_efficency = calculate_avg(approximated_sm_efficency_ranges_all_steps, total_dur) + self.avg_approximated_sm_efficency_per_device[gpu_id] = avg_approximated_sm_efficency + + # The timeline still uses all kernels including out of steps scope's. 
+ if len(approximated_sm_efficency_ranges) > 0: + self.approximated_sm_efficency_ranges[gpu_id] = approximated_sm_efficency_ranges + + self.blocks_per_sm_per_device = None # Release memory. + + # Weighted average. Weighted by kernel's time duration. + def calculate_occupancy(self): + for gpu_id in self.gpu_ids: + occupancys_on_a_device = self.occupancy_per_device[gpu_id] + total_time = 0 + total_occupancy = 0.0 + for r in occupancys_on_a_device: + dur = r[1] - r[0] + total_occupancy += r[2] * dur + total_time += dur + avg_occupancy = total_occupancy / total_time + self.avg_occupancy_per_device[gpu_id] = avg_occupancy + + def parse_events(self, events, global_start_time, global_end_time, steps_start_time, steps_end_time): + logger.debug("GPU Metrics, parse events") + for event in events: + if event.type == EventTypes.KERNEL: + self.parse_event(event) + + self.calculate_gpu_utilization(global_start_time, global_end_time, steps_start_time, steps_end_time) + self.calculate_approximated_sm_efficency(steps_start_time, steps_end_time) + self.calculate_occupancy() + + def parse_event(self, event): + ts = event.ts + dur = event.duration + gpu_id = event.args.get("device", None) + if gpu_id != event.pid: + logger.warning("pid '{}' is not equal to args.device '{}' on event with ts '{}'".format( + event.pid, gpu_id, event.ts)) + if gpu_id is not None: + if gpu_id not in self.gpu_ids: + self.gpu_ids.add(gpu_id) + self.kernel_ranges_per_device[gpu_id].append((ts, ts + dur)) + self.blocks_per_sm_per_device[gpu_id].append((ts, ts + dur, event.args.get("blocks per SM", 0.0))) + self.occupancy_per_device[gpu_id].append((ts, ts + dur, + event.args.get("est. achieved occupancy %", 0.0))) + if "blocks per SM" in event.args: + self.blocks_per_sm_count[gpu_id] += 1 + if "est. 
achieved occupancy %" in event.args: + self.occupancy_count[gpu_id] += 1 diff --git a/tb_plugin/torch_tb_profiler/profiler/kernel_parser.py b/tb_plugin/torch_tb_profiler/profiler/kernel_parser.py index 72d8cc6f5..ca3305181 100644 --- a/tb_plugin/torch_tb_profiler/profiler/kernel_parser.py +++ b/tb_plugin/torch_tb_profiler/profiler/kernel_parser.py @@ -1,9 +1,11 @@ # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # -------------------------------------------------------------------------- - +import numpy as np import pandas as pd +from .trace import EventTypes + class KernelParser: def __init__(self): @@ -12,10 +14,25 @@ def __init__(self): def parse_events(self, events): events_dict = [] for event in events: - events_dict.append(event.to_dict()) + if event.type == EventTypes.KERNEL: + events_dict.append(vars(event)) + events_dict[-1]["blocks_per_sm"] = event.args.get("blocks per SM", 0) + events_dict[-1]["occupancy"] = event.args.get("est. 
achieved occupancy %", 0) events = events_dict events = pd.DataFrame(events) events = events.astype({"type": "category", "category": "category", "name": "string"}, copy=False) - kernels = events[events["category"] == "Kernel"] - self.kernel_stat = kernels.groupby("name")["duration"].agg(["count", "sum", "mean", "max", "min"]) \ - .sort_values("sum", ascending=False) + + def weighted_avg(x): + try: + return np.average(x, weights=events.loc[x.index, "duration"]) + except ZeroDivisionError: + return 0 + + self.kernel_stat = events.groupby("name").agg( + count=('duration', "count"), + sum=('duration', "sum"), + mean=('duration', "mean"), + max=('duration', "max"), + min=('duration', "min"), + blocks_per_sm=('blocks_per_sm', weighted_avg), + occupancy=('occupancy', weighted_avg)) diff --git a/tb_plugin/torch_tb_profiler/profiler/loader.py b/tb_plugin/torch_tb_profiler/profiler/loader.py index 916957b77..935a82519 100644 --- a/tb_plugin/torch_tb_profiler/profiler/loader.py +++ b/tb_plugin/torch_tb_profiler/profiler/loader.py @@ -1,73 +1,167 @@ # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # -------------------------------------------------------------------------- - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - +import bisect import os +import sys +from collections import defaultdict +from multiprocessing import Barrier, Process, Queue -from .data import RunData, RunProfileData -from .run_generator import RunGenerator -from .. import consts, utils +from .. 
import consts, io, utils from ..run import Run +from .data import DistributedRunProfileData, RunProfileData +from .run_generator import DistributedRunGenerator, RunGenerator logger = utils.get_logger() class RunLoader(object): - def __init__(self, name, run_dir): - self.run = RunData(name, run_dir) + def __init__(self, name, run_dir, caches): + self.run_name = name + self.run_dir = run_dir + self.caches = caches + self.queue = Queue() def load(self): - self._parse() - if len(self.run.profiles) == 0: - logger.warning("No profile data found.") - return None + workers = [] + spans_by_workers = defaultdict(list) + for path in io.listdir(self.run_dir): + if io.isdir(io.join(self.run_dir, path)): + continue + match = consts.WORKER_PATTERN.match(path) + if not match: + continue + + worker = match.group(1) + span = match.group(2) + if span is not None: + # remove the starting dot (.) + span = span[1:] + bisect.insort(spans_by_workers[worker], span) - self._process() + workers.append((worker, span, path)) - self._analyze() + span_index_map = {} + for worker, span_array in spans_by_workers.items(): + for i, span in enumerate(span_array, 1): + span_index_map[(worker, span)] = i - run = self._generate_run() + barrier = Barrier(len(workers) + 1) + for worker, span, path in workers: + # convert the span timestamp to the index. + span_index = None if span is None else span_index_map[(worker, span)] + p = Process(target=self._process_data, args=(worker, span_index, path, barrier)) + p.start() + + logger.info("starting all processing") + # since there is one queue, its data must be read before join. + # https://stackoverflow.com/questions/31665328/python-3-multiprocessing-queue-deadlock-when-calling-join-before-the-queue-is-em + # The queue implementation in multiprocessing that allows data to be transferred between processes relies on standard OS pipes. 
+ # OS pipes are not infinitely long, so the process which queues data could be blocked in the OS during the put() + # operation until some other process uses get() to retrieve data from the queue. + # During my testing, I found that the maximum buffer length is 65532 in my test machine. + # If I increase the message size to 65533, the join would hang the process. + barrier.wait() + + distributed_run = Run(self.run_name, self.run_dir) + run = Run(self.run_name, self.run_dir) + for _ in range(len(workers)): + r, d = self.queue.get() + if r is not None: + run.add_profile(r) + if d is not None: + distributed_run.add_profile(d) + + distributed_profiles = self._process_spans(distributed_run) + if distributed_profiles is not None: + if isinstance(distributed_profiles, list): + for d in distributed_profiles: + run.add_profile(d) + else: + run.add_profile(distributed_profiles) + + # for no daemon process, no need to join them since it will automatically join return run - def _parse(self): - workers = [] - for path in os.listdir(self.run.run_dir): - if os.path.isdir(path): - continue - for pattern in [consts.TRACE_GZIP_FILE_SUFFIX, consts.TRACE_FILE_SUFFIX]: - if path.endswith(pattern): - worker = path[:-len(pattern)] - workers.append(worker) - break - - for worker in sorted(workers): - try: - data = RunProfileData.parse(self.run.run_dir, worker) - self.run.profiles[worker] = data - except Exception as ex: - logger.warning("Failed to parse profile data for Run %s on %s. 
Exception=%s", - self.run.name, worker, ex, exc_info=True) - - def _process(self): - for data in self.run.profiles.values(): - logger.debug("Processing profile data") - data.process() - logger.debug("Processing profile data finish") + def _process_data(self, worker, span, path, barrier): + import absl.logging + absl.logging.use_absl_handler() - def _analyze(self): - for data in self.run.profiles.values(): - logger.debug("Analyzing profile data") + try: + logger.debug("starting process_data") + data = RunProfileData.parse(self.run_dir, worker, span, path, self.caches) + data.process() data.analyze() - logger.debug("Analyzing profile data finish") - def _generate_run(self): - run = Run(self.run.name, self.run.run_dir) - for worker, data in self.run.profiles.items(): - generator = RunGenerator(worker, data) + generator = RunGenerator(worker, span, data) profile = generator.generate_run_profile() - run.add_profile(profile) - return run + dist_data = DistributedRunProfileData(data) + + self.queue.put((profile, dist_data)) + except KeyboardInterrupt: + logger.warning("tb_plugin receive keyboard interrupt signal, process %d will exit" % (os.getpid())) + sys.exit(1) + except Exception as ex: + logger.warning("Failed to parse profile data for Run %s on %s. 
Exception=%s", + self.run_name, worker, ex, exc_info=True) + self.queue.put((None, None)) + barrier.wait() + logger.debug("finishing process data") + + def _process_spans(self, distributed_run): + spans = distributed_run.get_spans() + if spans is None: + return self._process_distributed_profiles(distributed_run.get_profiles(), None) + else: + span_profiles = [] + for span in spans: + profiles = distributed_run.get_profiles(span=span) + p = self._process_distributed_profiles(profiles, span) + if p is not None: + span_profiles.append(p) + return span_profiles + + def _process_distributed_profiles(self, profiles, span): + has_communication = True + comm_node_lists = [] + for data in profiles: + logger.debug("Processing profile data") + # Set has_communication to False and disable distributed view if any one worker has no communication + if not data.has_communication: + has_communication = False + else: + comm_node_lists.append(data.comm_node_list) + if len(comm_node_lists[-1]) != len(comm_node_lists[0]): + logger.error("Number of communication operation nodes don't match between workers in run: %s" % self.run_name) + has_communication = False + logger.debug("Processing profile data finish") + + if not has_communication: + logger.debug("There is no communication profile in this run.") + return None + + worker_num = len(comm_node_lists) + for i, node in enumerate(comm_node_lists[0]): + kernel_range_size = len(node.kernel_ranges) + # loop for all communication kernel ranges in order + for j in range(kernel_range_size): + min_range = sys.maxsize + # For each kernel_range, find the minist between workers as the real communication time + for k in range(worker_num): + kernel_ranges = comm_node_lists[k][i].kernel_ranges + if len(kernel_ranges) != kernel_range_size: + logger.error("Number of communication kernels don't match between workers in run: %s" % self.run_name) + has_communication = False + return None + if kernel_ranges: + if kernel_ranges[j][1] - kernel_ranges[j][0] 
< min_range: + min_range = kernel_ranges[j][1] - kernel_ranges[j][0] + for k in range(worker_num): + comm_node_lists[k][i].real_time += min_range + + for data in profiles: + data.communication_parse() + + generator = DistributedRunGenerator(profiles, span) + profile = generator.generate_run_profile() + return profile diff --git a/tb_plugin/torch_tb_profiler/profiler/memory_parser.py b/tb_plugin/torch_tb_profiler/profiler/memory_parser.py new file mode 100644 index 000000000..3ff526c90 --- /dev/null +++ b/tb_plugin/torch_tb_profiler/profiler/memory_parser.py @@ -0,0 +1,314 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# -------------------------------------------------------------------------- +import os +from collections import defaultdict + +from .. import utils +from .node import MemoryMetrics, is_operator_node +from .trace import DeviceType, EventTypes + +logger = utils.get_logger() + +BENCHMARK_MEMORY = os.getenv('TORCH_PROFILER_BENCHMARK_MEMORY') +if BENCHMARK_MEMORY is not None and BENCHMARK_MEMORY.upper() in ("1", "TRUE", "ON"): + BENCHMARK_MEMORY = True +else: + BENCHMARK_MEMORY = False + +def benchmark(func): + def wrapper(*args, **kwargs): + if BENCHMARK_MEMORY: + import time + start = time.time_ns() + ret = func(*args, **kwargs) + end = time.time_ns() + logger.info("{}: {} takes: {} seconds".format(os.getpid(), func.__name__, (end - start) / 1000000000)) + return ret + else: + return func(*args, **kwargs) + + return wrapper + +class MemoryRecord: + def __init__(self, scope, pid, tid, ts, device_type, device_id, bytes): + self.scope = scope + self.tid = tid + self.pid = pid + self.ts = ts + self.device_type = device_type + self.device_id = device_id + self.bytes = bytes + + @property + def device_name(self): + if self.device_type == DeviceType.CPU: + return "CPU" + elif self.device_type == DeviceType.CUDA: + return "GPU{}".format(self.device_id) + else: + 
return None + + +class MemoryParser: + def __init__(self, tid2tree, op_list): + self.tid2tree = tid2tree + self.op_list = op_list + + self.records_by_tid = defaultdict(list) + + # statistics purpose + self.staled_records = [] + self.processed_records = [] + + # the visited node times from parent to child + # troubleshooting issue purpose. + self.processed_node = defaultdict(int) + self.unreached_node = defaultdict(list) + + # normal search + self.staled_records_normal = [] + self.processed_records_normal = [] + + # for troubleshooting issues. + self.processed_node_normal = set() + self.unreached_node_normal = defaultdict(list) + + def parse_events(self, events): + for event in events: + if event.type == EventTypes.MEMORY: + record = MemoryRecord(event.scope, event.pid, event.tid, event.ts, event.device_type, event.device_id, event.bytes) + self.records_by_tid[record.tid].append(record) + + for val in self.records_by_tid.values(): + val.sort(key=lambda x: x.ts) + + if BENCHMARK_MEMORY: + self.update_node_recursive() + self.update_node() + else: + self.update_node() + + @benchmark + def get_memory_statistics(self): + SELF_METRICS_COUNT = MemoryMetrics.IncreaseSize + + def dict_factory(): + return defaultdict(lambda: [0] * MemoryMetrics.Total) + + # two level keys dictionary + # first keyed by node, then keyed by device (CPU/GPU0/GPU1/etc.) 
+ memory_metrics_keyed_by_node = defaultdict(dict_factory) + + def traverse_node_memory(node): + if BENCHMARK_MEMORY: + if node not in self.processed_node_normal: + self.unreached_node_normal[tid].append(node) + + if node not in self.processed_node: + self.unreached_node[tid].append(node) + # since the node has not been visited for insert memory records, just ignore all childrens + return + elif is_operator_node(node): + node_memory_metrics = node.get_memory_metrics() + for device, metrics in node_memory_metrics.items(): + # device is name of device like: CPU/GPU0 + # metrics is an arrary [SelfIncreaseSize, SelfAllocationSize, SelfAllocationCount] + for i, value in enumerate(metrics): + memory_metrics_keyed_by_node[node][device][i] = value + memory_metrics_keyed_by_node[node][device][i + SELF_METRICS_COUNT] += value + else: + logger.debug("node {}:{} is not operator node, will skip its self metrics processing".format(node.name, node.start_time)) + + # recursive the children nodes + for child in node.children: + traverse_node_memory(child) + # sum up the child metrics + for device, metrics in memory_metrics_keyed_by_node[child].items(): + for i in range(SELF_METRICS_COUNT, MemoryMetrics.Total): + memory_metrics_keyed_by_node[node][device][i] += metrics[i] + + for tid, root in self.tid2tree.items(): + for child in root.children: + traverse_node_memory(child) + + # keyed first by device name like CPU/GPU0 etc, then keyed by operator name. + # the value is array [items indexed by MemoryMetrics] + memory_metrics_keyed_by_nodename = defaultdict(dict_factory) + # node: the instance, device_keyed_metrics: dictionary keyed by device name like CPU/GPU0 + for node, device_keyed_metrics in memory_metrics_keyed_by_node.items(): + if not is_operator_node(node): + # skip the node like Optimizer.step, DataLoader, ProfilerStep#1 etc. 
+ continue + + for device, metrics in device_keyed_metrics.items(): + for i, metric in enumerate(metrics): + memory_metrics_keyed_by_nodename[device][node.name][i] += metric + + # get the op_calls dictionary from module parser result. + op_calls = defaultdict(int) + for op in self.op_list: + op_calls[op.name] += op.calls + + result = defaultdict(defaultdict) + for device, node_metrics in memory_metrics_keyed_by_nodename.items(): + for node, values in node_metrics.items(): + if any(values): + result[device][node] = values + [op_calls[node]] + + if BENCHMARK_MEMORY: + for tid, nodes in self.unreached_node.items(): + if nodes: + logger.info("LOOP: tid-{}: total {} node doesn't get reached.".format(tid, len(nodes))) + else: + logger.info("LOOP: tid-{}: all nodes are covered".format(tid)) + for tid, nodes in self.unreached_node_normal.items(): + if nodes: + logger.info("RECURSIVE: tid-{}: total {} node doesn't get reached.".format(tid, len(nodes))) + else: + logger.info("RECURSIVE: tid-{}: all nodes are covered".format(tid)) + + # for node in nodes: + # logger.debug("node {},{}:{} doesn't reached".format(node.tid, node.name, node.start_time)) + + for node, times in self.processed_node.items(): + assert times == 1 + # if times > 1: + # logger.info("node {} is processed {} times".format(node.start_time, times)) + + return result + + @property + def record_length(self): + return sum(len(v) for v in self.records_by_tid.values()) + + @benchmark + def update_node(self): + tree_height = 0 + for tid, records in self.records_by_tid.items(): + if not records: + continue + + # each item is (parent_node, child_index) that it is visiting. + node_stack = [] + + record_index = 0 + current_node = self.tid2tree.get(tid) + child_index = 0 + + if current_node: + self.processed_node[current_node] += 1 + + while record_index < len(records): + '''In the loop, one pass will process one record. 
The basic logic is: + It will search from the node that last visited since both the records and tree is ordered already + 1. it current node contains the records, then find the exactly child which just embrace it. + 2. otherwise, find the parent node and set the child_index, so that the parent node could continue from previous visited node. + 3. if there is not any node contains the records, then all remaining records will be ignored. + ''' + record = records[record_index] + + if len(node_stack) > tree_height: + tree_height = len(node_stack) + + if current_node is None: + # 3. Ignore all remaining records. + logger.debug("could not find the node for tid %d, timestamp: %d, record index: %d, total records: %d" % (record.tid, record.ts, record_index, len(records))) + self.staled_records.append(records[record_index]) + record_index += 1 + continue + + if record.ts < current_node.start_time: + # this should only happens for root node. + logger.debug("record timestamp %d is less that the start time of %s" % (record.ts, current_node.name)) + # This record has no chance to be appended to following tree node. + self.staled_records.append(record) + record_index += 1 + continue + elif record.ts >= current_node.end_time: + # 2. pop parent node and update the child_index accordingly. + if len(node_stack) > 0: + current_node, child_index = node_stack.pop() + child_index += 1 + else: + # if there is not item in stack, set it to None + current_node = None + continue + + # 1. find the real node embrace the record. + # Find the node which contains the records from top to downmost. + while child_index < len(current_node.children): + if record.ts < current_node.children[child_index].start_time: + # if current record timestamp is less than the current child's startime, + # we will break the search and keep the child_index not change. So that next time + # we can continue from here. + # there is no any child contains the record.timestamp + # child_find is False at this case. 
+ break + elif record.ts >= current_node.children[child_index].end_time: + # if the record timestamp is greater than the children end time, increment to next child + # untile find one contains the records + child_index += 1 + else: + # current children contains the record + self.processed_node[current_node.children[child_index]] += 1 + + # push child index which will be visited, then continue the loop + node_stack.append((current_node, child_index)) + current_node = current_node.children[child_index] + child_index = 0 + + # the current_node is the one contains the record at this moment. + if is_operator_node(current_node): + if not BENCHMARK_MEMORY or record not in current_node.memory_records: + current_node.add_memory_record(record) + self.processed_records.append(record) + else: + self.staled_records.append(record) + + # the record is processed done, increment the index to process next one. + record_index += 1 + + # show summary information + if len(self.staled_records) > 0 and self.record_length > 0: + logger.debug("{} memory records are skipped in total {} memory records and only {} get processed".format(len(self.staled_records), self.record_length, len(self.processed_records))) + if tree_height > 0: + logger.debug("max tree height is {}".format(tree_height)) + + @benchmark + def update_node_recursive(self): + def _update_memory_event(record, node): + if BENCHMARK_MEMORY: + self.processed_node_normal.add(node) + + child_found = None + for child in node.children: + if record.ts >= child.start_time and record.ts < child.end_time: + child_found = child + break + if child_found is None: + # We use left close and right open deliberately here [start time, end time) + # to avoid one memory be calculated twice in case of it is equal to previous operator's end time + # and next operator's start time. + # the result might be different with PyTorch one. 
+ # https://github.com/pytorch/pytorch/blob/26c1f0f72e71c096648a16993484234399da307c/torch/autograd/profiler.py#L1147-L1152 + if is_operator_node(node) and record.ts >= node.start_time and record.ts < node.end_time: + if not BENCHMARK_MEMORY or record not in node.memory_records: + node.add_memory_record(record) + self.processed_records_normal.append(record) + else: + self.staled_records_normal.append(record) + else: + _update_memory_event(record, child_found) + + for tid, records in self.records_by_tid.items(): + root_node = self.tid2tree.get(tid) + if root_node is None: + logger.warning("could not find the root node for tid %d " % tid) + self.staled_records_normal.extend(records) + + for mem_record in records: + _update_memory_event(mem_record, root_node) + + if len(self.staled_records_normal) > 0 and self.record_length > 0: + logger.info("{} memory records are skipped in total {} memory records and only {} get processed".format(len(self.staled_records_normal), self.record_length, len(self.processed_records_normal))) diff --git a/tb_plugin/torch_tb_profiler/profiler/module_parser.py b/tb_plugin/torch_tb_profiler/profiler/module_parser.py index 9db6b6df9..b244968d6 100644 --- a/tb_plugin/torch_tb_profiler/profiler/module_parser.py +++ b/tb_plugin/torch_tb_profiler/profiler/module_parser.py @@ -1,92 +1,35 @@ # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # -------------------------------------------------------------------------- - import sys +from collections import defaultdict -from .trace import EventTypes from .. import utils +from .node import OperatorNode, is_operator_node +from .trace import EventTypes logger = utils.get_logger() -class BaseNode: - def __init__(self): - self.name = None - self.start_time = None - self.end_time = None - self.type = None - self.external_id = None # For consistency check. 
- - -class HostNode(BaseNode): - def __init__(self): - super(HostNode, self).__init__() - self.device_duration = 0 # Total time of Kernel, GPU Memcpy, GPU Memset. TODO: parallel multi-stream? - - -class OperatorNode(HostNode): - def __init__(self): - super(OperatorNode, self).__init__() - self.children = [] # OperatorNode and ProfilerStepNode. - self.runtimes = [] # RuntimeNode - self.input_shape = None - self.self_host_duration = 0 - self.self_device_duration = 0 - - def fill_stats(self): - self.self_host_duration = self.end_time - self.start_time - for child in self.children: - self.device_duration += child.device_duration - self.self_host_duration -= (child.end_time - child.start_time) - for rt in self.runtimes: - # From PyTorch 1.8 RC1, cpu_self_time does not include runtime's time. - # So here we keep consistent with it. - self.self_host_duration -= (rt.end_time - rt.start_time) - self.device_duration += rt.device_duration - self.self_device_duration += rt.device_duration - - -class ProfilerStepNode(OperatorNode): - def __init__(self): - super(ProfilerStepNode, self).__init__() - - -class RuntimeNode(HostNode): - def __init__(self): - super(RuntimeNode, self).__init__() - # One runtime could trigger more than one kernel, such as cudaLaunchCooperativeKernelMultiDevice. - self.device_nodes = None - - def fill_stats(self): - if self.device_nodes is None: - return - for device_node in self.device_nodes: - self.device_duration += device_node.end_time - device_node.start_time - - -class DeviceNode(BaseNode): - def __init__(self): - super(DeviceNode, self).__init__() - self.op_node = None # The cpu operator that launched it. - - class OperatorAgg: def __init__(self): self.name = None self.input_shape = None # Optional + self.call_stacks = set() # Optional self.calls = 0 self.host_duration = 0 self.device_duration = 0 self.self_host_duration = 0 self.self_device_duration = 0 # TODO: Think about adding these avgs to UI. 
- self.avg_host_duration = 0 - self.avg_device_duration = 0 - def average(self): - self.avg_host_duration = self.host_duration / self.calls - self.avg_device_duration = self.device_duration / self.calls + @property + def avg_host_duration(self): + return self.host_duration / self.calls + + @property + def avg_device_duration(self): + return self.device_duration / self.calls class KernelAggByNameOp: @@ -95,12 +38,22 @@ def __init__(self): self.op_name = None self.calls = 0 self.total_duration = 0 - self.avg_duration = 0 self.min_duration = sys.maxsize self.max_duration = 0 + self.blocks_per_sm = 0 + self.occupancy = 0 + + @property + def avg_duration(self): + return self.total_duration / self.calls - def average(self): - self.avg_duration = self.total_duration / self.calls + @property + def avg_blocks_per_sm(self): + return self.blocks_per_sm / self.total_duration if self.total_duration > 0 else 0 + + @property + def avg_occupancy(self): + return self.occupancy / self.total_duration if self.total_duration > 0 else 0 class ModuleParser: @@ -111,19 +64,20 @@ def __init__(self): self.op_list_groupby_name = [] # For Operator-view. self.op_list_groupby_name_input = [] # For Operator-view. self.kernel_list_groupby_name_op = {} # For Kernel-view. - self.runtime_node_list = [] # For Overall-view. - self.device_node_list = [] # For Overall-view. # host_node_list: list of OperatorNode and ProfilerStepNode. # zero_rt_list: list of RuntimeNode with external_id=0. - def _build_tree(self, host_node_list, zero_rt_list): + def _build_tree(self, host_node_list, zero_rt_list, tid): def build_tree_relationship(host_node_list, zero_rt_list): node_stack = [] - root_node = OperatorNode() - root_node.start_time = -sys.maxsize - 1 - root_node.end_time = sys.maxsize - root_node.runtimes = zero_rt_list # Give the list of RuntimeNode with external_id=0 to root node. 
+ root_node = OperatorNode( + name="CallTreeRoot", + start_time=-sys.maxsize - 1, + end_time=sys.maxsize, + type=EventTypes.PYTHON, + tid=tid, + runtimes=zero_rt_list) # Give the list of RuntimeNode with external_id=0 to root node. node_stack.append(root_node) for node in host_node_list: while True: # break loop when the node is inserted. @@ -131,6 +85,7 @@ def build_tree_relationship(host_node_list, zero_rt_list): if node.start_time < tail_node.end_time: if node.end_time <= tail_node.end_time: tail_node.children.append(node) + # node.parent_node = weakref.ref(tail_node) node_stack.append(node) else: logger.error("Error in input data: ranges on the same thread should not intersect!" @@ -141,8 +96,6 @@ def build_tree_relationship(host_node_list, zero_rt_list): break else: node_stack.pop() - root_node.name = "CallTreeRoot" - root_node.type = EventTypes.PYTHON return root_node # Merge the consecutive calls to same function into one. @@ -161,111 +114,26 @@ def remove_dup_nodes(node): for child in node.children: remove_dup_nodes(child) - # TODO: Replace recursive by using a stack, in case of too deep callstack. 
- def fill_stats(node): + def traverse_node(node): if node.type != EventTypes.RUNTIME: for child in node.children: - fill_stats(child) + traverse_node(child) for rt in node.runtimes: - fill_stats(rt) - if rt.device_nodes is not None: - for device_node in rt.device_nodes: - device_node.op_node = node - - if node.name == "CallTreeRoot": - node.start_time = node.end_time = None - for i in range(len(node.children)): - if node.children[i].start_time is not None: - node.start_time = node.children[i].start_time - break - for i in range(len(node.children) - 1, -1, -1): - if node.children[i].end_time is not None: - node.end_time = node.children[i].end_time - break + traverse_node(rt) - node.fill_stats() - if type(node) is OperatorNode and node.type == EventTypes.OPERATOR \ - and not (node.name.startswith("enumerate(DataLoader)#") and node.name.endswith(".__next__")) \ - and not node.name.startswith("Optimizer."): + if is_operator_node(node): self.cpp_op_list.append(node) if node.type == EventTypes.RUNTIME and node.device_nodes is not None: self.kernel_list.extend([n for n in node.device_nodes if n.type == EventTypes.KERNEL]) root_node = build_tree_relationship(host_node_list, zero_rt_list) remove_dup_nodes(root_node) - fill_stats(root_node) + root_node.replace_time_by_children() + root_node.fill_stats() + traverse_node(root_node) return root_node - def parse_events(self, events): - - def parse_event(event, corrid_to_device, corrid_to_runtime, externalid_to_runtime, tid2list, tid2zero_rt_list): - - def build_node(node, event): - node.name = event.name - node.start_time = event.ts - node.end_time = event.ts + event.duration - node.type = event.type - if "external id" in event.args: - node.external_id = event.args["external id"] - elif "External id" in event.args: - node.external_id = event.args["External id"] - - corrid = event.args["correlation"] if "correlation" in event.args else None - input_shape = event.args["Input dims"] if "Input dims" in event.args else None - tid = 
event.tid - if event.type in [EventTypes.KERNEL, EventTypes.MEMCPY, EventTypes.MEMSET]: - device_node = DeviceNode() - build_node(device_node, event) - if corrid in corrid_to_runtime: - rt_node = corrid_to_runtime[corrid] # Don't pop it because it may be used by next kernel. - if rt_node.device_nodes is None: - rt_node.device_nodes = [device_node] - else: - rt_node.device_nodes.append(device_node) - if rt_node.external_id != device_node.external_id: - logger.warning( - "Runtime and Device-op have same correlation id but with different external id!" - ) - else: - if corrid not in corrid_to_device: - corrid_to_device[corrid] = [device_node] - else: - corrid_to_device[corrid].append(device_node) - self.device_node_list.append(device_node) - elif event.type == EventTypes.RUNTIME: - rt_node = RuntimeNode() - build_node(rt_node, event) - corrid_to_runtime[corrid] = rt_node - if corrid in corrid_to_device: - rt_node.device_nodes = [] - rt_node.device_nodes.extend(corrid_to_device[corrid]) - for device_node in corrid_to_device[corrid]: - if rt_node.external_id != device_node.external_id: - logger.warning( - "Runtime and Device-op have same correlation id but with different external id!" - ) - del corrid_to_device[corrid] - if rt_node.external_id in externalid_to_runtime: - externalid_to_runtime[rt_node.external_id].append(rt_node) - else: - externalid_to_runtime[rt_node.external_id] = [rt_node] - # Some runtimes has external_id 0, which will not be correlated to any operator. - # So get them and attach them to root node. 
- if rt_node.external_id == 0: - if tid not in tid2zero_rt_list: - tid2zero_rt_list[tid] = [] - tid2zero_rt_list[tid].append(rt_node) - self.runtime_node_list.append(rt_node) - elif event.type in [EventTypes.PYTHON, EventTypes.OPERATOR, EventTypes.PROFILER_STEP]: - if event.type == EventTypes.PROFILER_STEP: - op_node = ProfilerStepNode() - else: - op_node = OperatorNode() - build_node(op_node, event) - op_node.input_shape = input_shape - if tid not in tid2list: - tid2list[tid] = [] - tid2list[tid].append(op_node) + def aggregate(self, context): def parse_ops(cpp_op_list): def aggregate(key_to_agg, key, op): @@ -274,6 +142,7 @@ def aggregate(key_to_agg, key, op): agg = key_to_agg[key] agg.name = op.name agg.input_shape = str(op.input_shape) + agg.call_stacks.add(op.call_stack) agg.calls += 1 agg.host_duration += op.end_time - op.start_time agg.device_duration += op.device_duration @@ -282,25 +151,35 @@ def aggregate(key_to_agg, key, op): return agg name_to_agg = {} - for op in cpp_op_list: - agg = aggregate(name_to_agg, op.name, op) - for _, agg in name_to_agg.items(): - agg.average() - op_list_groupby_name = list(name_to_agg.values()) - name_input_to_agg = {} + name_stack_to_agg = {} + name_input_stack_to_agg = {} for op in cpp_op_list: - name_input = op.name + "###" + str(op.input_shape) - agg = aggregate(name_input_to_agg, name_input, op) - for _, agg in name_input_to_agg.items(): - agg.average() - op_list_groupby_name_input = list(name_input_to_agg.values()) + aggregate(name_to_agg, op.name, op) + aggregate(name_input_to_agg, op.name + "###" + str(op.input_shape), op) + aggregate(name_stack_to_agg, op.name + "###" + str(op.call_stack), op) + aggregate(name_input_stack_to_agg, op.name + "###" + str(op.input_shape) + "###" + str(op.call_stack), op) - return op_list_groupby_name, op_list_groupby_name_input + op_list_groupby_name = list(name_to_agg.values()) + op_list_groupby_name_input = list(name_input_to_agg.values()) + stack_lists_group_by_name = 
defaultdict(list) + stack_lists_group_by_name_input = defaultdict(list) + for agg in name_stack_to_agg.values(): + assert (len(agg.call_stacks) == 1) + if list(agg.call_stacks)[0]: + stack_lists_group_by_name[agg.name].append(agg) + for agg in name_input_stack_to_agg.values(): + assert (len(agg.call_stacks) == 1) + if list(agg.call_stacks)[0]: + key = agg.name + "###" + str(agg.input_shape) + stack_lists_group_by_name_input[key].append(agg) + + return op_list_groupby_name, op_list_groupby_name_input, stack_lists_group_by_name, stack_lists_group_by_name_input def parse_kernels(kernel_list): name_op_to_agg = {} for kernel in kernel_list: + dur = kernel.end_time - kernel.start_time op_name = "N/A" if kernel.op_node is None else kernel.op_node.name key = kernel.name + "###" + op_name if key not in name_op_to_agg: @@ -309,49 +188,29 @@ def parse_kernels(kernel_list): agg.name = kernel.name agg.op_name = op_name agg.calls += 1 - dur = kernel.end_time - kernel.start_time agg.total_duration += dur agg.min_duration = min(agg.min_duration, dur) agg.max_duration = max(agg.max_duration, dur) - for _, agg in name_op_to_agg.items(): - agg.average() - kernel_list_groupby_name_op = list(name_op_to_agg.values()) + agg.blocks_per_sm += kernel.blocks_per_sm * dur + agg.occupancy += kernel.occupancy * dur + kernel_list_groupby_name_op = list(name_op_to_agg.values()) return kernel_list_groupby_name_op - # For OperatorNode and ProfilerStepNode: - # Use time interval containing relationship to build father-child correlation, - # which is consistent with autograd profiler. - # For RuntimeNode: - # Use external_id to build correlation with its father OperatorNode or ProfilerStepNode. - # Because in the case when RuntimeNode has duration 0 and starts at same time as a OperatorNode, - # just use interval containing relationship can't tell it is child or brother of the OperatorNode. - tid2list = {} # value is a list of OperatorNode and ProfilerStepNode. 
Do not include RuntimeNode - tid2zero_rt_list = {} # value is a list of RuntimeNode with external_id=0. They will be attached to root nodes. - corrid_to_device = {} # value is a list of DeviceNode - corrid_to_runtime = {} # value is a RuntimeNode - externalid_to_runtime = {} # value is a list of RuntimeNode - for event in events: - parse_event(event, corrid_to_device, corrid_to_runtime, externalid_to_runtime, tid2list, tid2zero_rt_list) + tid2list = context.tid2list + tid2zero_rt_list = context.tid2zero_rt_list + corrid_to_device = context.corrid_to_device + # Kernel that not owned by any operator should also be shown in kernel view # when group by "Kernel Name + Op Name". for _, device_nodes in corrid_to_device.items(): self.kernel_list.extend([n for n in device_nodes if n.type == EventTypes.KERNEL]) - # associate CUDA Runtimes with CPU events - for _, op_list in tid2list.items(): - for op in op_list: - if op.external_id in externalid_to_runtime: - op.runtimes.extend(externalid_to_runtime[op.external_id]) - del externalid_to_runtime[op.external_id] - for ext_id in externalid_to_runtime: - if ext_id != 0: - logger.warning("{} Runtime with external id {} don't correlate to any operator!".format( - len(externalid_to_runtime[ext_id]), ext_id)) + for tid, op_list in tid2list.items(): zero_rt_list = tid2zero_rt_list[tid] if tid in tid2zero_rt_list else [] # Note that when 2 start_time are equal, the one with bigger end_time should be ahead of the other. 
op_list.sort(key=lambda x: (x.start_time, -x.end_time)) - root_node = self._build_tree(op_list, zero_rt_list) - self.tid2tree[tid] = root_node - self.op_list_groupby_name, self.op_list_groupby_name_input = parse_ops(self.cpp_op_list) + root_node = self._build_tree(op_list, zero_rt_list, tid) + self.tid2tree[int(tid)] = root_node + self.op_list_groupby_name, self.op_list_groupby_name_input, self.stack_lists_group_by_name, self.stack_lists_group_by_name_input = parse_ops(self.cpp_op_list) self.kernel_list_groupby_name_op = parse_kernels(self.kernel_list) diff --git a/tb_plugin/torch_tb_profiler/profiler/node.py b/tb_plugin/torch_tb_profiler/profiler/node.py new file mode 100644 index 000000000..57f16c60f --- /dev/null +++ b/tb_plugin/torch_tb_profiler/profiler/node.py @@ -0,0 +1,177 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# ------------------------------------------------------------------------- +from abc import ABC +from collections import defaultdict +from enum import IntEnum + +from .trace import EventTypes + +MemoryMetrics = IntEnum('MemoryMetrics', ['SelfIncreaseSize', 'SelfAllocationSize', 'SelfAllocationCount', 'IncreaseSize', 'AllocationSize', 'AllocationCount', 'Total'], start=0) + +class BaseNode(ABC): + def __init__(self, name, start_time, end_time, type, tid, external_id): + self.name = name + self.start_time = start_time + self.end_time = end_time + self.type = type + self.tid = tid + self.external_id = external_id # For consistency check. 
+ + @staticmethod + def get_node_argument(event): + kwargs = {} + kwargs['name'] = event.name + kwargs['start_time'] = event.ts + kwargs['end_time'] = event.ts + event.duration + kwargs['type'] = event.type + kwargs['tid'] = event.tid + + if event.external_id is not None: + kwargs['external_id'] = event.external_id + + return kwargs + + +class CommunicationNode(BaseNode): + def __init__(self, name, start_time, end_time, type, tid, external_id, input_shape, input_type): + super().__init__(name, start_time, end_time, type, tid, external_id) + self.input_shape = input_shape + self.input_type = input_type + self.kernel_ranges = [] + self.total_time = 0 + self.real_time = 0 + self.step_name = None + + @classmethod + def create(cls, event, input_shape, input_type): + kwargs = BaseNode.get_node_argument(event) + return cls(input_shape=input_shape, input_type=input_type, **kwargs) + + +class HostNode(BaseNode): + def __init__(self, name, start_time, end_time, type, tid, external_id, device_duration=0): + super().__init__(name, start_time, end_time, type, tid, external_id) + self.device_duration = device_duration # Total time of Kernel, GPU Memcpy, GPU Memset. TODO: parallel multi-stream? + + +class OperatorNode(HostNode): + # Don't use [] as default parameters + # https://stackoverflow.com/questions/1132941/least-astonishment-and-the-mutable-default-argument?page=1&tab=votes#tab-top + # https://web.archive.org/web/20200221224620/http://effbot.org/zone/default-values.htm + def __init__(self, name, start_time, end_time, type, tid, external_id=None, device_duration=0, + children=None, runtimes=None, input_shape=None, input_type=None, call_stack=None, self_host_duration=0, self_device_duration=0): + super().__init__(name, start_time, end_time, type, tid, external_id, device_duration) + self.children = [] if children is None else children # OperatorNode and ProfilerStepNode. 
+ self.runtimes = [] if runtimes is None else runtimes # RuntimeNode + self.input_shape = input_shape + self.input_type = input_type + self.call_stack = call_stack + self.self_host_duration = self_host_duration + self.self_device_duration = self_device_duration + self.memory_records = [] + # self.parent_node = None + + def add_memory_record(self, record): + self.memory_records.append(record) + + def get_memory_metrics(self): + metrics_count = MemoryMetrics.SelfAllocationCount + 1 + memory_metrics = defaultdict(lambda: [0] * metrics_count) + for record in self.memory_records: + name = record.device_name + if name is None: + continue + + memory_metrics[name][MemoryMetrics.SelfIncreaseSize] += record.bytes + if record.bytes > 0: + memory_metrics[name][MemoryMetrics.SelfAllocationSize] += record.bytes + memory_metrics[name][MemoryMetrics.SelfAllocationCount] += 1 + + return memory_metrics + + def fill_stats(self): + # TODO: Replace recursive by using a stack, in case of too deep callstack. + for child in self.children: + child.fill_stats() + for rt in self.runtimes: + rt.fill_stats() + rt.update_device_op_node(self) + + self.self_host_duration = self.end_time - self.start_time + for child in self.children: + self.device_duration += child.device_duration + self.self_host_duration -= (child.end_time - child.start_time) + for rt in self.runtimes: + # From PyTorch 1.8 RC1, cpu_self_time does not include runtime's time. + # So here we keep consistent with it. 
+ self.self_host_duration -= (rt.end_time - rt.start_time) + self.device_duration += rt.device_duration + self.self_device_duration += rt.device_duration + + def replace_time_by_children(self): + self.start_time = next((child.start_time for child in self.children if child.start_time is not None), None) + self.end_time = next((child.end_time for child in reversed(self.children) if child.end_time is not None), None) + + @classmethod + def create(cls, event, input_shape, input_type, call_stack): + kwargs = BaseNode.get_node_argument(event) + return cls(input_shape=input_shape, input_type=input_type, call_stack=call_stack, **kwargs) + + +class ProfilerStepNode(OperatorNode): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + +class RuntimeNode(HostNode): + def __init__(self, name, start_time, end_time, type, tid, external_id=None, device_duration=0, + device_nodes=None): + super().__init__(name, start_time, end_time, type, tid, external_id, device_duration) + # One runtime could trigger more than one kernel, such as cudaLaunchCooperativeKernelMultiDevice. + self.device_nodes = device_nodes + + def fill_stats(self): + if self.device_nodes: + for device_node in self.device_nodes: + self.device_duration += device_node.end_time - device_node.start_time + + def update_device_op_node(self, node): + if self.device_nodes: + for device_node in self.device_nodes: + device_node.op_node = node + + @classmethod + def create(cls, event, device_nodes): + kwargs = BaseNode.get_node_argument(event) + return cls(device_nodes=device_nodes, **kwargs) + + +class DeviceNode(BaseNode): + def __init__(self, name, start_time, end_time, type, tid, external_id=None, + op_node=None, blocks_per_sm=None, occupancy=None): + super().__init__(name, start_time, end_time, type, tid, external_id) + self.op_node = op_node # The cpu operator that launched it. 
+ self.blocks_per_sm = blocks_per_sm + self.occupancy = occupancy + + @classmethod + def create(cls, event): + kwargs = DeviceNode.get_node_argument(event) + return cls(**kwargs) + + @staticmethod + def get_node_argument(event): + kwargs = BaseNode.get_node_argument(event) + if event.type == EventTypes.KERNEL: + kwargs["blocks_per_sm"] = event.args.get("blocks per SM", 0) + kwargs["occupancy"] = event.args.get("est. achieved occupancy %", 0) + return kwargs + +def is_operator_node(node): + if type(node) is OperatorNode and node.type == EventTypes.OPERATOR \ + and not (node.name.startswith("enumerate(DataLoader)#") and node.name.endswith(".__next__")) \ + and not node.name.startswith("Optimizer."): + return True + else: + return False diff --git a/tb_plugin/torch_tb_profiler/profiler/overall_parser.py b/tb_plugin/torch_tb_profiler/profiler/overall_parser.py index 39b8d227e..c18b936fd 100644 --- a/tb_plugin/torch_tb_profiler/profiler/overall_parser.py +++ b/tb_plugin/torch_tb_profiler/profiler/overall_parser.py @@ -1,365 +1,92 @@ # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # -------------------------------------------------------------------------- - -import sys - -from .trace import EventTypes from .. 
import utils +from .event_parser import ProfileRole +from .range_utils import (get_ranges_sum, intersection_ranges_lists, + merge_ranges, subtract_ranges_lists) logger = utils.get_logger() -def merge_ranges(src_ranges, is_sorted=False): - merged_ranges = [] - if len(src_ranges) > 0: - if not is_sorted: - src_ranges.sort(key=lambda x: x[0]) - src_id = 0 - merged_ranges.append( - (src_ranges[src_id][0], src_ranges[src_id][1])) - for src_id in range(1, len(src_ranges)): - dst_id = len(merged_ranges) - 1 - if src_ranges[src_id][1] > merged_ranges[dst_id][1]: - if src_ranges[src_id][0] <= merged_ranges[dst_id][1]: - merged_ranges[dst_id] = (merged_ranges[dst_id][0], src_ranges[src_id][1]) - else: - merged_ranges.append( - (src_ranges[src_id][0], src_ranges[src_id][1])) - return merged_ranges - - -def subtract_ranges_lists(range_list1, range_list2): - range_list_dst = [] - if len(range_list1) == 0: - return range_list_dst - if len(range_list2) == 0: - range_list_dst = list(range_list1) - return range_list_dst - r1 = range_list1[0] - r2 = range_list2[0] - i1 = i2 = 0 - while i1 < len(range_list1): - if i2 == len(range_list2): - range_list_dst.append(r1) - r1, i1 = pop_list(range_list1, i1) - elif r2[1] <= r1[0]: - r2, i2 = pop_list(range_list2, i2) - elif r2[0] <= r1[0] and r2[1] < r1[1]: - r1 = (r2[1], r1[1]) - r2, i2 = pop_list(range_list2, i2) - elif r2[0] <= r1[0]: - assert (r2[1] >= r1[1]) - r2 = (r1[1], r2[1]) - r1, i1 = pop_list(range_list1, i1) - elif r2[0] < r1[1]: - assert (r2[0] > r1[0]) - range_list_dst.append((r1[0], r2[0])) - r1 = (r2[0], r1[1]) - else: - assert (r2[0] >= r1[1]) - range_list_dst.append(r1) - r1, i1 = pop_list(range_list1, i1) - return range_list_dst - - -def intersection_ranges_lists(range_list1, range_list2): - range_list_dst = [] - if len(range_list1) == 0 or len(range_list2) == 0: - return range_list_dst - r1 = range_list1[0] - r2 = range_list2[0] - i1 = i2 = 0 - while i1 < len(range_list1): - if i2 == len(range_list2): - break - elif 
r2[1] <= r1[0]: - r2, i2 = pop_list(range_list2, i2) - elif r2[0] <= r1[0] and r2[1] < r1[1]: - assert (r2[1] > r1[0]) - range_list_dst.append((r1[0], r2[1])) - r1 = (r2[1], r1[1]) - r2, i2 = pop_list(range_list2, i2) - elif r2[0] <= r1[0]: - assert (r2[1] >= r1[1]) - range_list_dst.append(r1) - r2 = (r1[1], r2[1]) - r1, i1 = pop_list(range_list1, i1) - elif r2[1] < r1[1]: - assert (r2[0] > r1[0]) - range_list_dst.append(r2) - r1 = (r2[1], r1[1]) - r2, i2 = pop_list(range_list2, i2) - elif r2[0] < r1[1]: - assert (r2[1] >= r1[1]) - range_list_dst.append((r2[0], r1[1])) - r2 = (r1[1], r2[1]) - r1, i1 = pop_list(range_list1, i1) - else: - assert (r2[0] >= r1[1]) - r1, i1 = pop_list(range_list1, i1) - return range_list_dst - - -def get_ranges_sum(ranges): - sum = 0 - for range in ranges: - sum += (range[1] - range[0]) - return sum - - -def pop_list(range_list, index): - next_index = index + 1 - if next_index >= len(range_list): - return None, len(range_list) - next_item = range_list[next_index] - return next_item, next_index - - class OverallParser(object): class Costs: def __init__(self): - self.step_total_cost = 0 - self.kernel_cost = 0 - self.memcpy_cost = 0 - self.memset_cost = 0 - self.runtime_cost = 0 - self.dataloader_cost = 0 - self.cpuop_cost = 0 - self.other_cost = 0 + self.costs = [0] * len(ProfileRole) - def calculate_costs(self, statistics, step): - self.step_total_cost = step[1] - step[0] - self.kernel_cost = get_ranges_sum(statistics.kernel_cost_ranges) - self.memcpy_cost = get_ranges_sum(statistics.memcpy_cost_ranges) - self.memset_cost = get_ranges_sum(statistics.memset_cost_ranges) - self.runtime_cost = get_ranges_sum(statistics.runtime_cost_ranges) - self.dataloader_cost = get_ranges_sum(statistics.dataloader_cost_ranges) - self.cpuop_cost = get_ranges_sum(statistics.cpuop_cost_ranges) - self.other_cost = get_ranges_sum(statistics.other_cost_ranges) + @classmethod + def calculate_costs(cls, statistics, step): + cost_obj = cls() + for i in 
range(len(statistics.cost_ranges)): + cost_obj.costs[i] = get_ranges_sum(statistics.cost_ranges[i]) + cost_obj.costs[ProfileRole.Total] = step[1] - step[0] + return cost_obj class Statistics: - def __init__(self): - self.kernel_cost_ranges = [] - self.memcpy_cost_ranges = [] - self.memset_cost_ranges = [] - self.runtime_cost_ranges = [] - self.dataloader_cost_ranges = [] - self.cpuop_cost_ranges = [] - self.other_cost_ranges = [] + def __init__(self, cost_ranges): + if not cost_ranges: + raise ValueError("the cost ranges is None") - def intersection_with_step(self, step): - result = OverallParser.Statistics() - step = [step] - result.kernel_cost_ranges = intersection_ranges_lists(step, self.kernel_cost_ranges) - result.memcpy_cost_ranges = intersection_ranges_lists(step, self.memcpy_cost_ranges) - result.memset_cost_ranges = intersection_ranges_lists(step, self.memset_cost_ranges) - result.runtime_cost_ranges = intersection_ranges_lists(step, self.runtime_cost_ranges) - result.dataloader_cost_ranges = intersection_ranges_lists(step, self.dataloader_cost_ranges) - result.cpuop_cost_ranges = intersection_ranges_lists(step, self.cpuop_cost_ranges) - result.other_cost_ranges = intersection_ranges_lists(step, self.other_cost_ranges) - return result + self.cost_ranges = cost_ranges - def __init__(self): - self.kernel_ranges = [] - self.memcpy_ranges = [] - self.memset_ranges = [] - self.runtime_ranges = [] - self.dataloader_ranges = [] - self.cpuop_ranges = [] - self.steps = [] - self.steps_names = [] - self.has_runtime = False - self.has_kernel = False - self.has_memcpy_or_memset = False - self.min_ts = sys.maxsize - self.max_ts = -sys.maxsize - 1 - self.steps_costs = [] - self.avg_costs = OverallParser.Costs() + @classmethod + def create_statistics(cls, steps, role_ranges): + assert len(role_ranges) == ProfileRole.Total - 1 - # Update self.steps considering device side events launched by each host side step. - # Update self.steps_names if some tail steps are removed. 
- def update_steps_consider_device_side(self, runtime_node_list, device_node_list): - runtime_node_list = sorted(runtime_node_list, key=lambda x: x.start_time) - # Make sure self.steps is sorted by time. - self.steps = sorted(self.steps, key=lambda x: x[0]) - # Use similar code with two-way merge to get all runtimes inside each host-side step span, - # then record each step's min kernel start time and max kernel end time: - steps_device = [(sys.maxsize, -sys.maxsize - 1)] * len(self.steps) - steps_matched_device_nodes = [0] * len(self.steps) - i_step = 0 - i_runtime = 0 - step_device_min_ts = sys.maxsize - step_device_max_ts = -sys.maxsize - 1 - matched_device_nodes = set() - while i_step < len(self.steps) and i_runtime < len(runtime_node_list): - step_host_start_time = self.steps[i_step][0] - step_host_end_time = self.steps[i_step][1] - if runtime_node_list[i_runtime].start_time < step_host_start_time: - # This runtime is ahead of or intersects with this step span. Skip this runtime. - i_runtime += 1 - elif runtime_node_list[i_runtime].end_time <= step_host_end_time: - # and runtime_node_list[i_runtime].start_time >= step_host_start_time - # This runtime is inside this step span. Scan its device_nodes. - rt = runtime_node_list[i_runtime] - if rt.device_nodes is not None: - for device_node in rt.device_nodes: - step_device_min_ts = min(device_node.start_time, step_device_min_ts) - step_device_max_ts = max(device_node.end_time, step_device_max_ts) - matched_device_nodes.add(device_node) - steps_matched_device_nodes[i_step] += 1 - i_runtime += 1 - elif runtime_node_list[i_runtime].start_time < step_host_end_time: - # and runtime_node_list[i_runtime].end_time > step_host_end_time - # This runtime intersects with this step span. Skip this runtime. - i_runtime += 1 - else: - # runtime_node_list[i_runtime].start_time >= step_host_end_time - # This runtime starts after this step's end. Record and move forward this step. 
- steps_device[i_step] = (step_device_min_ts, step_device_max_ts) - i_step += 1 - step_device_min_ts = sys.maxsize - step_device_max_ts = -sys.maxsize - 1 - while i_step < len(self.steps): - # This step doesn't launch any device side event, just assign it as empty. - steps_device[i_step] = (step_device_min_ts, step_device_max_ts) - step_device_min_ts = sys.maxsize - step_device_max_ts = -sys.maxsize - 1 - i_step += 1 - # Change step time to device side on the condition that any step have device time. - is_use_gpu = (len(matched_device_nodes) > 0) - if is_use_gpu: - prev_step_end_time = self.steps[0][0] - if steps_device[0][0] != sys.maxsize: # When step 0 has device event. - for device_node in device_node_list: - if device_node not in matched_device_nodes: - # Now this device_node is not launched inside any step span. - if device_node.end_time < steps_device[0][0]: - prev_step_end_time = max(prev_step_end_time, device_node.end_time) - for i_step in range(len(self.steps)): - step_start_time = max(prev_step_end_time, self.steps[i_step][0]) - step_end_time = self.steps[i_step][1] - if steps_device[i_step][0] == sys.maxsize: # When step i_step has no device event. - # Assign to step_start_time when kernel is behind host step end. - step_end_time = max(step_end_time, step_start_time) + cost_ranges = [] + slots = [] + for role in role_ranges: + if slots: + range = intersection_ranges_lists(slots, role) else: - step_end_time = max(step_end_time, steps_device[i_step][1]) - if step_end_time < step_start_time: - logger.warning( - "Abnormal step_end_time of step {}: [{}, {}]".format( - i_step, step_start_time, step_end_time)) - step_end_time = step_start_time - self.steps[i_step] = (step_start_time, step_end_time) # Update step time considering device side. 
- prev_step_end_time = step_end_time + range = role + slots = merge_ranges(list(steps)) + cost_ranges.append(range) + slots = subtract_ranges_lists(slots, range) + # The last one is ProfileRole.Other + cost_ranges.append(slots) - is_remove_tail_steps = True # TODO: Use tensorboard argument instead. - if is_use_gpu and len(self.steps) > 1 and is_remove_tail_steps: - i_step = len(self.steps) - 1 - while i_step >= 0: - if steps_matched_device_nodes[i_step] > 0: - break - i_step -= 1 - if i_step >= 0: - keep_steps = i_step + 1 - if i_step > 0 and steps_matched_device_nodes[i_step - 1] * 0.8 > steps_matched_device_nodes[i_step]: - keep_steps = i_step - if keep_steps < len(self.steps): - logger.warning( - "Remove the last {} steps from overview. " - "Because the profiler may fail to capture all the kernels launched by these steps.".format( - len(self.steps) - keep_steps - )) - self.steps = self.steps[:keep_steps] - self.steps_names = self.steps_names[:keep_steps] + return cls(cost_ranges) + def intersection_with_step(self, step): + cost_ranges = [] + step = [step] + for range in self.cost_ranges: + cost_ranges.append(intersection_ranges_lists(step, range)) - def parse_events(self, events, runtime_node_list, device_node_list): - logger.debug("Overall, parse events") - for event in events: - self.parse_event(event) + return OverallParser.Statistics(cost_ranges) - if len(self.steps) == 0: - self.steps.append((self.min_ts, self.max_ts)) - self.steps_names.append("0") - self.update_steps_consider_device_side(runtime_node_list, device_node_list) - merged_steps = list(self.steps) - merged_steps = merge_ranges(merged_steps) + class StepCommunicationCosts: + def __init__(self): + self.computation = 0 + self.communication = 0 + self.overlap = 0 + self.other = 0 - self.kernel_ranges = merge_ranges(self.kernel_ranges) - self.memcpy_ranges = merge_ranges(self.memcpy_ranges) - self.memset_ranges = merge_ranges(self.memset_ranges) - self.runtime_ranges = 
merge_ranges(self.runtime_ranges) - self.dataloader_ranges = merge_ranges(self.dataloader_ranges) - self.cpuop_ranges = merge_ranges(self.cpuop_ranges) + def __init__(self): + self.steps_costs = [] + self.avg_costs = OverallParser.Costs() + self.communication_overlap = [] + def aggregate(self, steps, role_ranges): logger.debug("Overall, statistics") - global_stats = OverallParser.Statistics() - global_stats.kernel_cost_ranges = self.kernel_ranges - slots = subtract_ranges_lists(merged_steps, self.kernel_ranges) - global_stats.memcpy_cost_ranges = intersection_ranges_lists(slots, self.memcpy_ranges) - slots = subtract_ranges_lists(slots, global_stats.memcpy_cost_ranges) - global_stats.memset_cost_ranges = intersection_ranges_lists(slots, self.memset_ranges) - slots = subtract_ranges_lists(slots, global_stats.memset_cost_ranges) - global_stats.runtime_cost_ranges = intersection_ranges_lists(slots, self.runtime_ranges) - slots = subtract_ranges_lists(slots, global_stats.runtime_cost_ranges) - global_stats.dataloader_cost_ranges = intersection_ranges_lists(slots, self.dataloader_ranges) - slots = subtract_ranges_lists(slots, global_stats.dataloader_cost_ranges) - global_stats.cpuop_cost_ranges = intersection_ranges_lists(slots, self.cpuop_ranges) - slots = subtract_ranges_lists(slots, global_stats.cpuop_cost_ranges) - global_stats.other_cost_ranges = slots + global_stats = OverallParser.Statistics.create_statistics(steps, role_ranges) + comm_kernel_overlap = intersection_ranges_lists(role_ranges[ProfileRole.Kernel], role_ranges[ProfileRole.Communication]) logger.debug("Overall, aggregation") - valid_steps = len(self.steps) + valid_steps = len(steps) for i in range(valid_steps): - steps_stat = global_stats.intersection_with_step(self.steps[i]) - self.steps_costs.append(OverallParser.Costs()) - self.steps_costs[i].calculate_costs(steps_stat, self.steps[i]) - self.avg_costs.step_total_cost += self.steps_costs[i].step_total_cost - self.avg_costs.kernel_cost += 
self.steps_costs[i].kernel_cost - self.avg_costs.memcpy_cost += self.steps_costs[i].memcpy_cost - self.avg_costs.memset_cost += self.steps_costs[i].memset_cost - self.avg_costs.runtime_cost += self.steps_costs[i].runtime_cost - self.avg_costs.dataloader_cost += self.steps_costs[i].dataloader_cost - self.avg_costs.cpuop_cost += self.steps_costs[i].cpuop_cost - self.avg_costs.other_cost += self.steps_costs[i].other_cost - - self.avg_costs.step_total_cost /= valid_steps - self.avg_costs.kernel_cost /= valid_steps - self.avg_costs.memcpy_cost /= valid_steps - self.avg_costs.memset_cost /= valid_steps - self.avg_costs.runtime_cost /= valid_steps - self.avg_costs.dataloader_cost /= valid_steps - self.avg_costs.cpuop_cost /= valid_steps - self.avg_costs.other_cost /= valid_steps - - def parse_event(self, event): - ts = event.ts - dur = event.duration - evt_type = event.type - if evt_type == EventTypes.KERNEL: - self.kernel_ranges.append((ts, ts + dur)) - self.has_kernel = True - elif evt_type == EventTypes.MEMCPY: - self.memcpy_ranges.append((ts, ts + dur)) - self.has_memcpy_or_memset = True - elif evt_type == EventTypes.MEMSET: - self.memset_ranges.append((ts, ts + dur)) - self.has_memcpy_or_memset = True - elif evt_type == EventTypes.RUNTIME: - self.runtime_ranges.append((ts, ts + dur)) - self.has_runtime = True - elif evt_type == EventTypes.OPERATOR and event.name.startswith("enumerate(DataLoader)#") \ - and event.name.endswith(".__next__"): - self.dataloader_ranges.append((ts, ts + dur)) - elif event.type == EventTypes.PROFILER_STEP: - self.steps.append((ts, ts + dur)) - self.steps_names.append(str(event.step)) - elif evt_type in [EventTypes.PYTHON, EventTypes.OPERATOR]: - self.cpuop_ranges.append((ts, ts + dur)) - - # Record host side min and max time. 
- if evt_type in [EventTypes.PYTHON, EventTypes.OPERATOR, EventTypes.PROFILER_STEP]: - if ts < self.min_ts: - self.min_ts = ts - if ts + dur > self.max_ts: - self.max_ts = ts + dur + steps_stat = global_stats.intersection_with_step(steps[i]) + self.steps_costs.append(OverallParser.Costs.calculate_costs(steps_stat, steps[i])) + for cost_index in range(len(self.avg_costs.costs)): + self.avg_costs.costs[cost_index] += self.steps_costs[i].costs[cost_index] + + comm_costs = OverallParser.StepCommunicationCosts() + comm_costs.overlap = get_ranges_sum(intersection_ranges_lists([steps[i]], comm_kernel_overlap)) + comm_costs.computation = get_ranges_sum(intersection_ranges_lists([steps[i]], role_ranges[ProfileRole.Kernel])) + comm_costs.communication = get_ranges_sum(intersection_ranges_lists([steps[i]], role_ranges[ProfileRole.Communication])) + comm_costs.other = self.steps_costs[i].costs[ProfileRole.Total] + comm_costs.overlap - comm_costs.computation - comm_costs.communication + self.communication_overlap.append(comm_costs) + + for i in range(len(self.avg_costs.costs)): + self.avg_costs.costs[i] /= valid_steps diff --git a/tb_plugin/torch_tb_profiler/profiler/range_utils.py b/tb_plugin/torch_tb_profiler/profiler/range_utils.py new file mode 100644 index 000000000..f48a1e37b --- /dev/null +++ b/tb_plugin/torch_tb_profiler/profiler/range_utils.py @@ -0,0 +1,186 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# ------------------------------------------------------------------------- +from enum import IntEnum + +EndpointTypes = IntEnum('EndpointTypes', ['START', 'END'], start=0) + + +class EndPoint(object): + def __init__(self, ep_time, ep_pt_type, ep_value): + self.time = ep_time + self.pt_type = ep_pt_type + self.value = ep_value + + +# src_ranges: item of (start_time, end_time, value) +def merge_ranges_with_value(src_ranges): + merged_ranges = [] + if len(src_ranges) > 0: + # Build tuple of (time, type, value) + endpoints = [] + for r in src_ranges: + endpoints.append(EndPoint(r[0], EndpointTypes.START, r[2])) + endpoints.append(EndPoint(r[1], EndpointTypes.END, r[2])) + endpoints.sort(key=lambda x: [x.time, int(x.pt_type)]) # Make START in front of END if equal on time. + + last_endpoint_time = endpoints[0].time + last_value = endpoints[0].value + for i in range(1, len(endpoints)): + ep = endpoints[i] + if ep.time > last_endpoint_time and last_value > 0.0: + approximated_sm_efficiency = min(last_value, 1.0) + merged_ranges.append((last_endpoint_time, ep.time, approximated_sm_efficiency)) + last_endpoint_time = ep.time + if ep.pt_type == EndpointTypes.START: + last_value += ep.value + else: + last_value -= ep.value + + return merged_ranges + + +# range_list1 item is length 3. range_list2 item is length 2. +# Reture value's item is length 3. 
+def intersection_ranges_lists_with_value(range_list1, range_list2): + range_list_dst = [] + if len(range_list1) == 0 or len(range_list2) == 0: + return range_list_dst + r1 = range_list1[0] + r2 = range_list2[0] + i1 = i2 = 0 + while i1 < len(range_list1): + if i2 == len(range_list2): + break + elif r2[1] <= r1[0]: + r2, i2 = pop_list(range_list2, i2) + elif r2[0] <= r1[0] and r2[1] < r1[1]: + assert (r2[1] > r1[0]) + range_list_dst.append((r1[0], r2[1], r1[2])) + r1 = (r2[1], r1[1], r1[2]) + r2, i2 = pop_list(range_list2, i2) + elif r2[0] <= r1[0]: + assert (r2[1] >= r1[1]) + range_list_dst.append(r1) + r2 = (r1[1], r2[1]) + r1, i1 = pop_list(range_list1, i1) + elif r2[1] < r1[1]: + assert (r2[0] > r1[0]) + range_list_dst.append((r2[0], r2[1], r1[2])) + r1 = (r2[1], r1[1], r1[2]) + r2, i2 = pop_list(range_list2, i2) + elif r2[0] < r1[1]: + assert (r2[1] >= r1[1]) + range_list_dst.append((r2[0], r1[1], r1[2])) + r2 = (r1[1], r2[1]) + r1, i1 = pop_list(range_list1, i1) + else: + assert (r2[0] >= r1[1]) + r1, i1 = pop_list(range_list1, i1) + return range_list_dst + + +def subtract_ranges_lists(range_list1, range_list2): + range_list_dst = [] + if len(range_list1) == 0: + return range_list_dst + if len(range_list2) == 0: + range_list_dst = list(range_list1) + return range_list_dst + r1 = range_list1[0] + r2 = range_list2[0] + i1 = i2 = 0 + while i1 < len(range_list1): + if i2 == len(range_list2): + range_list_dst.append(r1) + r1, i1 = pop_list(range_list1, i1) + elif r2[1] <= r1[0]: + r2, i2 = pop_list(range_list2, i2) + elif r2[0] <= r1[0] and r2[1] < r1[1]: + r1 = (r2[1], r1[1]) + r2, i2 = pop_list(range_list2, i2) + elif r2[0] <= r1[0]: + assert (r2[1] >= r1[1]) + r2 = (r1[1], r2[1]) + r1, i1 = pop_list(range_list1, i1) + elif r2[0] < r1[1]: + assert (r2[0] > r1[0]) + range_list_dst.append((r1[0], r2[0])) + r1 = (r2[0], r1[1]) + else: + assert (r2[0] >= r1[1]) + range_list_dst.append(r1) + r1, i1 = pop_list(range_list1, i1) + return range_list_dst + + +def 
intersection_ranges_lists(range_list1, range_list2): + range_list_dst = [] + if len(range_list1) == 0 or len(range_list2) == 0: + return range_list_dst + r1 = range_list1[0] + r2 = range_list2[0] + i1 = i2 = 0 + while i1 < len(range_list1): + if i2 == len(range_list2): + break + elif r2[1] <= r1[0]: + r2, i2 = pop_list(range_list2, i2) + elif r2[0] <= r1[0] and r2[1] < r1[1]: + assert (r2[1] > r1[0]) + range_list_dst.append((r1[0], r2[1])) + r1 = (r2[1], r1[1]) + r2, i2 = pop_list(range_list2, i2) + elif r2[0] <= r1[0]: + assert (r2[1] >= r1[1]) + range_list_dst.append(r1) + r2 = (r1[1], r2[1]) + r1, i1 = pop_list(range_list1, i1) + elif r2[1] < r1[1]: + assert (r2[0] > r1[0]) + range_list_dst.append(r2) + r1 = (r2[1], r1[1]) + r2, i2 = pop_list(range_list2, i2) + elif r2[0] < r1[1]: + assert (r2[1] >= r1[1]) + range_list_dst.append((r2[0], r1[1])) + r2 = (r1[1], r2[1]) + r1, i1 = pop_list(range_list1, i1) + else: + assert (r2[0] >= r1[1]) + r1, i1 = pop_list(range_list1, i1) + return range_list_dst + + +def get_ranges_sum(ranges): + sum = 0 + for range in ranges: + sum += (range[1] - range[0]) + return sum + + +def pop_list(range_list, index): + next_index = index + 1 + if next_index >= len(range_list): + return None, len(range_list) + next_item = range_list[next_index] + return next_item, next_index + + +def merge_ranges(src_ranges, is_sorted=False): + merged_ranges = [] + if len(src_ranges) > 0: + if not is_sorted: + src_ranges.sort(key=lambda x: x[0]) + src_id = 0 + merged_ranges.append( + (src_ranges[src_id][0], src_ranges[src_id][1])) + for src_id in range(1, len(src_ranges)): + dst_id = len(merged_ranges) - 1 + if src_ranges[src_id][1] > merged_ranges[dst_id][1]: + if src_ranges[src_id][0] <= merged_ranges[dst_id][1]: + merged_ranges[dst_id] = (merged_ranges[dst_id][0], src_ranges[src_id][1]) + else: + merged_ranges.append( + (src_ranges[src_id][0], src_ranges[src_id][1])) + return merged_ranges diff --git 
a/tb_plugin/torch_tb_profiler/profiler/run_generator.py b/tb_plugin/torch_tb_profiler/profiler/run_generator.py index 588428121..ecadcb30c 100644 --- a/tb_plugin/torch_tb_profiler/profiler/run_generator.py +++ b/tb_plugin/torch_tb_profiler/profiler/run_generator.py @@ -1,33 +1,38 @@ # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # -------------------------------------------------------------------------- +from collections import OrderedDict -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from .. import consts, utils +from ..run import DistributedRunProfile, RunProfile +from .node import MemoryMetrics +from .overall_parser import ProfileRole -from .. import consts -from ..run import RunProfile +logger = utils.get_logger() class RunGenerator(object): - def __init__(self, worker, profile_data): + def __init__(self, worker, span, profile_data): self.worker = worker + self.span = span self.profile_data = profile_data def generate_run_profile(self): - profile_run = RunProfile(self.worker) + profile_run = RunProfile(self.worker, self.span) profile_run.has_runtime = self.profile_data.has_runtime profile_run.has_kernel = self.profile_data.has_kernel + profile_run.has_communication = self.profile_data.has_communication profile_run.has_memcpy_or_memset = self.profile_data.has_memcpy_or_memset profile_run.views.append(consts.OVERALL_VIEW) profile_run.overview = self._generate_overview() profile_run.views.append(consts.OP_VIEW) profile_run.operation_pie_by_name = self._generate_op_pie() - profile_run.operation_table_by_name = self._generate_op_table() + profile_run.operation_table_by_name = self._generate_op_table(self.profile_data.op_list_groupby_name) + profile_run.operation_stack_by_name = self._generate_op_table_for_stack(False) profile_run.operation_pie_by_name_input = self._generate_op_pie(True) - 
profile_run.operation_table_by_name_input = self._generate_op_table(True) + profile_run.operation_table_by_name_input = self._generate_op_table(self.profile_data.op_list_groupby_name_input, True) + profile_run.operation_stack_by_name_input = self._generate_op_table_for_stack(True) if self.profile_data.has_kernel: profile_run.views.append(consts.KERNEL_VIEW) @@ -37,6 +42,26 @@ def generate_run_profile(self): profile_run.views.append(consts.TRACE_VIEW) profile_run.trace_file_path = self.profile_data.trace_file_path + profile_run.gpu_util_buckets = self.profile_data.gpu_util_buckets + profile_run.approximated_sm_efficency_ranges = self.profile_data.approximated_sm_efficency_ranges + + profile_run.gpu_ids = self.profile_data.gpu_ids + profile_run.gpu_utilization = self.profile_data.gpu_utilization + profile_run.sm_efficency = self.profile_data.sm_efficency + profile_run.occupancy = self.profile_data.occupancy + profile_run.blocks_per_sm_count = self.profile_data.blocks_per_sm_count + profile_run.occupancy_count = self.profile_data.occupancy_count + + # add memory stats + if self.profile_data.has_memory_data: + profile_run.memory_view = self._generate_memory_view(self.profile_data.memory_stats) + profile_run.views.append(consts.MEMORY_VIEW) + + profile_run.gpu_infos = {} + for gpu_id in profile_run.gpu_ids: + gpu_info = RunGenerator._get_gpu_info(self.profile_data.device_props, gpu_id) + if gpu_info is not None: + profile_run.gpu_infos[gpu_id] = gpu_info return profile_run @@ -48,14 +73,14 @@ def build_part_time_str(part_cost, part_name): '{}: {}us
' \ 'Percentage: {}%' \ '' - percentage = round(100 * part_cost / costs.step_total_cost, 2) - return format_str.format(step_name, costs.step_total_cost, part_name, part_cost, percentage) + percentage = round(100 * part_cost / costs.costs[ProfileRole.Total], 2) + return format_str.format(step_name, costs.costs[ProfileRole.Total], part_name, part_cost, percentage) def build_avg_cost_dict(part_name, part_cost): cost_dict = {"name": part_name, "description": "", "value": round(part_cost), - "extra": round(100 * part_cost / self.profile_data.avg_costs.step_total_cost, 2)} + "extra": round(100 * part_cost / self.profile_data.avg_costs.costs[ProfileRole.Total], 2)} return cost_dict show_gpu = self.profile_data.has_runtime or self.profile_data.has_kernel or self.profile_data.has_memcpy_or_memset @@ -70,8 +95,12 @@ def build_avg_cost_dict(part_name, part_cost): {"type": "number", "name": "Memcpy"}, column_tootip, {"type": "number", "name": "Memset"}, - column_tootip, - {"type": "number", "name": "Runtime"}, + column_tootip]) + if self.profile_data.has_communication: + data["steps"]["columns"].extend([{"type": "number", "name": "Communication"}, + column_tootip]) + if show_gpu: + data["steps"]["columns"].extend([{"type": "number", "name": "Runtime"}, column_tootip]) data["steps"]["columns"].extend([{"type": "number", "name": "DataLoader"}, column_tootip, @@ -86,38 +115,49 @@ def build_avg_cost_dict(part_name, part_cost): step_name = self.profile_data.steps_names[i] row = [step_name] if show_gpu: - row.extend([costs.kernel_cost, - build_part_time_str(costs.kernel_cost, "Kernel"), - costs.memcpy_cost, - build_part_time_str(costs.memcpy_cost, "Memcpy"), - costs.memset_cost, - build_part_time_str(costs.memset_cost, "Memset"), - costs.runtime_cost, - build_part_time_str(costs.runtime_cost, "Runtime")]) - row.extend([costs.dataloader_cost, - build_part_time_str(costs.dataloader_cost, "DataLoader"), - costs.cpuop_cost, - build_part_time_str(costs.cpuop_cost, "CPU Exec"), - 
costs.other_cost, - build_part_time_str(costs.other_cost, "Other")]) + row.extend([costs.costs[ProfileRole.Kernel], + build_part_time_str(costs.costs[ProfileRole.Kernel], "Kernel"), + costs.costs[ProfileRole.Memcpy], + build_part_time_str(costs.costs[ProfileRole.Memcpy], "Memcpy"), + costs.costs[ProfileRole.Memset], + build_part_time_str(costs.costs[ProfileRole.Memset], "Memset")]) + if self.profile_data.has_communication: + row.extend([costs.costs[ProfileRole.Communication], + build_part_time_str(costs.costs[ProfileRole.Communication], "Communication")]) + if show_gpu: + row.extend([costs.costs[ProfileRole.Runtime], + build_part_time_str(costs.costs[ProfileRole.Runtime], "Runtime")]) + row.extend([costs.costs[ProfileRole.DataLoader], + build_part_time_str(costs.costs[ProfileRole.DataLoader], "DataLoader"), + costs.costs[ProfileRole.CpuOp], + build_part_time_str(costs.costs[ProfileRole.CpuOp], "CPU Exec"), + costs.costs[ProfileRole.Other], + build_part_time_str(costs.costs[ProfileRole.Other], "Other")]) data["steps"]["rows"].append(row) avg_costs = [] if show_gpu: avg_costs.extend([ - build_avg_cost_dict("Kernel", self.profile_data.avg_costs.kernel_cost), - build_avg_cost_dict("Memcpy", self.profile_data.avg_costs.memcpy_cost), - build_avg_cost_dict("Memset", self.profile_data.avg_costs.memset_cost), - build_avg_cost_dict("Runtime", self.profile_data.avg_costs.runtime_cost) + build_avg_cost_dict("Kernel", self.profile_data.avg_costs.costs[ProfileRole.Kernel]), + build_avg_cost_dict("Memcpy", self.profile_data.avg_costs.costs[ProfileRole.Memcpy]), + build_avg_cost_dict("Memset", self.profile_data.avg_costs.costs[ProfileRole.Memset]) + ]) + if self.profile_data.has_communication: + avg_costs.extend([ + build_avg_cost_dict("Communication", self.profile_data.avg_costs.costs[ProfileRole.Communication]) + ]) + if show_gpu: + avg_costs.extend([ + build_avg_cost_dict("Runtime", self.profile_data.avg_costs.costs[ProfileRole.Runtime]) ]) avg_costs.extend([ - 
build_avg_cost_dict("DataLoader", self.profile_data.avg_costs.dataloader_cost), - build_avg_cost_dict("CPU Exec", self.profile_data.avg_costs.cpuop_cost), - build_avg_cost_dict("Other", self.profile_data.avg_costs.other_cost) + build_avg_cost_dict("DataLoader", self.profile_data.avg_costs.costs[ProfileRole.DataLoader]), + build_avg_cost_dict("CPU Exec", self.profile_data.avg_costs.costs[ProfileRole.CpuOp]), + build_avg_cost_dict("Other", self.profile_data.avg_costs.costs[ProfileRole.Other]) ]) data["performance"] = [{"name": "Average Step Time", "description": "", - "value": round(self.profile_data.avg_costs.step_total_cost), + "value": round(self.profile_data.avg_costs.costs[ProfileRole.Total]), "extra": 100, "children": avg_costs}] if len(self.profile_data.recommendations) == 0: @@ -198,60 +238,86 @@ def _generate_op_pie(self, group_by_input_shape=False): return data - def _generate_op_table(self, group_by_input_shape=False): + def _generate_op_table(self, op_list, group_by_input_shape=False, call_stack=False): show_gpu = self.profile_data.has_kernel or self.profile_data.has_memcpy_or_memset - columns = [{"type": "string", "name": "Name"}] - if group_by_input_shape: - columns.append({"type": "string", "name": "Input Shape"}) - - columns.append({"type": "number", "name": "Calls"}) - if show_gpu: - columns.extend([{"type": "number", "name": "Device Self Duration (us)"}, - {"type": "number", "name": "Device Total Duration (us)"}]) - - columns.extend([{"type": "number", "name": "Host Self Duration (us)"}, - {"type": "number", "name": "Host Total Duration (us)"}]) - if group_by_input_shape: - op_list = self.profile_data.op_list_groupby_name_input + stack_list_dict = self.profile_data.stack_lists_group_by_name_input else: - op_list = self.profile_data.op_list_groupby_name + stack_list_dict = self.profile_data.stack_lists_group_by_name op_list = sorted(op_list, key=lambda x: x.self_device_duration if show_gpu else x.self_host_duration, reverse=True) - rows = [] + data = 
list() for op in op_list: # Whether device_duration & self_device_duration are accurate or not depends on the input tracing data. - row = [op.name] + row = dict() + row['name'] = op.name if group_by_input_shape: - row.append(op.input_shape) - - row.append(op.calls) + row['input_shape'] = op.input_shape + row['calls'] = op.calls if show_gpu: - row.extend([round(op.self_device_duration), round(op.device_duration)]) - - row.extend([round(op.self_host_duration), round(op.host_duration)]) - rows.append(row) + row['device_self_duration'] = round(op.self_device_duration) + row['device_total_duration'] = round(op.device_duration) + row['host_self_duration'] = round(op.self_host_duration) + row['host_total_duration'] = round(op.host_duration) + if call_stack: + row['call_stack'] = op.call_stacks.pop() + else: + if group_by_input_shape: + key = op.name + '###' + str(op.input_shape) + else: + key = op.name + row['has_call_stack'] = key in stack_list_dict + data.append(row) - data = {"data": {"columns": columns, "rows": rows}} return data + def _generate_op_table_for_stack(self, group_by_input_shape): + if group_by_input_shape: + stack_list_dict = self.profile_data.stack_lists_group_by_name_input + else: + stack_list_dict = self.profile_data.stack_lists_group_by_name + + result = dict() + for k,v in stack_list_dict.items(): + result[k] = self._generate_op_table(v, group_by_input_shape, True) + return result + + @staticmethod + def _get_gpu_metrics_columns(blocks_per_sm_count, occupancy_count): + columns = [] + if blocks_per_sm_count > 0: + columns.append({"type": "number", "name": "Mean Blocks Per SM", + "tooltip": consts.TOOLTIP_BLOCKS_PER_SM}) + if occupancy_count > 0: + columns.append({"type": "number", "name": "Mean Est. 
Achieved Occupancy (%)", + "tooltip": consts.TOOLTIP_OCCUPANCY}) + return columns + def _generate_kernel_op_table(self): table = {} table["columns"] = [{"type": "string", "name": "Name"}, {"type": "string", "name": "Operator"}] col_names = ["Calls", "Total Duration (us)", "Mean Duration (us)", "Max Duration (us)", "Min Duration (us)"] for column in col_names: table["columns"].append({"type": "number", "name": column}) + gpu_metrics_columns = RunGenerator._get_gpu_metrics_columns( + sum(self.profile_data.blocks_per_sm_count), sum(self.profile_data.occupancy_count)) + table["columns"].extend(gpu_metrics_columns) + table["rows"] = [] kernel_list = sorted(self.profile_data.kernel_list_groupby_name_op, key=lambda x: x.total_duration, reverse=True) for agg_by_name_op in kernel_list: kernel_op_row = [agg_by_name_op.name, agg_by_name_op.op_name, agg_by_name_op.calls, - agg_by_name_op.total_duration, agg_by_name_op.avg_duration, - agg_by_name_op.min_duration, agg_by_name_op.max_duration] + agg_by_name_op.total_duration, round(agg_by_name_op.avg_duration), + agg_by_name_op.max_duration, agg_by_name_op.min_duration] + if sum(self.profile_data.blocks_per_sm_count) > 0: + kernel_op_row.append(round(agg_by_name_op.avg_blocks_per_sm, 2)) + if sum(self.profile_data.occupancy_count) > 0: + kernel_op_row.append(round(agg_by_name_op.avg_occupancy, 2)) table["rows"].append(kernel_op_row) data = {"data": table} return data @@ -267,14 +333,209 @@ def _generate_kernel_table(self): table = {} table["columns"] = [{"type": "string", "name": "Name"}] columns = ["count", "sum", "mean", "max", "min"] + round_digits = [0, 0, 0, 0, 0] + if sum(self.profile_data.blocks_per_sm_count) > 0: + columns.append("blocks_per_sm") + round_digits.append(2) + if sum(self.profile_data.occupancy_count) > 0: + columns.append("occupancy") + round_digits.append(2) col_names = ["Calls", "Total Duration (us)", "Mean Duration (us)", "Max Duration (us)", "Min Duration (us)"] for column in col_names: 
table["columns"].append({"type": "number", "name": column}) + gpu_metrics_columns = RunGenerator._get_gpu_metrics_columns( + sum(self.profile_data.blocks_per_sm_count), sum(self.profile_data.occupancy_count)) + table["columns"].extend(gpu_metrics_columns) + table["rows"] = [] for _id, (name, row) in enumerate(self.profile_data.kernel_stat.iterrows()): kernel_row = [name] - for column in columns: - kernel_row.append(round(row[column])) + for i, column in enumerate(columns): + kernel_row.append(round(row[column]) if round_digits[i] == 0 + else round(row[column], round_digits[i])) table["rows"].append(kernel_row) data = {"data": table} return data + + def _generate_memory_view(self, memory_stats): + + data = OrderedDict() + result = { + "metadata": { + "title": "Memory View", + "default_device": "CPU", + "search": "Operator Name", + "sort": "Self Size Increase (KB)" + }, + "data": data + } + + columns_names = [ + ("Operator Name", "string", ""), + ("Calls", "number", "# of calls of the operator."), + ("Size Increase (KB)", "number", "The memory increase size include all children operators."), + ("Self Size Increase (KB)", "number", "The memory increase size associated with the operator itself."), + ("Allocation Count", "number", "The allocation count including all chidren operators."), + ("Self Allocation Count", "number", "The allocation count belonging to the operator itself."), + ("Allocation Size (KB)", "number", "The allocation size including all children operators."), + ("Self Allocation Size (KB)", "number", "The allocation size belonging to the operator itself.\nIt will sum up all allocation bytes without considering the memory free.") + ] + for name, memory in sorted(memory_stats.items()): + table = {} + + # Process columns + columns = [] + for col_name, col_type, tool_tip in columns_names: + if tool_tip: + columns.append({"type": col_type, "name": col_name, "tooltip": tool_tip}) + else: + columns.append({"type": col_type, "name": col_name}) + 
table["columns"] = columns + + # Process rows + rows = [] + for op_name, stat in sorted(memory.items()): + rows.append([ + op_name, + stat[6], + round(stat[MemoryMetrics.IncreaseSize] / 1024, 2), + round(stat[MemoryMetrics.SelfIncreaseSize] / 1024, 2), + stat[MemoryMetrics.AllocationCount], + stat[MemoryMetrics.SelfAllocationCount], + round(stat[MemoryMetrics.AllocationSize] / 1024, 2), + round(stat[MemoryMetrics.SelfAllocationSize] / 1024, 2) + ]) + table["rows"] = rows + + data[name] = table + return result + + @staticmethod + def _get_gpu_info(device_props, gpu_id): + if (device_props is None) or (gpu_id >= len(device_props)) or (gpu_id < 0): + return None + + device_prop = device_props[gpu_id] + gpu_info = {} + name = device_prop.get("name") + if name is not None: + gpu_info["Name"] = name + + mem = device_prop.get("totalGlobalMem") + if mem is not None: + gpu_info["Memory"] = "{} GB".format(round(float(mem) / 1024 / 1024 / 1024, 2)) + + major = device_prop.get("computeMajor") + minor = device_prop.get("computeMinor") + if major is not None and minor is not None: + gpu_info["Compute Capability"] = "{}.{}".format(major, minor) + + return gpu_info + + +class DistributedRunGenerator(object): + def __init__(self, all_profile_data, span): + self.all_profile_data = all_profile_data + self.span = span + + def generate_run_profile(self): + profile_run = DistributedRunProfile(self.span) + profile_run.views.append(consts.DISTRIBUTED_VIEW) + profile_run.gpu_info = self._generate_gpu_info() + profile_run.steps_to_overlap = self._generate_overlap_graph() + profile_run.steps_to_wait = self._generate_wait_graph() + profile_run.comm_ops = self._generate_ops_table() + return profile_run + + def _generate_gpu_info(self): + result = OrderedDict() + index = 0 + for data in sorted(self.all_profile_data, key=lambda x: x.worker): + if not data.device_props: + continue + + match = consts.NODE_PROCESS_PATTERN.match(data.worker) + if match: + node = match.group(1) + process_id = 
match.group(2) + else: + logger.warning("cannot parse node name from worker name {}".format(data.worker)) + node = data.worker + process_id = index + index += 1 + if node not in result: + result[node] = OrderedDict() + + process_id = "Process " + str(process_id) + result[node][process_id] = OrderedDict() + for used_device in data.used_devices: + gpu_info = RunGenerator._get_gpu_info(data.device_props, used_device) + if gpu_info is not None: + result[node][process_id]['GPU'+str(used_device)] = gpu_info + + if result: + for k,v in result.items(): + result[k] = OrderedDict(sorted(v.items())) + return { + "metadata": {"title": "Device Information"}, + "data": result + } + else: + return None + + def _generate_overlap_graph(self): + result = dict() + result["metadata"] = {"title": "Computation/Communication Overview", "legends": ["Computation", "Overlapping", "Communication", "Other"], "units": "us"} + steps_to_overlap = OrderedDict() + steps_to_overlap['all'] = OrderedDict() + for data in self.all_profile_data: + steps_to_overlap['all'][data.worker] = [0, 0, 0, 0] + step_number = len(data.steps_names) + for i,step_name in enumerate(data.steps_names): + steps_to_overlap.setdefault(step_name, OrderedDict()) + costs = data.comm_overlap_costs[i] + steps_to_overlap[step_name][data.worker] = [costs.computation - costs.overlap, costs.overlap, costs.communication - costs.overlap, costs.other] + steps_to_overlap['all'][data.worker] = [sum(x) for x in zip(steps_to_overlap['all'][data.worker], steps_to_overlap[step_name][data.worker])] + steps_to_overlap['all'][data.worker] = [x/step_number for x in steps_to_overlap['all'][data.worker]] + for k,v in steps_to_overlap.items(): + steps_to_overlap[k] = OrderedDict(sorted(v.items())) + result["data"] = steps_to_overlap + return result + + def _generate_wait_graph(self): + result = dict() + result["metadata"] = {"title": "Synchronizing/Communication Overview", "legends": ["Data Transfer Time", "Synchronizing Time"], "units": "us"} + 
steps_to_wait = OrderedDict() + + steps_to_wait['all'] = OrderedDict() + for data in self.all_profile_data: + steps_to_wait['all'][data.worker] = [0, 0] + step_number = len(data.step_comm_stats.values()) + for step,comm_stats in data.step_comm_stats.items(): + steps_to_wait.setdefault(step, OrderedDict())[data.worker] = [comm_stats[1], comm_stats[0]-comm_stats[1]] + steps_to_wait['all'][data.worker] = [sum(x) for x in zip(steps_to_wait['all'][data.worker], steps_to_wait[step][data.worker])] + steps_to_wait['all'][data.worker] = [x/step_number for x in steps_to_wait['all'][data.worker]] + + for k,v in steps_to_wait.items(): + steps_to_wait[k] = OrderedDict(sorted(v.items())) + result["data"] = steps_to_wait + return result + + def _generate_ops_table(self): + result = dict() + result["metadata"] = {"title": "Communication Operations Stats"} + workers_to_comm_ops = OrderedDict() + # Ignore the span for distributed view + for data in self.all_profile_data: + table = {} + table["columns"] = [{"type": "string", "name": "Name"}] + col_names = ["Calls", "Total Size (bytes)", "Avg Size (bytes)", "Total Latency (us)", "Avg Latency (us)", "Data Transfer Time (us)", "Avg Data Transfer Time (us)"] + for column in col_names: + table["columns"].append({"type": "number", "name": column}) + table["rows"] = [] + for op,stats in data.total_comm_stats.items(): + row = [op, stats[0], stats[1], round(stats[1]/stats[0]), stats[2], round(stats[2]/stats[0]), stats[3], round(stats[3]/stats[0])] + table["rows"].append(row) + workers_to_comm_ops[data.worker] = table + result["data"] = workers_to_comm_ops + return result diff --git a/tb_plugin/torch_tb_profiler/profiler/trace.py b/tb_plugin/torch_tb_profiler/profiler/trace.py index f92ba1713..6ab4adb92 100644 --- a/tb_plugin/torch_tb_profiler/profiler/trace.py +++ b/tb_plugin/torch_tb_profiler/profiler/trace.py @@ -1,124 +1,119 @@ # ------------------------------------------------------------------------- # Copyright (c) Microsoft 
Corporation. All rights reserved. # -------------------------------------------------------------------------- - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from enum import IntEnum from .. import utils -__all__ = ["EventTypes", "get_event_parser"] +__all__ = ["EventTypes", "create_event"] logger = utils.get_logger() +DeviceType = IntEnum('DeviceType', ['CPU', 'CUDA'], start=0) class EventTypes(object): - NET = "NetEvent" - OPERATOR = "OperatorEvent" - PROFILER_STEP = "ProfilerStepEvent" - RUNTIME = "RuntimeEvent" - KERNEL = "KernelEvent" - MEMCPY = "MemcpyEvent" - MEMSET = "MemsetEvent" - PYTHON = "PythonEvent" - - -class TraceEvent(object): + TRACE = "Trace" + OPERATOR = "Operator" + PROFILER_STEP = "ProfilerStep" + RUNTIME = "Runtime" + KERNEL = "Kernel" + MEMCPY = "Memcpy" + MEMSET = "Memset" + PYTHON = "Python" + MEMORY = "Memory" + +Supported_EventTypes = [v for k, v in vars(EventTypes).items() if not k.startswith("_") and v != EventTypes.PROFILER_STEP] + +class BaseEvent(object): def __init__(self, type, data): self.type = type + self.name = data.get("name") + self.ts = data.get("ts") + self.pid = data.get("pid") + self.tid = data.get("tid") + self.args = data.get("args", {}) + +class TraceEvent(BaseEvent): + def __init__(self, type, data): + super().__init__(type, data) self.category = data.get("cat", "") - self.name = data.get("name", None) - self.ts = data.get("ts", None) - self.duration = data.get("dur", None) - self.pid = data.get("pid", None) - self.tid = data.get("tid", None) - self.args = data.get("args", None) + self.duration = data.get("dur") - def to_dict(self): - return vars(self) + @property + def external_id(self): + extern_id = self.args.get("external id") + if extern_id is None: + extern_id = self.args.get("External id") + return extern_id -class NetEvent(TraceEvent): - def __init__(self, data): - super(NetEvent, self).__init__(EventTypes.NET, data) + @property + def 
callstack(self): + return self.args.get("Call stack", "") + @property + def input_shape(self): + shape = self.args.get("Input Dims") + if shape is None: + shape = self.args.get("Input dims") -class OperatorEvent(TraceEvent): - def __init__(self, data): - super(OperatorEvent, self).__init__(EventTypes.OPERATOR, data) + return shape + @property + def input_type(self): + return self.args.get("Input type") class ProfilerStepEvent(TraceEvent): def __init__(self, data): - super(ProfilerStepEvent, self).__init__(EventTypes.PROFILER_STEP, data) + super().__init__(EventTypes.PROFILER_STEP, data) # torch.profiler.profile.step will invoke record_function with name like "ProfilerStep#5" self.step = int(self.name.split("#")[1]) +class MemoryEvent(BaseEvent): + def __init__(self, type, data): + super().__init__(type, data) + self.scope = data.get("s", "") -class RuntimeEvent(TraceEvent): - def __init__(self, data): - super(RuntimeEvent, self).__init__(EventTypes.RUNTIME, data) - - -class KernelEvent(TraceEvent): - def __init__(self, data): - super(KernelEvent, self).__init__(EventTypes.KERNEL, data) - - -class MemcpyEvent(TraceEvent): - def __init__(self, data): - super(MemcpyEvent, self).__init__(EventTypes.MEMCPY, data) - - -class MemsetEvent(TraceEvent): - def __init__(self, data): - super(MemsetEvent, self).__init__(EventTypes.MEMSET, data) + @property + def device_type(self): + dtype = self.args.get("Device Type") + if dtype is None: + return None + try: + return DeviceType(dtype) + except ValueError: + return None -class PythonEvent(TraceEvent): - def __init__(self, data): - super(PythonEvent, self).__init__(EventTypes.PYTHON, data) - - -class EventParser(object): - def __init__(self): - self._handlers = { - "X": { - "Net": NetEvent, - "Operator": self._parse_operator_event, - "Runtime": RuntimeEvent, - "Kernel": KernelEvent, - "Memcpy": MemcpyEvent, - "Memset": MemsetEvent, - "Python": PythonEvent, - } - } - - def _get_handler(self, type=None, category=None): - handlers = 
self._handlers.get(type, None) - if handlers is None: + @property + def device_id(self): + return self.args.get("Device Id") + + @property + def bytes(self): + return self.args.get("Bytes", 0) + +def create_event(event): + try: + type = event.get("ph") + if type == "X": + return create_trace_event(event) + elif type == "i" and event.get('s') == 't': + return MemoryEvent(EventTypes.MEMORY, event) + else: return None - return handlers.get(category, None) + except Exception as ex: + logger.warning("Failed to parse profile event. Exception=%s. Event=%s", ex, event, exc_info=True) + raise - def parse(self, event): - try: - type = event.get("ph", None) - category = event.get("cat", None) - handler = self._get_handler(type, category) - if handler is None: - return None - return handler(event) - except Exception as ex: - logger.warning("Failed to parse profile event. Exception=%s. Event=%s", ex, event, exc_info=True) - raise ex - - def _parse_operator_event(self, event): +def create_trace_event(event): + category = event.get("cat") + if category == "Operator": name = event.get("name") - if name.startswith("ProfilerStep#"): + if name and name.startswith("ProfilerStep#"): return ProfilerStepEvent(event) - return OperatorEvent(event) - -def get_event_parser(version=None): - return EventParser() + if category in Supported_EventTypes: + return TraceEvent(category, event) + else: + return None diff --git a/tb_plugin/torch_tb_profiler/run.py b/tb_plugin/torch_tb_profiler/run.py index 4ee57ded5..2760111df 100644 --- a/tb_plugin/torch_tb_profiler/run.py +++ b/tb_plugin/torch_tb_profiler/run.py @@ -1,12 +1,7 @@ # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # -------------------------------------------------------------------------- - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from collections import OrderedDict +from . 
import consts class Run(object): @@ -17,39 +12,83 @@ class Run(object): def __init__(self, name, run_dir): self.name = name self.run_dir = run_dir - self.profiles = OrderedDict() + self.profiles = {} @property def workers(self): - return list(self.profiles.keys()) + # get full worker list and remove the duplicated + worker_list, _ = zip(*self.profiles.keys()) + worker_list = sorted(list(dict.fromkeys(worker_list))) + return worker_list @property def views(self): - profile = self.get_profile() - if profile is None: + view_set = set() + for profile in self.profiles.values(): + view_set.update(profile.views) + return sorted(list(view_set), key=lambda x: x.id) + + def get_workers(self, view): + worker_set = set() + for profile in self.profiles.values(): + for v in profile.views: + if v.display_name == view: + worker_set.add(profile.worker) + break + return sorted(list(worker_set)) + + def get_spans(self, worker=None): + if worker is not None: + spans = [s for w, s in self.profiles.keys() if w == worker] + else: + spans = [s for _, s in self.profiles.keys()] + + spans = list(set(spans)) + if len(spans) == 1 and spans[0] is None: return None - return profile.views + else: + return sorted(spans) def add_profile(self, profile): - self.profiles[profile.worker] = profile + span = profile.span + if span is None: + span = "default" + else: + span = str(span) + self.profiles[(profile.worker, span)] = profile + + def get_profile(self, worker, span): + if worker is None: + raise ValueError("the worker parameter is mandatory") - def get_profile(self, worker=None): if len(self.profiles) == 0: return None - if not worker: - return next(iter(self.profiles.values())) - return self.profiles.get(worker, None) + return self.profiles.get((worker, span), None) + + def get_profiles(self, *, worker=None, span=None): + # Note: we could not use if span to check it is None or not + # since the span 0 will be skipped at this case. 
+ if worker is not None and span is not None: + return self.profiles.get((worker, span), None) + elif worker is not None: + return [p for (w, s), p in self.profiles.items() if worker == w] + elif span is not None: + return [p for (w, s), p in self.profiles.items() if span == s] + else: + return self.profiles.values() class RunProfile(object): """ Cooked profiling result for a worker. For visualization purpose only. """ - def __init__(self, worker): + def __init__(self, worker, span): self.worker = worker + self.span = span self.views = [] self.has_runtime = False self.has_kernel = False + self.has_communication = False self.has_memcpy_or_memset = False self.overview = None self.operation_pie_by_name = None @@ -60,3 +99,135 @@ def __init__(self, worker): self.kernel_pie = None self.kernel_table = None self.trace_file_path = None + self.gpu_ids = None + self.gpu_utilization = None + self.sm_efficency = None + self.occupancy = None + self.gpu_util_buckets = None + self.approximated_sm_efficency_ranges = None + self.gpu_infos = None + + # memory stats + self.memory_view = None + + def get_gpu_metrics(self): + def build_trace_counter_gpu_util(gpu_id, start_time, counter_value): + util_json = "{{\"ph\":\"C\", \"name\":\"GPU {} Utilization\", " \ + "\"pid\":{}, \"ts\":{}, " \ + "\"args\":{{\"GPU Utilization\":{}}}}}".format( + gpu_id, gpu_id, start_time, counter_value + ) + return util_json + + def build_trace_counter_sm_efficiency(gpu_id, start_time, counter_value): + util_json = "{{\"ph\":\"C\", \"name\":\"GPU {} Est. SM Efficiency\", " \ + "\"pid\":{}, \"ts\":{}, " \ + "\"args\":{{\"Est. 
SM Efficiency\":{}}}}}".format( + gpu_id, gpu_id, start_time, counter_value + ) + return util_json + + def add_trace_counter_gpu_util(gpu_id, start_time, counter_value, counter_json_list): + json_str = build_trace_counter_gpu_util(gpu_id, start_time, counter_value) + counter_json_list.append(json_str) + + def add_trace_counter_sm_efficiency(gpu_id, start_time, end_time, value, counter_json_list): + efficiency_json_start = build_trace_counter_sm_efficiency(gpu_id, start_time, value) + efficiency_json_finish = build_trace_counter_sm_efficiency(gpu_id, end_time, 0) + counter_json_list.append(efficiency_json_start) + counter_json_list.append(efficiency_json_finish) + + counter_json_list = [] + for gpu_id, buckets in enumerate(self.gpu_util_buckets): + if len(buckets) > 0: + # Adding 1 as baseline. To avoid misleading virtualization when the max value is less than 1. + add_trace_counter_gpu_util(gpu_id, buckets[0][0], 1, counter_json_list) + add_trace_counter_gpu_util(gpu_id, buckets[0][0], 0, counter_json_list) + for b in buckets: + add_trace_counter_gpu_util(gpu_id, b[0], b[1], counter_json_list) + for gpu_id, ranges in enumerate(self.approximated_sm_efficency_ranges): + buckets = self.gpu_util_buckets[gpu_id] + if len(ranges) > 0 and len(buckets) > 0: + # Adding 1 as baseline. To avoid misleading virtualization when the max value is less than 1. 
+ add_trace_counter_sm_efficiency(gpu_id, buckets[0][0], buckets[0][0], 1, counter_json_list) + for r in ranges: + add_trace_counter_sm_efficiency(gpu_id, r[0], r[1], r[2], counter_json_list) + + counter_json_str = ", {}".format(", ".join(counter_json_list)) + counter_json_bytes = bytes(counter_json_str, 'utf-8') + return counter_json_bytes + + def append_gpu_metrics(self, raw_data): + counter_json_bytes = self.get_gpu_metrics() + + raw_data_without_tail = raw_data[: raw_data.rfind(b']')] + raw_data = b''.join([raw_data_without_tail, counter_json_bytes, b']}']) + + import gzip + raw_data = gzip.compress(raw_data, 1) + return raw_data + + + def get_gpu_metrics_data_tooltip(self): + def get_gpu_metrics_data(profile): + gpu_metrics_data = [] + has_sm_efficiency = False + has_occupancy = False + is_first = True + gpu_info_columns = ["Name", "Memory", "Compute Capability"] + for gpu_id in profile.gpu_ids: + if not is_first: + # Append separator line for beautiful to see. + gpu_metrics_data.append({"title": "
", + "value": ""}) + + gpu_metrics_data.append({"title": "GPU {}:".format(gpu_id), + "value": ""}) + gpu_info = profile.gpu_infos.get(gpu_id, None) + if gpu_info is not None: + for key in gpu_info_columns: + if key in gpu_info: + gpu_metrics_data.append({"title": key, + "value": gpu_info[key]}) + + gpu_metrics_data.append({"title": "GPU Utilization", + "value": "{} %".format( + round(profile.gpu_utilization[gpu_id] * 100, 2))}) + if profile.blocks_per_sm_count[gpu_id] > 0: + gpu_metrics_data.append({"title": "Est. SM Efficiency", + "value": "{} %".format( + round(profile.sm_efficency[gpu_id] * 100, 2))}) + has_sm_efficiency = True + if profile.occupancy_count[gpu_id] > 0: + gpu_metrics_data.append({"title": "Est. Achieved Occupancy", + "value": "{} %".format(round(profile.occupancy[gpu_id], 2))}) + has_occupancy = True + is_first = False + return gpu_metrics_data, has_occupancy, has_sm_efficiency + + def get_gpu_metrics_tooltip(has_sm_efficiency, has_occupancy): + tooltip_summary = "The GPU usage metrics:\n" + tooltip = "{}\n{}".format(tooltip_summary, consts.TOOLTIP_GPU_UTIL) + if has_sm_efficiency: + tooltip += "\n" + consts.TOOLTIP_SM_EFFICIENCY + if has_occupancy: + tooltip += "\n" + consts.TOOLTIP_OCCUPANCY + return tooltip + + data, has_occupancy, has_sm_efficiency = get_gpu_metrics_data(self) + tooltip = get_gpu_metrics_tooltip(has_occupancy, has_sm_efficiency) + return data, tooltip + + +class DistributedRunProfile(object): + """ Profiling all workers in a view. + """ + + def __init__(self, span): + self.worker = 'All' + self.span = span + self.views = [] + self.gpu_info = None + self.steps_to_overlap = None + self.steps_to_wait = None + self.comm_ops = None diff --git a/tb_plugin/torch_tb_profiler/static/index.html b/tb_plugin/torch_tb_profiler/static/index.html index 55350cccb..2d2ebf57c 100644 --- a/tb_plugin/torch_tb_profiler/static/index.html +++ b/tb_plugin/torch_tb_profiler/static/index.html @@ -1,2 +1,2 @@
\ No newline at end of file +(()=>{var n={676:(n,e,t)=>{"use strict";function r(n,e){(null==e||e>n.length)&&(e=n.length);for(var t=0,r=new Array(e);tr})},9968:(n,e,t)=>{"use strict";function r(n){if(Array.isArray(n))return n}t.d(e,{Z:()=>r})},3349:(n,e,t)=>{"use strict";function r(n){if(void 0===n)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return n}t.d(e,{Z:()=>r})},5991:(n,e,t)=>{"use strict";function r(n,e){for(var t=0;to})},6156:(n,e,t)=>{"use strict";function r(n,e,t){return e in n?Object.defineProperty(n,e,{value:t,enumerable:!0,configurable:!0,writable:!0}):n[e]=t,n}t.d(e,{Z:()=>r})},2122:(n,e,t)=>{"use strict";function r(){return(r=Object.assign||function(n){for(var e=1;er})},1788:(n,e,t)=>{"use strict";t.d(e,{Z:()=>o});var r=t(4665);function o(n,e){n.prototype=Object.create(e.prototype),n.prototype.constructor=n,(0,r.Z)(n,e)}},6410:(n,e,t)=>{"use strict";function r(n){if("undefined"!=typeof Symbol&&Symbol.iterator in Object(n))return Array.from(n)}t.d(e,{Z:()=>r})},8970:(n,e,t)=>{"use strict";function r(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}t.d(e,{Z:()=>r})},1253:(n,e,t)=>{"use strict";t.d(e,{Z:()=>o});var r=t(9756);function o(n,e){if(null==n)return{};var t,o,a=(0,r.Z)(n,e);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(n);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(n,t)&&(a[t]=n[t])}return a}},9756:(n,e,t)=>{"use strict";function r(n,e){if(null==n)return{};var t,r,o={},a=Object.keys(n);for(r=0;r=0||(o[t]=n[t]);return o}t.d(e,{Z:()=>r})},4665:(n,e,t)=>{"use strict";function r(n,e){return(r=Object.setPrototypeOf||function(n,e){return n.__proto__=e,n})(n,e)}t.d(e,{Z:()=>r})},8481:(n,e,t)=>{"use strict";t.d(e,{Z:()=>i});var r=t(9968),o=t(2961),a=t(8970);function i(n,e){return(0,r.Z)(n)||function(n,e){if("undefined"!=typeof Symbol&&Symbol.iterator in 
Object(n)){var t=[],r=!0,o=!1,a=void 0;try{for(var i,l=n[Symbol.iterator]();!(r=(i=l.next()).done)&&(t.push(i.value),!e||t.length!==e);r=!0);}catch(n){o=!0,a=n}finally{try{r||null==l.return||l.return()}finally{if(o)throw a}}return t}}(n,e)||(0,o.Z)(n,e)||(0,a.Z)()}},5061:(n,e,t)=>{"use strict";t.d(e,{Z:()=>i});var r=t(676),o=t(6410),a=t(2961);function i(n){return function(n){if(Array.isArray(n))return(0,r.Z)(n)}(n)||(0,o.Z)(n)||(0,a.Z)(n)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}},484:(n,e,t)=>{"use strict";function r(n){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(n){return typeof n}:function(n){return n&&"function"==typeof Symbol&&n.constructor===Symbol&&n!==Symbol.prototype?"symbol":typeof n})(n)}t.d(e,{Z:()=>r})},2961:(n,e,t)=>{"use strict";t.d(e,{Z:()=>o});var r=t(676);function o(n,e){if(n){if("string"==typeof n)return(0,r.Z)(n,e);var t=Object.prototype.toString.call(n).slice(8,-1);return"Object"===t&&n.constructor&&(t=n.constructor.name),"Map"===t||"Set"===t?Array.from(n):"Arguments"===t||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(t)?(0,r.Z)(n,e):void 0}}},5318:n=>{n.exports=function(n){return n&&n.__esModule?n:{default:n}},n.exports.default=n.exports,n.exports.__esModule=!0},862:(n,e,t)=>{var r=t(8).default;function o(){if("function"!=typeof WeakMap)return null;var n=new WeakMap;return o=function(){return n},n}n.exports=function(n){if(n&&n.__esModule)return n;if(null===n||"object"!==r(n)&&"function"!=typeof n)return{default:n};var e=o();if(e&&e.has(n))return e.get(n);var t={},a=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var i in n)if(Object.prototype.hasOwnProperty.call(n,i)){var l=a?Object.getOwnPropertyDescriptor(n,i):null;l&&(l.get||l.set)?Object.defineProperty(t,i,l):t[i]=n[i]}return 
t.default=n,e&&e.set(n,t),t},n.exports.default=n.exports,n.exports.__esModule=!0},8:n=>{function e(t){return"function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?(n.exports=e=function(n){return typeof n},n.exports.default=n.exports,n.exports.__esModule=!0):(n.exports=e=function(n){return n&&"function"==typeof Symbol&&n.constructor===Symbol&&n!==Symbol.prototype?"symbol":typeof n},n.exports.default=n.exports,n.exports.__esModule=!0),e(t)}n.exports=e,n.exports.default=n.exports,n.exports.__esModule=!0},7757:(n,e,t)=>{n.exports=t(5666)},9693:(n,e,t)=>{"use strict";t.d(e,{mi:()=>l,U1:()=>c,_j:()=>u,$n:()=>d});var r=t(288);function o(n){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:0,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:1;return Math.min(Math.max(e,n),t)}function a(n){if(n.type)return n;if("#"===n.charAt(0))return a(function(n){n=n.substr(1);var e=new RegExp(".{1,".concat(n.length>=6?2:1,"}"),"g"),t=n.match(e);return t&&1===t[0].length&&(t=t.map((function(n){return n+n}))),t?"rgb".concat(4===t.length?"a":"","(").concat(t.map((function(n,e){return e<3?parseInt(n,16):Math.round(parseInt(n,16)/255*1e3)/1e3})).join(", "),")"):""}(n));var e=n.indexOf("("),t=n.substring(0,e);if(-1===["rgb","rgba","hsl","hsla"].indexOf(t))throw new Error((0,r.Z)(3,n));var o=n.substring(e+1,n.length-1).split(",");return{type:t,values:o=o.map((function(n){return parseFloat(n)}))}}function i(n){var e=n.type,t=n.values;return-1!==e.indexOf("rgb")?t=t.map((function(n,e){return e<3?parseInt(n,10):n})):-1!==e.indexOf("hsl")&&(t[1]="".concat(t[1],"%"),t[2]="".concat(t[2],"%")),"".concat(e,"(").concat(t.join(", "),")")}function l(n,e){var t=s(n),r=s(e);return(Math.max(t,r)+.05)/(Math.min(t,r)+.05)}function s(n){var e="hsl"===(n=a(n)).type?a(function(n){var e=(n=a(n)).values,t=e[0],r=e[1]/100,o=e[2]/100,l=r*Math.min(o,1-o),s=function(n){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:(n+t/30)%12;return 
o-l*Math.max(Math.min(e-3,9-e,1),-1)},c="rgb",u=[Math.round(255*s(0)),Math.round(255*s(8)),Math.round(255*s(4))];return"hsla"===n.type&&(c+="a",u.push(e[3])),i({type:c,values:u})}(n)).values:n.values;return e=e.map((function(n){return(n/=255)<=.03928?n/12.92:Math.pow((n+.055)/1.055,2.4)})),Number((.2126*e[0]+.7152*e[1]+.0722*e[2]).toFixed(3))}function c(n,e){return n=a(n),e=o(e),"rgb"!==n.type&&"hsl"!==n.type||(n.type+="a"),n.values[3]=e,i(n)}function u(n,e){if(n=a(n),e=o(e),-1!==n.type.indexOf("hsl"))n.values[2]*=1-e;else if(-1!==n.type.indexOf("rgb"))for(var t=0;t<3;t+=1)n.values[t]*=1-e;return i(n)}function d(n,e){if(n=a(n),e=o(e),-1!==n.type.indexOf("hsl"))n.values[2]+=(100-n.values[2])*e;else if(-1!==n.type.indexOf("rgb"))for(var t=0;t<3;t+=1)n.values[t]+=(255-n.values[t])*e;return i(n)}},9112:(n,e,t)=>{"use strict";t.d(e,{Z:()=>ln});var r=t(1253),o=t(5953),a=t(2122),i=["xs","sm","md","lg","xl"];function l(n){var e=n.values,t=void 0===e?{xs:0,sm:600,md:960,lg:1280,xl:1920}:e,o=n.unit,l=void 0===o?"px":o,s=n.step,c=void 0===s?5:s,u=(0,r.Z)(n,["values","unit","step"]);function d(n){var e="number"==typeof t[n]?t[n]:n;return"@media (min-width:".concat(e).concat(l,")")}function f(n,e){var r=i.indexOf(e);return r===i.length-1?d(n):"@media (min-width:".concat("number"==typeof t[n]?t[n]:n).concat(l,") and ")+"(max-width:".concat((-1!==r&&"number"==typeof t[i[r+1]]?t[i[r+1]]:e)-c/100).concat(l,")")}return(0,a.Z)({keys:i,values:t,up:d,down:function(n){var e=i.indexOf(n)+1,r=t[i[e]];return e===i.length?d("xs"):"@media (max-width:".concat(("number"==typeof r&&e>0?r:n)-c/100).concat(l,")")},between:f,only:function(n){return f(n,n)},width:function(n){return t[n]}},u)}var s=t(6156);function c(n,e,t){var r;return(0,a.Z)({gutters:function(){var t=arguments.length>0&&void 
0!==arguments[0]?arguments[0]:{};return(0,a.Z)({paddingLeft:e(2),paddingRight:e(2)},t,(0,s.Z)({},n.up("sm"),(0,a.Z)({paddingLeft:e(3),paddingRight:e(3)},t[n.up("sm")])))},toolbar:(r={minHeight:56},(0,s.Z)(r,"".concat(n.up("xs")," and (orientation: landscape)"),{minHeight:48}),(0,s.Z)(r,n.up("sm"),{minHeight:64}),r)},t)}var u=t(288);const d={black:"#000",white:"#fff"},f={50:"#fafafa",100:"#f5f5f5",200:"#eeeeee",300:"#e0e0e0",400:"#bdbdbd",500:"#9e9e9e",600:"#757575",700:"#616161",800:"#424242",900:"#212121",A100:"#d5d5d5",A200:"#aaaaaa",A400:"#303030",A700:"#616161"},p="#7986cb",m="#3f51b5",h="#303f9f",g="#ff4081",b="#f50057",v="#c51162",y="#e57373",x="#f44336",w="#d32f2f",k="#ffb74d",E="#ff9800",C="#f57c00",S="#64b5f6",O="#2196f3",T="#1976d2",N="#81c784",P="#4caf50",M="#388e3c";var Z=t(9693),R={text:{primary:"rgba(0, 0, 0, 0.87)",secondary:"rgba(0, 0, 0, 0.54)",disabled:"rgba(0, 0, 0, 0.38)",hint:"rgba(0, 0, 0, 0.38)"},divider:"rgba(0, 0, 0, 0.12)",background:{paper:d.white,default:f[50]},action:{active:"rgba(0, 0, 0, 0.54)",hover:"rgba(0, 0, 0, 0.04)",hoverOpacity:.04,selected:"rgba(0, 0, 0, 0.08)",selectedOpacity:.08,disabled:"rgba(0, 0, 0, 0.26)",disabledBackground:"rgba(0, 0, 0, 0.12)",disabledOpacity:.38,focus:"rgba(0, 0, 0, 0.12)",focusOpacity:.12,activatedOpacity:.12}},I={text:{primary:d.white,secondary:"rgba(255, 255, 255, 0.7)",disabled:"rgba(255, 255, 255, 0.5)",hint:"rgba(255, 255, 255, 0.5)",icon:"rgba(255, 255, 255, 0.5)"},divider:"rgba(255, 255, 255, 0.12)",background:{paper:f[800],default:"#303030"},action:{active:d.white,hover:"rgba(255, 255, 255, 0.08)",hoverOpacity:.08,selected:"rgba(255, 255, 255, 0.16)",selectedOpacity:.16,disabled:"rgba(255, 255, 255, 0.3)",disabledBackground:"rgba(255, 255, 255, 0.12)",disabledOpacity:.38,focus:"rgba(255, 255, 255, 0.12)",focusOpacity:.12,activatedOpacity:.24}};function z(n,e,t,r){var 
o=r.light||r,a=r.dark||1.5*r;n[e]||(n.hasOwnProperty(t)?n[e]=n[t]:"light"===e?n.light=(0,Z.$n)(n.main,o):"dark"===e&&(n.dark=(0,Z._j)(n.main,a)))}function _(n){var e=n.primary,t=void 0===e?{light:p,main:m,dark:h}:e,i=n.secondary,l=void 0===i?{light:g,main:b,dark:v}:i,s=n.error,c=void 0===s?{light:y,main:x,dark:w}:s,_=n.warning,j=void 0===_?{light:k,main:E,dark:C}:_,D=n.info,A=void 0===D?{light:S,main:O,dark:T}:D,L=n.success,F=void 0===L?{light:N,main:P,dark:M}:L,W=n.type,B=void 0===W?"light":W,H=n.contrastThreshold,U=void 0===H?3:H,K=n.tonalOffset,V=void 0===K?.2:K,G=(0,r.Z)(n,["primary","secondary","error","warning","info","success","type","contrastThreshold","tonalOffset"]);function q(n){return(0,Z.mi)(n,I.text.primary)>=U?I.text.primary:R.text.primary}var $=function(n){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:500,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:300,r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:700;if(!(n=(0,a.Z)({},n)).main&&n[e]&&(n.main=n[e]),!n.main)throw new Error((0,u.Z)(4,e));if("string"!=typeof n.main)throw new Error((0,u.Z)(5,JSON.stringify(n.main)));return z(n,"light",t,V),z(n,"dark",r,V),n.contrastText||(n.contrastText=q(n.main)),n},Y={dark:I,light:R};return(0,o.Z)((0,a.Z)({common:d,type:B,primary:$(t),secondary:$(l,"A400","A200","A700"),error:$(c),warning:$(j),info:$(A),success:$(F),grey:f,contrastThreshold:U,getContrastText:q,augmentColor:$,tonalOffset:V},Y[B]),G)}function j(n){return Math.round(1e5*n)/1e5}var D={textTransform:"uppercase"},A='"Roboto", "Helvetica", "Arial", sans-serif';function L(n,e){var t="function"==typeof e?e(n):e,i=t.fontFamily,l=void 0===i?A:i,s=t.fontSize,c=void 0===s?14:s,u=t.fontWeightLight,d=void 0===u?300:u,f=t.fontWeightRegular,p=void 0===f?400:f,m=t.fontWeightMedium,h=void 0===m?500:m,g=t.fontWeightBold,b=void 0===g?700:g,v=t.htmlFontSize,y=void 
0===v?16:v,x=t.allVariants,w=t.pxToRem,k=(0,r.Z)(t,["fontFamily","fontSize","fontWeightLight","fontWeightRegular","fontWeightMedium","fontWeightBold","htmlFontSize","allVariants","pxToRem"]),E=c/14,C=w||function(n){return"".concat(n/y*E,"rem")},S=function(n,e,t,r,o){return(0,a.Z)({fontFamily:l,fontWeight:n,fontSize:C(e),lineHeight:t},l===A?{letterSpacing:"".concat(j(r/e),"em")}:{},o,x)},O={h1:S(d,96,1.167,-1.5),h2:S(d,60,1.2,-.5),h3:S(p,48,1.167,0),h4:S(p,34,1.235,.25),h5:S(p,24,1.334,0),h6:S(h,20,1.6,.15),subtitle1:S(p,16,1.75,.15),subtitle2:S(h,14,1.57,.1),body1:S(p,16,1.5,.15),body2:S(p,14,1.43,.15),button:S(h,14,1.75,.4,D),caption:S(p,12,1.66,.4),overline:S(p,12,2.66,1,D)};return(0,o.Z)((0,a.Z)({htmlFontSize:y,pxToRem:C,round:j,fontFamily:l,fontSize:c,fontWeightLight:d,fontWeightRegular:p,fontWeightMedium:h,fontWeightBold:b},O),k,{clone:!1})}function F(){return["".concat(arguments.length<=0?void 0:arguments[0],"px ").concat(arguments.length<=1?void 0:arguments[1],"px ").concat(arguments.length<=2?void 0:arguments[2],"px ").concat(arguments.length<=3?void 0:arguments[3],"px rgba(0,0,0,").concat(.2,")"),"".concat(arguments.length<=4?void 0:arguments[4],"px ").concat(arguments.length<=5?void 0:arguments[5],"px ").concat(arguments.length<=6?void 0:arguments[6],"px ").concat(arguments.length<=7?void 0:arguments[7],"px rgba(0,0,0,").concat(.14,")"),"".concat(arguments.length<=8?void 0:arguments[8],"px ").concat(arguments.length<=9?void 0:arguments[9],"px ").concat(arguments.length<=10?void 0:arguments[10],"px ").concat(arguments.length<=11?void 0:arguments[11],"px rgba(0,0,0,").concat(.12,")")].join(",")}const 
W=["none",F(0,2,1,-1,0,1,1,0,0,1,3,0),F(0,3,1,-2,0,2,2,0,0,1,5,0),F(0,3,3,-2,0,3,4,0,0,1,8,0),F(0,2,4,-1,0,4,5,0,0,1,10,0),F(0,3,5,-1,0,5,8,0,0,1,14,0),F(0,3,5,-1,0,6,10,0,0,1,18,0),F(0,4,5,-2,0,7,10,1,0,2,16,1),F(0,5,5,-3,0,8,10,1,0,3,14,2),F(0,5,6,-3,0,9,12,1,0,3,16,2),F(0,6,6,-3,0,10,14,1,0,4,18,3),F(0,6,7,-4,0,11,15,1,0,4,20,3),F(0,7,8,-4,0,12,17,2,0,5,22,4),F(0,7,8,-4,0,13,19,2,0,5,24,4),F(0,7,9,-4,0,14,21,2,0,5,26,4),F(0,8,9,-5,0,15,22,2,0,6,28,5),F(0,8,10,-5,0,16,24,2,0,6,30,5),F(0,8,11,-5,0,17,26,2,0,6,32,5),F(0,9,11,-5,0,18,28,2,0,7,34,6),F(0,9,12,-6,0,19,29,2,0,7,36,6),F(0,10,13,-6,0,20,31,3,0,8,38,7),F(0,10,13,-6,0,21,33,3,0,8,40,7),F(0,10,14,-6,0,22,35,3,0,8,42,7),F(0,11,14,-7,0,23,36,3,0,9,44,8),F(0,11,15,-7,0,24,38,3,0,9,46,8)],B={borderRadius:4};var H=t(8481),U=t(484),K=(t(5697),{xs:0,sm:600,md:960,lg:1280,xl:1920}),V={keys:["xs","sm","md","lg","xl"],up:function(n){return"@media (min-width:".concat(K[n],"px)")}};const G=function(n,e){return e?(0,o.Z)(n,e,{clone:!1}):n};var q,$,Y={m:"margin",p:"padding"},X={t:"Top",r:"Right",b:"Bottom",l:"Left",x:["Left","Right"],y:["Top","Bottom"]},Q={marginX:"mx",marginY:"my",paddingX:"px",paddingY:"py"},J=(q=function(n){if(n.length>2){if(!Q[n])return[n];n=Q[n]}var e=n.split(""),t=(0,H.Z)(e,2),r=t[0],o=t[1],a=Y[r],i=X[o]||"";return Array.isArray(i)?i.map((function(n){return a+n})):[a+i]},$={},function(n){return void 0===$[n]&&($[n]=q(n)),$[n]}),nn=["m","mt","mr","mb","ml","mx","my","p","pt","pr","pb","pl","px","py","margin","marginTop","marginRight","marginBottom","marginLeft","marginX","marginY","padding","paddingTop","paddingRight","paddingBottom","paddingLeft","paddingX","paddingY"];function en(n){var e=n.spacing||8;return"number"==typeof e?function(n){return e*n}:Array.isArray(e)?function(n){return e[n]}:"function"==typeof e?e:function(){}}function tn(n){var e=en(n.theme);return Object.keys(n).map((function(t){if(-1===nn.indexOf(t))return null;var r=function(n,e){return function(t){return 
n.reduce((function(n,r){return n[r]=function(n,e){if("string"==typeof e||null==e)return e;var t=n(Math.abs(e));return e>=0?t:"number"==typeof t?-t:"-".concat(t)}(e,t),n}),{})}}(J(t),e),o=n[t];return function(n,e,t){if(Array.isArray(e)){var r=n.theme.breakpoints||V;return e.reduce((function(n,o,a){return n[r.up(r.keys[a])]=t(e[a]),n}),{})}if("object"===(0,U.Z)(e)){var o=n.theme.breakpoints||V;return Object.keys(e).reduce((function(n,r){return n[o.up(r)]=t(e[r]),n}),{})}return t(e)}(n,o,r)})).reduce(G,{})}function rn(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:8;if(n.mui)return n;var e=en({spacing:n}),t=function(){for(var n=arguments.length,t=new Array(n),r=0;r0&&void 0!==arguments[0]?arguments[0]:{},e=n.breakpoints,t=void 0===e?{}:e,a=n.mixins,i=void 0===a?{}:a,s=n.palette,u=void 0===s?{}:s,d=n.spacing,f=n.typography,p=void 0===f?{}:f,m=(0,r.Z)(n,["breakpoints","mixins","palette","spacing","typography"]),h=_(u),g=l(t),b=rn(d),v=(0,o.Z)({breakpoints:g,direction:"ltr",mixins:c(g,b,i),overrides:{},palette:h,props:{},shadows:W,typography:L(h,p),spacing:b,shape:B,transitions:on.ZP,zIndex:an.Z},m),y=arguments.length,x=new Array(y>1?y-1:0),w=1;w{"use strict";t.d(e,{x9:()=>a,ZP:()=>l});var r=t(1253),o={easeInOut:"cubic-bezier(0.4, 0, 0.2, 1)",easeOut:"cubic-bezier(0.0, 0, 0.2, 1)",easeIn:"cubic-bezier(0.4, 0, 1, 1)",sharp:"cubic-bezier(0.4, 0, 0.6, 1)"},a={shortest:150,shorter:200,short:250,standard:300,complex:375,enteringScreen:225,leavingScreen:195};function i(n){return"".concat(Math.round(n),"ms")}const l={easing:o,duration:a,create:function(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:["all"],e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},t=e.duration,l=void 0===t?a.standard:t,s=e.easing,c=void 0===s?o.easeInOut:s,u=e.delay,d=void 0===u?0:u;return(0,r.Z)(e,["duration","easing","delay"]),(Array.isArray(n)?n:[n]).map((function(n){return"".concat(n," ").concat("string"==typeof l?l:i(l)," ").concat(c," 
").concat("string"==typeof d?d:i(d))})).join(",")},getAutoHeightDuration:function(n){if(!n)return 0;var e=n/36;return Math.round(10*(4+15*Math.pow(e,.25)+e/5))}}},4670:(n,e,t)=>{"use strict";t.d(e,{Z:()=>f});var r=t(2122),o=t(1253),a=t(7294),i=(t(5697),t(8679)),l=t.n(i),s=t(1314),c=t(3869),u=t(5959);var d=t(9112);const f=function(n,e){return function(n){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return function(t){var i=e.defaultTheme,d=e.withTheme,f=void 0!==d&&d,p=e.name,m=(0,o.Z)(e,["defaultTheme","withTheme","name"]),h=p,g=(0,s.Z)(n,(0,r.Z)({defaultTheme:i,Component:t,name:p||t.displayName,classNamePrefix:h},m)),b=a.forwardRef((function(n,e){n.classes;var l,s=n.innerRef,d=(0,o.Z)(n,["classes","innerRef"]),m=g((0,r.Z)({},t.defaultProps,n)),h=d;return("string"==typeof p||f)&&(l=(0,u.Z)()||i,p&&(h=(0,c.Z)({theme:l,name:p,props:d})),f&&!h.theme&&(h.theme=l)),a.createElement(t,(0,r.Z)({ref:s||e,classes:m},h))}));return l()(b,t),b}}(n,(0,r.Z)({defaultTheme:d.Z},e))}},2781:(n,e,t)=>{"use strict";t.d(e,{Z:()=>r});const r={mobileStepper:1e3,speedDial:1050,appBar:1100,drawer:1200,modal:1300,snackbar:1400,tooltip:1500}},3871:(n,e,t)=>{"use strict";t.d(e,{Z:()=>o});var r=t(288);function o(n){if("string"!=typeof n)throw new Error((0,r.Z)(7));return n.charAt(0).toUpperCase()+n.slice(1)}},2568:(n,e,t)=>{"use strict";function r(){for(var n=arguments.length,e=new Array(n),t=0;tr})},5209:(n,e,t)=>{"use strict";t.d(e,{Z:()=>d});var r=t(2122),o=t(7294),a=t(1253),i=(t(5697),t(6010)),l=t(4670),s=t(3871),c=o.forwardRef((function(n,e){var t=n.children,l=n.classes,c=n.className,u=n.color,d=void 0===u?"inherit":u,f=n.component,p=void 0===f?"svg":f,m=n.fontSize,h=void 0===m?"default":m,g=n.htmlColor,b=n.titleAccess,v=n.viewBox,y=void 0===v?"0 0 24 24":v,x=(0,a.Z)(n,["children","classes","className","color","component","fontSize","htmlColor","titleAccess","viewBox"]);return 
o.createElement(p,(0,r.Z)({className:(0,i.Z)(l.root,c,"inherit"!==d&&l["color".concat((0,s.Z)(d))],"default"!==h&&l["fontSize".concat((0,s.Z)(h))]),focusable:"false",viewBox:y,color:g,"aria-hidden":!b||void 0,role:b?"img":void 0,ref:e},x),t,b?o.createElement("title",null,b):null)}));c.muiName="SvgIcon";const u=(0,l.Z)((function(n){return{root:{userSelect:"none",width:"1em",height:"1em",display:"inline-block",fill:"currentColor",flexShrink:0,fontSize:n.typography.pxToRem(24),transition:n.transitions.create("fill",{duration:n.transitions.duration.shorter})},colorPrimary:{color:n.palette.primary.main},colorSecondary:{color:n.palette.secondary.main},colorAction:{color:n.palette.action.active},colorError:{color:n.palette.error.main},colorDisabled:{color:n.palette.action.disabled},fontSizeInherit:{fontSize:"inherit"},fontSizeSmall:{fontSize:n.typography.pxToRem(20)},fontSizeLarge:{fontSize:n.typography.pxToRem(35)}}}),{name:"MuiSvgIcon"})(c);function d(n,e){var t=function(e,t){return o.createElement(u,(0,r.Z)({ref:t},e),n)};return t.muiName=u.muiName,o.memo(o.forwardRef(t))}},9437:(n,e,t)=>{"use strict";function r(n){var e,t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:166;function r(){for(var r=arguments.length,o=new Array(r),a=0;ar})},8546:(n,e,t)=>{"use strict";t.r(e),t.d(e,{capitalize:()=>r.Z,createChainedFunction:()=>o.Z,createSvgIcon:()=>a.Z,debounce:()=>i.Z,deprecatedPropType:()=>l,isMuiElement:()=>s.Z,ownerDocument:()=>c.Z,ownerWindow:()=>u.Z,requirePropFactory:()=>d,setRef:()=>f.Z,unstable_useId:()=>b.Z,unsupportedProp:()=>p,useControlled:()=>m.Z,useEventCallback:()=>h.Z,useForkRef:()=>g.Z,useIsFocusVisible:()=>v.Z});var r=t(3871),o=t(2568),a=t(5209),i=t(9437);function l(n,e){return function(){return null}}var s=t(3711),c=t(626),u=t(713);function d(n){return function(){return null}}var f=t(4236);function p(n,e,t,r,o){return null}var m=t(2775),h=t(5192),g=t(3834),b=t(5001),v=t(4896)},3711:(n,e,t)=>{"use strict";t.d(e,{Z:()=>o});var r=t(7294);function 
o(n,e){return r.isValidElement(n)&&-1!==e.indexOf(n.type.muiName)}},626:(n,e,t)=>{"use strict";function r(n){return n&&n.ownerDocument||document}t.d(e,{Z:()=>r})},713:(n,e,t)=>{"use strict";t.d(e,{Z:()=>o});var r=t(626);function o(n){return(0,r.Z)(n).defaultView||window}},4236:(n,e,t)=>{"use strict";function r(n,e){"function"==typeof n?n(e):n&&(n.current=e)}t.d(e,{Z:()=>r})},5001:(n,e,t)=>{"use strict";t.d(e,{Z:()=>o});var r=t(7294);function o(n){var e=r.useState(n),t=e[0],o=e[1],a=n||t;return r.useEffect((function(){null==t&&o("mui-".concat(Math.round(1e5*Math.random())))}),[t]),a}},2775:(n,e,t)=>{"use strict";t.d(e,{Z:()=>o});var r=t(7294);function o(n){var e=n.controlled,t=n.default,o=(n.name,n.state,r.useRef(void 0!==e).current),a=r.useState(t),i=a[0],l=a[1];return[o?e:i,r.useCallback((function(n){o||l(n)}),[])]}},5192:(n,e,t)=>{"use strict";t.d(e,{Z:()=>a});var r=t(7294),o="undefined"!=typeof window?r.useLayoutEffect:r.useEffect;function a(n){var e=r.useRef(n);return o((function(){e.current=n})),r.useCallback((function(){return e.current.apply(void 0,arguments)}),[])}},3834:(n,e,t)=>{"use strict";t.d(e,{Z:()=>a});var r=t(7294),o=t(4236);function a(n,e){return r.useMemo((function(){return null==n&&null==e?null:function(t){(0,o.Z)(n,t),(0,o.Z)(e,t)}}),[n,e])}},4896:(n,e,t)=>{"use strict";t.d(e,{Z:()=>m});var r=t(7294),o=t(3935),a=!0,i=!1,l=null,s={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function c(n){n.metaKey||n.altKey||n.ctrlKey||(a=!0)}function u(){a=!1}function d(){"hidden"===this.visibilityState&&i&&(a=!0)}function f(n){var e,t,r,o=n.target;try{return o.matches(":focus-visible")}catch(n){}return a||(t=(e=o).type,!("INPUT"!==(r=e.tagName)||!s[t]||e.readOnly)||"TEXTAREA"===r&&!e.readOnly||!!e.isContentEditable)}function p(){i=!0,window.clearTimeout(l),l=window.setTimeout((function(){i=!1}),100)}function 
m(){return{isFocusVisible:f,onBlurVisible:p,ref:r.useCallback((function(n){var e,t=o.findDOMNode(n);null!=t&&((e=t.ownerDocument).addEventListener("keydown",c,!0),e.addEventListener("mousedown",u,!0),e.addEventListener("pointerdown",u,!0),e.addEventListener("touchstart",u,!0),e.addEventListener("visibilitychange",d,!0))}),[])}}},341:(n,e)=>{"use strict";if("function"==typeof Symbol&&Symbol.for){var t=Symbol.for;t("react.element"),t("react.portal"),t("react.fragment"),t("react.strict_mode"),t("react.profiler"),t("react.provider"),t("react.context"),t("react.forward_ref"),t("react.suspense"),t("react.suspense_list"),t("react.memo"),t("react.lazy"),t("react.block"),t("react.server.block"),t("react.fundamental"),t("react.debug_trace_mode"),t("react.legacy_hidden")}},5122:(n,e,t)=>{"use strict";t(341)},2354:(n,e)=>{"use strict";e.Z=function(n){var e,t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:166;function r(){for(var r=arguments.length,o=new Array(r),a=0;a{"use strict";var r=t(5318),o=t(862);e.Z=void 0;var a=o(t(7294)),i=(0,r(t(2108)).default)(a.createElement("path",{d:"M15.41 7.41L14 6l-6 6 6 6 1.41-1.41L10.83 12z"}),"ChevronLeft");e.Z=i},6735:(n,e,t)=>{"use strict";var r=t(5318),o=t(862);e.Z=void 0;var a=o(t(7294)),i=(0,r(t(2108)).default)(a.createElement("path",{d:"M10 6L8.59 7.41 13.17 12l-4.58 4.59L10 18l6-6z"}),"ChevronRight");e.Z=i},2369:(n,e,t)=>{"use strict";var r=t(5318),o=t(862);e.Z=void 0;var a=o(t(7294)),i=(0,r(t(2108)).default)(a.createElement("path",{d:"M11 18h2v-2h-2v2zm1-16C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm0 18c-4.41 0-8-3.59-8-8s3.59-8 8-8 8 3.59 8 8-3.59 8-8 8zm0-14c-2.21 0-4 1.79-4 4h2c0-1.1.9-2 2-2s2 .9 2 2c0 2-3 1.75-3 5h2c0-2.25 3-2.5 3-5 0-2.21-1.79-4-4-4z"}),"HelpOutline");e.Z=i},2108:(n,e,t)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0}),Object.defineProperty(e,"default",{enumerable:!0,get:function(){return r.createSvgIcon}});var r=t(8546)},3869:(n,e,t)=>{"use strict";function r(n){var 
e=n.theme,t=n.name,r=n.props;if(!e||!e.props||!e.props[t])return r;var o,a=e.props[t];for(o in a)void 0===r[o]&&(r[o]=a[o]);return r}t.d(e,{Z:()=>r})},1314:(n,e,t)=>{"use strict";t.d(e,{Z:()=>st});var r=t(1253),o=t(2122),a=t(7294),i="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(n){return typeof n}:function(n){return n&&"function"==typeof Symbol&&n.constructor===Symbol&&n!==Symbol.prototype?"symbol":typeof n};const l="object"===("undefined"==typeof window?"undefined":i(window))&&"object"===("undefined"==typeof document?"undefined":i(document))&&9===document.nodeType;var s=t(5991),c=t(1788),u=t(3349),d=t(9756),f={}.constructor;function p(n){if(null==n||"object"!=typeof n)return n;if(Array.isArray(n))return n.map(p);if(n.constructor!==f)return n;var e={};for(var t in n)e[t]=p(n[t]);return e}function m(n,e,t){void 0===n&&(n="unnamed");var r=t.jss,o=p(e);return r.plugins.onCreateRule(n,o,t)||(n[0],null)}var h=function(n,e){for(var t="",r=0;r<+~=|^:(),"'`\s])/g,x="undefined"!=typeof CSS&&CSS.escape,w=function(n){return x?x(n):n.replace(y,"\\$1")},k=function(){function n(n,e,t){this.type="style",this.key=void 0,this.isProcessed=!1,this.style=void 0,this.renderer=void 0,this.renderable=void 0,this.options=void 0;var r=t.sheet,o=t.Renderer;this.key=n,this.options=t,this.style=e,r?this.renderer=r.renderer:o&&(this.renderer=new o)}return n.prototype.prop=function(n,e,t){if(void 0===e)return this.style[n];var r=!!t&&t.force;if(!r&&this.style[n]===e)return this;var o=e;t&&!1===t.process||(o=this.options.jss.plugins.onChangeValue(e,n,this));var a=null==o||!1===o,i=n in this.style;if(a&&!i&&!r)return this;var l=a&&i;if(l?delete this.style[n]:this.style[n]=o,this.renderable&&this.renderer)return l?this.renderer.removeProperty(this.renderable,n):this.renderer.setProperty(this.renderable,n,o),this;var s=this.options.sheet;return s&&s.attached,this},n}(),E=function(n){function e(e,t,r){var o;(o=n.call(this,e,t,r)||this).selectorText=void 0,o.id=void 
0,o.renderable=void 0;var a=r.selector,i=r.scoped,l=r.sheet,s=r.generateId;return a?o.selectorText=a:!1!==i&&(o.id=s((0,u.Z)((0,u.Z)(o)),l),o.selectorText="."+w(o.id)),o}(0,c.Z)(e,n);var t=e.prototype;return t.applyTo=function(n){var e=this.renderer;if(e){var t=this.toJSON();for(var r in t)e.setProperty(n,r,t[r])}return this},t.toJSON=function(){var n={};for(var e in this.style){var t=this.style[e];"object"!=typeof t?n[e]=t:Array.isArray(t)&&(n[e]=g(t))}return n},t.toString=function(n){var e=this.options.sheet,t=e&&e.options.link?(0,o.Z)({},n,{allowEmpty:!0}):n;return v(this.selectorText,this.style,t)},(0,s.Z)(e,[{key:"selector",set:function(n){if(n!==this.selectorText){this.selectorText=n;var e=this.renderer,t=this.renderable;t&&e&&(e.setSelector(t,n)||e.replaceRule(t,this))}},get:function(){return this.selectorText}}]),e}(k),C={onCreateRule:function(n,e,t){return"@"===n[0]||t.parent&&"keyframes"===t.parent.type?null:new E(n,e,t)}},S={indent:1,children:!0},O=/@([\w-]+)/,T=function(){function n(n,e,t){this.type="conditional",this.at=void 0,this.key=void 0,this.query=void 0,this.rules=void 0,this.options=void 0,this.isProcessed=!1,this.renderable=void 0,this.key=n;var r=n.match(O);for(var a in this.at=r?r[1]:"unknown",this.query=t.name||"@"+this.at,this.options=t,this.rules=new Y((0,o.Z)({},t,{parent:this})),e)this.rules.add(a,e[a]);this.rules.process()}var e=n.prototype;return e.getRule=function(n){return this.rules.get(n)},e.indexOf=function(n){return this.rules.indexOf(n)},e.addRule=function(n,e,t){var r=this.rules.add(n,e,t);return r?(this.options.jss.plugins.onProcessRule(r),r):null},e.toString=function(n){if(void 0===n&&(n=S),null==n.indent&&(n.indent=S.indent),null==n.children&&(n.children=S.children),!1===n.children)return this.query+" {}";var e=this.rules.toString(n);return e?this.query+" {\n"+e+"\n}":""},n}(),N=/@media|@supports\s+/,P={onCreateRule:function(n,e,t){return N.test(n)?new 
T(n,e,t):null}},M={indent:1,children:!0},Z=/@keyframes\s+([\w-]+)/,R=function(){function n(n,e,t){this.type="keyframes",this.at="@keyframes",this.key=void 0,this.name=void 0,this.id=void 0,this.rules=void 0,this.options=void 0,this.isProcessed=!1,this.renderable=void 0;var r=n.match(Z);r&&r[1]?this.name=r[1]:this.name="noname",this.key=this.type+"-"+this.name,this.options=t;var a=t.scoped,i=t.sheet,l=t.generateId;for(var s in this.id=!1===a?this.name:w(l(this,i)),this.rules=new Y((0,o.Z)({},t,{parent:this})),e)this.rules.add(s,e[s],(0,o.Z)({},t,{parent:this}));this.rules.process()}return n.prototype.toString=function(n){if(void 0===n&&(n=M),null==n.indent&&(n.indent=M.indent),null==n.children&&(n.children=M.children),!1===n.children)return this.at+" "+this.id+" {}";var e=this.rules.toString(n);return e&&(e="\n"+e+"\n"),this.at+" "+this.id+" {"+e+"}"},n}(),I=/@keyframes\s+/,z=/\$([\w-]+)/g,_=function(n,e){return"string"==typeof n?n.replace(z,(function(n,t){return t in e?e[t]:n})):n},j=function(n,e,t){var r=n[e],o=_(r,t);o!==r&&(n[e]=o)},D={onCreateRule:function(n,e,t){return"string"==typeof n&&I.test(n)?new R(n,e,t):null},onProcessStyle:function(n,e,t){return"style"===e.type&&t?("animation-name"in n&&j(n,"animation-name",t.keyframes),"animation"in n&&j(n,"animation",t.keyframes),n):n},onChangeValue:function(n,e,t){var r=t.options.sheet;if(!r)return n;switch(e){case"animation":case"animation-name":return _(n,r.keyframes);default:return n}}},A=function(n){function e(){for(var e,t=arguments.length,r=new Array(t),o=0;o=this.index)e.push(n);else for(var r=0;rt)return void e.splice(r,0,n)},e.reset=function(){this.registry=[]},e.remove=function(n){var e=this.registry.indexOf(n);this.registry.splice(e,1)},e.toString=function(n){for(var e=void 0===n?{}:n,t=e.attached,r=(0,d.Z)(e,["attached"]),o="",a=0;at?t:e},mn=function(){function n(n){this.getPropertyValue=an,this.setProperty=ln,this.removeProperty=sn,this.setSelector=cn,this.element=void 0,this.sheet=void 
0,this.hasInsertedRules=!1,this.cssRules=[],n&&J.add(n),this.sheet=n;var e=this.sheet?this.sheet.options:{},t=e.media,r=e.meta,o=e.element;this.element=o||function(){var n=document.createElement("style");return n.textContent="\n",n}(),this.element.setAttribute("data-jss",""),t&&this.element.setAttribute("media",t),r&&this.element.setAttribute("data-meta",r);var a=dn();a&&this.element.setAttribute("nonce",a)}var e=n.prototype;return e.attach=function(){if(!this.element.parentNode&&this.sheet){!function(n,e){var t=e.insertionPoint,r=function(n){var e=J.registry;if(e.length>0){var t=function(n,e){for(var t=0;te.index&&r.options.insertionPoint===e.insertionPoint)return r}return null}(e,n);if(t&&t.renderer)return{parent:t.renderer.element.parentNode,node:t.renderer.element};if((t=function(n,e){for(var t=n.length-1;t>=0;t--){var r=n[t];if(r.attached&&r.options.insertionPoint===e.insertionPoint)return r}return null}(e,n))&&t.renderer)return{parent:t.renderer.element.parentNode,node:t.renderer.element.nextSibling}}var r=n.insertionPoint;if(r&&"string"==typeof r){var o=function(n){for(var e=un(),t=0;t-1){var o=Re[n];if(!Array.isArray(o))return ie+ge(o)in e&&le+o;if(!r)return!1;for(var a=0;ae?1:-1:n.length-e.length},{onProcessStyle:function(n,e){if("style"!==e.type)return n;for(var t={},r=Object.keys(n).sort(qe),o=0;o0&&void 0!==arguments[0]?arguments[0]:{},e=n.disableGlobal,t=void 0!==e&&e,r=n.productionPrefix,o=void 0===r?"jss":r,a=n.seed,i=void 0===a?"":a,l=""===i?"":"".concat(i,"-"),s=0,c=function(){return s+=1};return function(n,e){var r=e.options.name;if(r&&0===r.indexOf("Mui")&&!e.options.link&&!t){if(-1!==On.indexOf(n.key))return"Mui-".concat(n.key);var a="".concat(l).concat(r,"-").concat(n.key);return e.options.theme[Sn]&&""===i?"".concat(a,"-").concat(c()):a}return"".concat(l).concat(o).concat(c())}}(),jss:$e,sheetsCache:null,sheetsManager:new Map,sheetsRegistry:null},Xe=a.createContext(Ye),Qe=-1e9;function Je(){return Qe+=1}var nt=t(5953);function et(n){var 
e="function"==typeof n;return{create:function(t,r){var a;try{a=e?n(t):n}catch(n){throw n}if(!r||!t.overrides||!t.overrides[r])return a;var i=t.overrides[r],l=(0,o.Z)({},a);return Object.keys(i).forEach((function(n){l[n]=(0,nt.Z)(l[n],i[n])})),l},options:{}}}const tt={};function rt(n,e,t){var r=n.state;if(n.stylesOptions.disableGeneration)return e||{};r.cacheClasses||(r.cacheClasses={value:null,lastProp:null,lastJSS:{}});var o=!1;return r.classes!==r.cacheClasses.lastJSS&&(r.cacheClasses.lastJSS=r.classes,o=!0),e!==r.cacheClasses.lastProp&&(r.cacheClasses.lastProp=e,o=!0),o&&(r.cacheClasses.value=(0,xn.Z)({baseClasses:r.cacheClasses.lastJSS,newClasses:e,Component:t})),r.cacheClasses.value}function ot(n,e){var t=n.state,r=n.theme,a=n.stylesOptions,i=n.stylesCreator,l=n.name;if(!a.disableGeneration){var s=kn(a.sheetsManager,i,r);s||(s={refs:0,staticSheet:null,dynamicStyles:null},wn(a.sheetsManager,i,r,s));var c=(0,o.Z)({},i.options,a,{theme:r,flip:"boolean"==typeof a.flip?a.flip:"rtl"===r.direction});c.generateId=c.serverGenerateClassName||c.generateClassName;var u=a.sheetsRegistry;if(0===s.refs){var d;a.sheetsCache&&(d=kn(a.sheetsCache,i,r));var f=i.create(r,l);d||((d=a.jss.createStyleSheet(f,(0,o.Z)({link:!1},c))).attach(),a.sheetsCache&&wn(a.sheetsCache,i,r,d)),u&&u.add(d),s.staticSheet=d,s.dynamicStyles=bn(f)}if(s.dynamicStyles){var p=a.jss.createStyleSheet(s.dynamicStyles,(0,o.Z)({link:!0},c));p.update(e),p.attach(),t.dynamicSheet=p,t.classes=(0,xn.Z)({baseClasses:s.staticSheet.classes,newClasses:p.classes}),u&&u.add(p)}else t.classes=s.staticSheet.classes;s.refs+=1}}function at(n,e){var t=n.state;t.dynamicSheet&&t.dynamicSheet.update(e)}function it(n){var e=n.state,t=n.theme,r=n.stylesOptions,o=n.stylesCreator;if(!r.disableGeneration){var a=kn(r.sheetsManager,o,t);a.refs-=1;var 
i=r.sheetsRegistry;0===a.refs&&(En(r.sheetsManager,o,t),r.jss.removeStyleSheet(a.staticSheet),i&&i.remove(a.staticSheet)),e.dynamicSheet&&(r.jss.removeStyleSheet(e.dynamicSheet),i&&i.remove(e.dynamicSheet))}}function lt(n,e){var t,r=a.useRef([]),o=a.useMemo((function(){return{}}),e);r.current!==o&&(r.current=o,t=n()),a.useEffect((function(){return function(){t&&t()}}),[o])}function st(n){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},t=e.name,i=e.classNamePrefix,l=e.Component,s=e.defaultTheme,c=void 0===s?tt:s,u=(0,r.Z)(e,["name","classNamePrefix","Component","defaultTheme"]),d=et(n),f=t||i||"makeStyles";d.options={index:Je(),name:t,meta:f,classNamePrefix:f};var p=function(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},e=(0,Cn.Z)()||c,r=(0,o.Z)({},a.useContext(Xe),u),i=a.useRef(),s=a.useRef();lt((function(){var o={name:t,state:{},stylesCreator:d,stylesOptions:r,theme:e};return ot(o,n),s.current=!1,i.current=o,function(){it(o)}}),[e,d]),a.useEffect((function(){s.current&&at(i.current,n),s.current=!0}));var f=rt(i.current,n.classes,l);return f};return p}},5835:(n,e,t)=>{"use strict";t.d(e,{Z:()=>o});var r=t(2122);function o(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},e=n.baseClasses,t=n.newClasses;if(n.Component,!t)return e;var o=(0,r.Z)({},e);return Object.keys(t).forEach((function(n){t[n]&&(o[n]="".concat(e[n]," ").concat(t[n]))})),o}},5959:(n,e,t)=>{"use strict";t.d(e,{Z:()=>a});var r=t(7294);const o=r.createContext(null);function a(){return r.useContext(o)}},5953:(n,e,t)=>{"use strict";t.d(e,{Z:()=>i});var r=t(2122),o=t(484);function a(n){return n&&"object"===(0,o.Z)(n)&&n.constructor===Object}function i(n,e){var t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{clone:!0},o=t.clone?(0,r.Z)({},n):n;return a(n)&&a(e)&&Object.keys(e).forEach((function(r){"__proto__"!==r&&(a(e[r])&&r in n?o[r]=i(n[r],e[r],t):o[r]=e[r])})),o}},288:(n,e,t)=>{"use strict";function r(n){for(var 
e="https://material-ui.com/production-error/?code="+n,t=1;tr})},4184:(n,e)=>{var t;!function(){"use strict";var r={}.hasOwnProperty;function o(){for(var n=[],e=0;e{"use strict";function r(n){var e,t,o="";if("string"==typeof n||"number"==typeof n)o+=n;else if("object"==typeof n)if(Array.isArray(n))for(e=0;eo})},5346:(n,e,t)=>{"use strict";t.d(e,{Z:()=>a});var r=t(3645),o=t.n(r)()((function(n){return n[1]}));o.push([n.id,"/* stylelint-disable at-rule-empty-line-before,at-rule-name-space-after,at-rule-no-unknown */\n/* stylelint-disable no-duplicate-selectors */\n/* stylelint-disable */\n/* stylelint-disable declaration-bang-space-before,no-duplicate-selectors,string-no-newline */\n.ant-btn {\n line-height: 1.5715;\n position: relative;\n display: inline-block;\n font-weight: 400;\n white-space: nowrap;\n text-align: center;\n background-image: none;\n border: 1px solid transparent;\n box-shadow: 0 2px 0 rgba(0, 0, 0, 0.015);\n cursor: pointer;\n transition: all 0.3s cubic-bezier(0.645, 0.045, 0.355, 1);\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n touch-action: manipulation;\n height: 32px;\n padding: 4px 15px;\n font-size: 14px;\n border-radius: 2px;\n color: rgba(0, 0, 0, 0.85);\n background: #fff;\n border-color: #d9d9d9;\n}\n.ant-btn > .anticon {\n line-height: 1;\n}\n.ant-btn,\n.ant-btn:active,\n.ant-btn:focus {\n outline: 0;\n}\n.ant-btn:not([disabled]):hover {\n text-decoration: none;\n}\n.ant-btn:not([disabled]):active {\n outline: 0;\n box-shadow: none;\n}\n.ant-btn[disabled] {\n cursor: not-allowed;\n}\n.ant-btn[disabled] > * {\n pointer-events: none;\n}\n.ant-btn-lg {\n height: 40px;\n padding: 6.4px 15px;\n font-size: 16px;\n border-radius: 2px;\n}\n.ant-btn-sm {\n height: 24px;\n padding: 0px 7px;\n font-size: 14px;\n border-radius: 2px;\n}\n.ant-btn > a:only-child {\n color: currentColor;\n}\n.ant-btn > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n 
background: transparent;\n content: '';\n}\n.ant-btn:hover,\n.ant-btn:focus {\n color: #40a9ff;\n background: #fff;\n border-color: #40a9ff;\n}\n.ant-btn:hover > a:only-child,\n.ant-btn:focus > a:only-child {\n color: currentColor;\n}\n.ant-btn:hover > a:only-child::after,\n.ant-btn:focus > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn:active {\n color: #096dd9;\n background: #fff;\n border-color: #096dd9;\n}\n.ant-btn:active > a:only-child {\n color: currentColor;\n}\n.ant-btn:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn[disabled],\n.ant-btn[disabled]:hover,\n.ant-btn[disabled]:focus,\n.ant-btn[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n border-color: #d9d9d9;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn[disabled] > a:only-child,\n.ant-btn[disabled]:hover > a:only-child,\n.ant-btn[disabled]:focus > a:only-child,\n.ant-btn[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn[disabled] > a:only-child::after,\n.ant-btn[disabled]:hover > a:only-child::after,\n.ant-btn[disabled]:focus > a:only-child::after,\n.ant-btn[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn:hover,\n.ant-btn:focus,\n.ant-btn:active {\n text-decoration: none;\n background: #fff;\n}\n.ant-btn > span {\n display: inline-block;\n}\n.ant-btn-primary {\n color: #fff;\n background: #1890ff;\n border-color: #1890ff;\n text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.12);\n box-shadow: 0 2px 0 rgba(0, 0, 0, 0.045);\n}\n.ant-btn-primary > a:only-child {\n color: currentColor;\n}\n.ant-btn-primary > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: 
'';\n}\n.ant-btn-primary:hover,\n.ant-btn-primary:focus {\n color: #fff;\n background: #40a9ff;\n border-color: #40a9ff;\n}\n.ant-btn-primary:hover > a:only-child,\n.ant-btn-primary:focus > a:only-child {\n color: currentColor;\n}\n.ant-btn-primary:hover > a:only-child::after,\n.ant-btn-primary:focus > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-primary:active {\n color: #fff;\n background: #096dd9;\n border-color: #096dd9;\n}\n.ant-btn-primary:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-primary:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-primary[disabled],\n.ant-btn-primary[disabled]:hover,\n.ant-btn-primary[disabled]:focus,\n.ant-btn-primary[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n border-color: #d9d9d9;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn-primary[disabled] > a:only-child,\n.ant-btn-primary[disabled]:hover > a:only-child,\n.ant-btn-primary[disabled]:focus > a:only-child,\n.ant-btn-primary[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-primary[disabled] > a:only-child::after,\n.ant-btn-primary[disabled]:hover > a:only-child::after,\n.ant-btn-primary[disabled]:focus > a:only-child::after,\n.ant-btn-primary[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-group .ant-btn-primary:not(:first-child):not(:last-child) {\n border-right-color: #40a9ff;\n border-left-color: #40a9ff;\n}\n.ant-btn-group .ant-btn-primary:not(:first-child):not(:last-child):disabled {\n border-color: #d9d9d9;\n}\n.ant-btn-group .ant-btn-primary:first-child:not(:last-child) {\n border-right-color: #40a9ff;\n}\n.ant-btn-group .ant-btn-primary:first-child:not(:last-child)[disabled] {\n 
border-right-color: #d9d9d9;\n}\n.ant-btn-group .ant-btn-primary:last-child:not(:first-child),\n.ant-btn-group .ant-btn-primary + .ant-btn-primary {\n border-left-color: #40a9ff;\n}\n.ant-btn-group .ant-btn-primary:last-child:not(:first-child)[disabled],\n.ant-btn-group .ant-btn-primary + .ant-btn-primary[disabled] {\n border-left-color: #d9d9d9;\n}\n.ant-btn-ghost {\n color: rgba(0, 0, 0, 0.85);\n background: transparent;\n border-color: #d9d9d9;\n}\n.ant-btn-ghost > a:only-child {\n color: currentColor;\n}\n.ant-btn-ghost > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-ghost:hover,\n.ant-btn-ghost:focus {\n color: #40a9ff;\n background: transparent;\n border-color: #40a9ff;\n}\n.ant-btn-ghost:hover > a:only-child,\n.ant-btn-ghost:focus > a:only-child {\n color: currentColor;\n}\n.ant-btn-ghost:hover > a:only-child::after,\n.ant-btn-ghost:focus > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-ghost:active {\n color: #096dd9;\n background: transparent;\n border-color: #096dd9;\n}\n.ant-btn-ghost:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-ghost:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-ghost[disabled],\n.ant-btn-ghost[disabled]:hover,\n.ant-btn-ghost[disabled]:focus,\n.ant-btn-ghost[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n border-color: #d9d9d9;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn-ghost[disabled] > a:only-child,\n.ant-btn-ghost[disabled]:hover > a:only-child,\n.ant-btn-ghost[disabled]:focus > a:only-child,\n.ant-btn-ghost[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-ghost[disabled] > a:only-child::after,\n.ant-btn-ghost[disabled]:hover > 
a:only-child::after,\n.ant-btn-ghost[disabled]:focus > a:only-child::after,\n.ant-btn-ghost[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dashed {\n color: rgba(0, 0, 0, 0.85);\n background: #fff;\n border-color: #d9d9d9;\n border-style: dashed;\n}\n.ant-btn-dashed > a:only-child {\n color: currentColor;\n}\n.ant-btn-dashed > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dashed:hover,\n.ant-btn-dashed:focus {\n color: #40a9ff;\n background: #fff;\n border-color: #40a9ff;\n}\n.ant-btn-dashed:hover > a:only-child,\n.ant-btn-dashed:focus > a:only-child {\n color: currentColor;\n}\n.ant-btn-dashed:hover > a:only-child::after,\n.ant-btn-dashed:focus > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dashed:active {\n color: #096dd9;\n background: #fff;\n border-color: #096dd9;\n}\n.ant-btn-dashed:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-dashed:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dashed[disabled],\n.ant-btn-dashed[disabled]:hover,\n.ant-btn-dashed[disabled]:focus,\n.ant-btn-dashed[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n border-color: #d9d9d9;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn-dashed[disabled] > a:only-child,\n.ant-btn-dashed[disabled]:hover > a:only-child,\n.ant-btn-dashed[disabled]:focus > a:only-child,\n.ant-btn-dashed[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-dashed[disabled] > a:only-child::after,\n.ant-btn-dashed[disabled]:hover > a:only-child::after,\n.ant-btn-dashed[disabled]:focus > a:only-child::after,\n.ant-btn-dashed[disabled]:active > 
a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-danger {\n color: #fff;\n background: #ff4d4f;\n border-color: #ff4d4f;\n text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.12);\n box-shadow: 0 2px 0 rgba(0, 0, 0, 0.045);\n}\n.ant-btn-danger > a:only-child {\n color: currentColor;\n}\n.ant-btn-danger > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-danger:hover,\n.ant-btn-danger:focus {\n color: #fff;\n background: #ff7875;\n border-color: #ff7875;\n}\n.ant-btn-danger:hover > a:only-child,\n.ant-btn-danger:focus > a:only-child {\n color: currentColor;\n}\n.ant-btn-danger:hover > a:only-child::after,\n.ant-btn-danger:focus > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-danger:active {\n color: #fff;\n background: #d9363e;\n border-color: #d9363e;\n}\n.ant-btn-danger:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-danger:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-danger[disabled],\n.ant-btn-danger[disabled]:hover,\n.ant-btn-danger[disabled]:focus,\n.ant-btn-danger[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n border-color: #d9d9d9;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn-danger[disabled] > a:only-child,\n.ant-btn-danger[disabled]:hover > a:only-child,\n.ant-btn-danger[disabled]:focus > a:only-child,\n.ant-btn-danger[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-danger[disabled] > a:only-child::after,\n.ant-btn-danger[disabled]:hover > a:only-child::after,\n.ant-btn-danger[disabled]:focus > a:only-child::after,\n.ant-btn-danger[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n 
right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-link {\n color: #1890ff;\n background: transparent;\n border-color: transparent;\n box-shadow: none;\n}\n.ant-btn-link > a:only-child {\n color: currentColor;\n}\n.ant-btn-link > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-link:hover,\n.ant-btn-link:focus {\n color: #40a9ff;\n background: transparent;\n border-color: #40a9ff;\n}\n.ant-btn-link:hover > a:only-child,\n.ant-btn-link:focus > a:only-child {\n color: currentColor;\n}\n.ant-btn-link:hover > a:only-child::after,\n.ant-btn-link:focus > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-link:active {\n color: #096dd9;\n background: transparent;\n border-color: #096dd9;\n}\n.ant-btn-link:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-link:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-link[disabled],\n.ant-btn-link[disabled]:hover,\n.ant-btn-link[disabled]:focus,\n.ant-btn-link[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n border-color: #d9d9d9;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn-link[disabled] > a:only-child,\n.ant-btn-link[disabled]:hover > a:only-child,\n.ant-btn-link[disabled]:focus > a:only-child,\n.ant-btn-link[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-link[disabled] > a:only-child::after,\n.ant-btn-link[disabled]:hover > a:only-child::after,\n.ant-btn-link[disabled]:focus > a:only-child::after,\n.ant-btn-link[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-link:hover {\n background: 
transparent;\n}\n.ant-btn-link:hover,\n.ant-btn-link:focus,\n.ant-btn-link:active {\n border-color: transparent;\n}\n.ant-btn-link[disabled],\n.ant-btn-link[disabled]:hover,\n.ant-btn-link[disabled]:focus,\n.ant-btn-link[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: transparent;\n border-color: transparent;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn-link[disabled] > a:only-child,\n.ant-btn-link[disabled]:hover > a:only-child,\n.ant-btn-link[disabled]:focus > a:only-child,\n.ant-btn-link[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-link[disabled] > a:only-child::after,\n.ant-btn-link[disabled]:hover > a:only-child::after,\n.ant-btn-link[disabled]:focus > a:only-child::after,\n.ant-btn-link[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-text {\n color: rgba(0, 0, 0, 0.85);\n background: transparent;\n border-color: transparent;\n box-shadow: none;\n}\n.ant-btn-text > a:only-child {\n color: currentColor;\n}\n.ant-btn-text > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-text:hover,\n.ant-btn-text:focus {\n color: #40a9ff;\n background: transparent;\n border-color: #40a9ff;\n}\n.ant-btn-text:hover > a:only-child,\n.ant-btn-text:focus > a:only-child {\n color: currentColor;\n}\n.ant-btn-text:hover > a:only-child::after,\n.ant-btn-text:focus > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-text:active {\n color: #096dd9;\n background: transparent;\n border-color: #096dd9;\n}\n.ant-btn-text:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-text:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: 
'';\n}\n.ant-btn-text[disabled],\n.ant-btn-text[disabled]:hover,\n.ant-btn-text[disabled]:focus,\n.ant-btn-text[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n border-color: #d9d9d9;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn-text[disabled] > a:only-child,\n.ant-btn-text[disabled]:hover > a:only-child,\n.ant-btn-text[disabled]:focus > a:only-child,\n.ant-btn-text[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-text[disabled] > a:only-child::after,\n.ant-btn-text[disabled]:hover > a:only-child::after,\n.ant-btn-text[disabled]:focus > a:only-child::after,\n.ant-btn-text[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-text:hover,\n.ant-btn-text:focus {\n color: rgba(0, 0, 0, 0.85);\n background: rgba(0, 0, 0, 0.018);\n border-color: transparent;\n}\n.ant-btn-text:active {\n color: rgba(0, 0, 0, 0.85);\n background: rgba(0, 0, 0, 0.028);\n border-color: transparent;\n}\n.ant-btn-text[disabled],\n.ant-btn-text[disabled]:hover,\n.ant-btn-text[disabled]:focus,\n.ant-btn-text[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: transparent;\n border-color: transparent;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn-text[disabled] > a:only-child,\n.ant-btn-text[disabled]:hover > a:only-child,\n.ant-btn-text[disabled]:focus > a:only-child,\n.ant-btn-text[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-text[disabled] > a:only-child::after,\n.ant-btn-text[disabled]:hover > a:only-child::after,\n.ant-btn-text[disabled]:focus > a:only-child::after,\n.ant-btn-text[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous {\n color: #ff4d4f;\n background: #fff;\n border-color: #ff4d4f;\n}\n.ant-btn-dangerous > a:only-child {\n color: 
currentColor;\n}\n.ant-btn-dangerous > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous:hover,\n.ant-btn-dangerous:focus {\n color: #ff7875;\n background: #fff;\n border-color: #ff7875;\n}\n.ant-btn-dangerous:hover > a:only-child,\n.ant-btn-dangerous:focus > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous:hover > a:only-child::after,\n.ant-btn-dangerous:focus > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous:active {\n color: #d9363e;\n background: #fff;\n border-color: #d9363e;\n}\n.ant-btn-dangerous:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous[disabled],\n.ant-btn-dangerous[disabled]:hover,\n.ant-btn-dangerous[disabled]:focus,\n.ant-btn-dangerous[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n border-color: #d9d9d9;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn-dangerous[disabled] > a:only-child,\n.ant-btn-dangerous[disabled]:hover > a:only-child,\n.ant-btn-dangerous[disabled]:focus > a:only-child,\n.ant-btn-dangerous[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous[disabled] > a:only-child::after,\n.ant-btn-dangerous[disabled]:hover > a:only-child::after,\n.ant-btn-dangerous[disabled]:focus > a:only-child::after,\n.ant-btn-dangerous[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-primary {\n color: #fff;\n background: #ff4d4f;\n border-color: #ff4d4f;\n text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.12);\n box-shadow: 0 2px 0 rgba(0, 0, 0, 
0.045);\n}\n.ant-btn-dangerous.ant-btn-primary > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-primary > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-primary:hover,\n.ant-btn-dangerous.ant-btn-primary:focus {\n color: #fff;\n background: #ff7875;\n border-color: #ff7875;\n}\n.ant-btn-dangerous.ant-btn-primary:hover > a:only-child,\n.ant-btn-dangerous.ant-btn-primary:focus > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-primary:hover > a:only-child::after,\n.ant-btn-dangerous.ant-btn-primary:focus > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-primary:active {\n color: #fff;\n background: #d9363e;\n border-color: #d9363e;\n}\n.ant-btn-dangerous.ant-btn-primary:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-primary:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-primary[disabled],\n.ant-btn-dangerous.ant-btn-primary[disabled]:hover,\n.ant-btn-dangerous.ant-btn-primary[disabled]:focus,\n.ant-btn-dangerous.ant-btn-primary[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n border-color: #d9d9d9;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn-dangerous.ant-btn-primary[disabled] > a:only-child,\n.ant-btn-dangerous.ant-btn-primary[disabled]:hover > a:only-child,\n.ant-btn-dangerous.ant-btn-primary[disabled]:focus > a:only-child,\n.ant-btn-dangerous.ant-btn-primary[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-primary[disabled] > a:only-child::after,\n.ant-btn-dangerous.ant-btn-primary[disabled]:hover > a:only-child::after,\n.ant-btn-dangerous.ant-btn-primary[disabled]:focus 
> a:only-child::after,\n.ant-btn-dangerous.ant-btn-primary[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-link {\n color: #ff4d4f;\n background: transparent;\n border-color: transparent;\n box-shadow: none;\n}\n.ant-btn-dangerous.ant-btn-link > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-link > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-link:hover,\n.ant-btn-dangerous.ant-btn-link:focus {\n color: #40a9ff;\n background: transparent;\n border-color: #40a9ff;\n}\n.ant-btn-dangerous.ant-btn-link:hover > a:only-child,\n.ant-btn-dangerous.ant-btn-link:focus > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-link:hover > a:only-child::after,\n.ant-btn-dangerous.ant-btn-link:focus > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-link:active {\n color: #096dd9;\n background: transparent;\n border-color: #096dd9;\n}\n.ant-btn-dangerous.ant-btn-link:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-link:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-link[disabled],\n.ant-btn-dangerous.ant-btn-link[disabled]:hover,\n.ant-btn-dangerous.ant-btn-link[disabled]:focus,\n.ant-btn-dangerous.ant-btn-link[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n border-color: #d9d9d9;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn-dangerous.ant-btn-link[disabled] > a:only-child,\n.ant-btn-dangerous.ant-btn-link[disabled]:hover > a:only-child,\n.ant-btn-dangerous.ant-btn-link[disabled]:focus > 
a:only-child,\n.ant-btn-dangerous.ant-btn-link[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-link[disabled] > a:only-child::after,\n.ant-btn-dangerous.ant-btn-link[disabled]:hover > a:only-child::after,\n.ant-btn-dangerous.ant-btn-link[disabled]:focus > a:only-child::after,\n.ant-btn-dangerous.ant-btn-link[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-link:hover,\n.ant-btn-dangerous.ant-btn-link:focus {\n color: #ff7875;\n background: transparent;\n border-color: transparent;\n}\n.ant-btn-dangerous.ant-btn-link:hover > a:only-child,\n.ant-btn-dangerous.ant-btn-link:focus > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-link:hover > a:only-child::after,\n.ant-btn-dangerous.ant-btn-link:focus > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-link:active {\n color: #d9363e;\n background: transparent;\n border-color: transparent;\n}\n.ant-btn-dangerous.ant-btn-link:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-link:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-link[disabled],\n.ant-btn-dangerous.ant-btn-link[disabled]:hover,\n.ant-btn-dangerous.ant-btn-link[disabled]:focus,\n.ant-btn-dangerous.ant-btn-link[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: transparent;\n border-color: transparent;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn-dangerous.ant-btn-link[disabled] > a:only-child,\n.ant-btn-dangerous.ant-btn-link[disabled]:hover > a:only-child,\n.ant-btn-dangerous.ant-btn-link[disabled]:focus > a:only-child,\n.ant-btn-dangerous.ant-btn-link[disabled]:active > a:only-child {\n 
color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-link[disabled] > a:only-child::after,\n.ant-btn-dangerous.ant-btn-link[disabled]:hover > a:only-child::after,\n.ant-btn-dangerous.ant-btn-link[disabled]:focus > a:only-child::after,\n.ant-btn-dangerous.ant-btn-link[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-text {\n color: #ff4d4f;\n background: transparent;\n border-color: transparent;\n box-shadow: none;\n}\n.ant-btn-dangerous.ant-btn-text > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-text > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-text:hover,\n.ant-btn-dangerous.ant-btn-text:focus {\n color: #40a9ff;\n background: transparent;\n border-color: #40a9ff;\n}\n.ant-btn-dangerous.ant-btn-text:hover > a:only-child,\n.ant-btn-dangerous.ant-btn-text:focus > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-text:hover > a:only-child::after,\n.ant-btn-dangerous.ant-btn-text:focus > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-text:active {\n color: #096dd9;\n background: transparent;\n border-color: #096dd9;\n}\n.ant-btn-dangerous.ant-btn-text:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-text:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-text[disabled],\n.ant-btn-dangerous.ant-btn-text[disabled]:hover,\n.ant-btn-dangerous.ant-btn-text[disabled]:focus,\n.ant-btn-dangerous.ant-btn-text[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n border-color: #d9d9d9;\n text-shadow: none;\n 
box-shadow: none;\n}\n.ant-btn-dangerous.ant-btn-text[disabled] > a:only-child,\n.ant-btn-dangerous.ant-btn-text[disabled]:hover > a:only-child,\n.ant-btn-dangerous.ant-btn-text[disabled]:focus > a:only-child,\n.ant-btn-dangerous.ant-btn-text[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-text[disabled] > a:only-child::after,\n.ant-btn-dangerous.ant-btn-text[disabled]:hover > a:only-child::after,\n.ant-btn-dangerous.ant-btn-text[disabled]:focus > a:only-child::after,\n.ant-btn-dangerous.ant-btn-text[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-text:hover,\n.ant-btn-dangerous.ant-btn-text:focus {\n color: #ff7875;\n background: rgba(0, 0, 0, 0.018);\n border-color: transparent;\n}\n.ant-btn-dangerous.ant-btn-text:hover > a:only-child,\n.ant-btn-dangerous.ant-btn-text:focus > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-text:hover > a:only-child::after,\n.ant-btn-dangerous.ant-btn-text:focus > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-text:active {\n color: #d9363e;\n background: rgba(0, 0, 0, 0.028);\n border-color: transparent;\n}\n.ant-btn-dangerous.ant-btn-text:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-text:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-dangerous.ant-btn-text[disabled],\n.ant-btn-dangerous.ant-btn-text[disabled]:hover,\n.ant-btn-dangerous.ant-btn-text[disabled]:focus,\n.ant-btn-dangerous.ant-btn-text[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: transparent;\n border-color: transparent;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn-dangerous.ant-btn-text[disabled] 
> a:only-child,\n.ant-btn-dangerous.ant-btn-text[disabled]:hover > a:only-child,\n.ant-btn-dangerous.ant-btn-text[disabled]:focus > a:only-child,\n.ant-btn-dangerous.ant-btn-text[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-dangerous.ant-btn-text[disabled] > a:only-child::after,\n.ant-btn-dangerous.ant-btn-text[disabled]:hover > a:only-child::after,\n.ant-btn-dangerous.ant-btn-text[disabled]:focus > a:only-child::after,\n.ant-btn-dangerous.ant-btn-text[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-icon-only {\n width: 32px;\n height: 32px;\n padding: 2.4px 0;\n font-size: 16px;\n border-radius: 2px;\n vertical-align: -1px;\n}\n.ant-btn-icon-only > * {\n font-size: 16px;\n}\n.ant-btn-icon-only.ant-btn-lg {\n width: 40px;\n height: 40px;\n padding: 4.9px 0;\n font-size: 18px;\n border-radius: 2px;\n}\n.ant-btn-icon-only.ant-btn-lg > * {\n font-size: 18px;\n}\n.ant-btn-icon-only.ant-btn-sm {\n width: 24px;\n height: 24px;\n padding: 0px 0;\n font-size: 14px;\n border-radius: 2px;\n}\n.ant-btn-icon-only.ant-btn-sm > * {\n font-size: 14px;\n}\n.ant-btn-round {\n height: 32px;\n padding: 4px 16px;\n font-size: 14px;\n border-radius: 32px;\n}\n.ant-btn-round.ant-btn-lg {\n height: 40px;\n padding: 6.4px 20px;\n font-size: 16px;\n border-radius: 40px;\n}\n.ant-btn-round.ant-btn-sm {\n height: 24px;\n padding: 0px 12px;\n font-size: 14px;\n border-radius: 24px;\n}\n.ant-btn-round.ant-btn-icon-only {\n width: auto;\n}\n.ant-btn-circle {\n min-width: 32px;\n padding-right: 0;\n padding-left: 0;\n text-align: center;\n border-radius: 50%;\n}\n.ant-btn-circle.ant-btn-lg {\n min-width: 40px;\n border-radius: 50%;\n}\n.ant-btn-circle.ant-btn-sm {\n min-width: 24px;\n border-radius: 50%;\n}\n.ant-btn::before {\n position: absolute;\n top: -1px;\n right: -1px;\n bottom: -1px;\n left: -1px;\n z-index: 1;\n display: none;\n background: #fff;\n 
border-radius: inherit;\n opacity: 0.35;\n transition: opacity 0.2s;\n content: '';\n pointer-events: none;\n}\n.ant-btn .anticon {\n transition: margin-left 0.3s cubic-bezier(0.645, 0.045, 0.355, 1);\n}\n.ant-btn .anticon.anticon-plus > svg,\n.ant-btn .anticon.anticon-minus > svg {\n shape-rendering: optimizeSpeed;\n}\n.ant-btn.ant-btn-loading {\n position: relative;\n}\n.ant-btn.ant-btn-loading:not([disabled]) {\n pointer-events: none;\n}\n.ant-btn.ant-btn-loading::before {\n display: block;\n}\n.ant-btn > .ant-btn-loading-icon {\n transition: all 0.3s cubic-bezier(0.645, 0.045, 0.355, 1);\n}\n.ant-btn > .ant-btn-loading-icon .anticon {\n padding-right: 8px;\n -webkit-animation: none;\n animation: none;\n}\n.ant-btn > .ant-btn-loading-icon .anticon svg {\n -webkit-animation: loadingCircle 1s infinite linear;\n animation: loadingCircle 1s infinite linear;\n}\n.ant-btn > .ant-btn-loading-icon:only-child .anticon {\n padding-right: 0;\n}\n.ant-btn-group {\n position: relative;\n display: inline-flex;\n}\n.ant-btn-group > .ant-btn,\n.ant-btn-group > span > .ant-btn {\n position: relative;\n}\n.ant-btn-group > .ant-btn:hover,\n.ant-btn-group > span > .ant-btn:hover,\n.ant-btn-group > .ant-btn:focus,\n.ant-btn-group > span > .ant-btn:focus,\n.ant-btn-group > .ant-btn:active,\n.ant-btn-group > span > .ant-btn:active {\n z-index: 2;\n}\n.ant-btn-group > .ant-btn[disabled],\n.ant-btn-group > span > .ant-btn[disabled] {\n z-index: 0;\n}\n.ant-btn-group .ant-btn-icon-only {\n font-size: 14px;\n}\n.ant-btn-group-lg > .ant-btn,\n.ant-btn-group-lg > span > .ant-btn {\n height: 40px;\n padding: 6.4px 15px;\n font-size: 16px;\n border-radius: 0;\n}\n.ant-btn-group-lg .ant-btn.ant-btn-icon-only {\n width: 40px;\n height: 40px;\n padding-right: 0;\n padding-left: 0;\n}\n.ant-btn-group-sm > .ant-btn,\n.ant-btn-group-sm > span > .ant-btn {\n height: 24px;\n padding: 0px 7px;\n font-size: 14px;\n border-radius: 0;\n}\n.ant-btn-group-sm > .ant-btn > .anticon,\n.ant-btn-group-sm > span 
> .ant-btn > .anticon {\n font-size: 14px;\n}\n.ant-btn-group-sm .ant-btn.ant-btn-icon-only {\n width: 24px;\n height: 24px;\n padding-right: 0;\n padding-left: 0;\n}\n.ant-btn-group .ant-btn + .ant-btn,\n.ant-btn + .ant-btn-group,\n.ant-btn-group span + .ant-btn,\n.ant-btn-group .ant-btn + span,\n.ant-btn-group > span + span,\n.ant-btn-group + .ant-btn,\n.ant-btn-group + .ant-btn-group {\n margin-left: -1px;\n}\n.ant-btn-group .ant-btn-primary + .ant-btn:not(.ant-btn-primary):not([disabled]) {\n border-left-color: transparent;\n}\n.ant-btn-group .ant-btn {\n border-radius: 0;\n}\n.ant-btn-group > .ant-btn:first-child,\n.ant-btn-group > span:first-child > .ant-btn {\n margin-left: 0;\n}\n.ant-btn-group > .ant-btn:only-child {\n border-radius: 2px;\n}\n.ant-btn-group > span:only-child > .ant-btn {\n border-radius: 2px;\n}\n.ant-btn-group > .ant-btn:first-child:not(:last-child),\n.ant-btn-group > span:first-child:not(:last-child) > .ant-btn {\n border-top-left-radius: 2px;\n border-bottom-left-radius: 2px;\n}\n.ant-btn-group > .ant-btn:last-child:not(:first-child),\n.ant-btn-group > span:last-child:not(:first-child) > .ant-btn {\n border-top-right-radius: 2px;\n border-bottom-right-radius: 2px;\n}\n.ant-btn-group-sm > .ant-btn:only-child {\n border-radius: 2px;\n}\n.ant-btn-group-sm > span:only-child > .ant-btn {\n border-radius: 2px;\n}\n.ant-btn-group-sm > .ant-btn:first-child:not(:last-child),\n.ant-btn-group-sm > span:first-child:not(:last-child) > .ant-btn {\n border-top-left-radius: 2px;\n border-bottom-left-radius: 2px;\n}\n.ant-btn-group-sm > .ant-btn:last-child:not(:first-child),\n.ant-btn-group-sm > span:last-child:not(:first-child) > .ant-btn {\n border-top-right-radius: 2px;\n border-bottom-right-radius: 2px;\n}\n.ant-btn-group > .ant-btn-group {\n float: left;\n}\n.ant-btn-group > .ant-btn-group:not(:first-child):not(:last-child) > .ant-btn {\n border-radius: 0;\n}\n.ant-btn-group > .ant-btn-group:first-child:not(:last-child) > .ant-btn:last-child {\n 
padding-right: 8px;\n border-top-right-radius: 0;\n border-bottom-right-radius: 0;\n}\n.ant-btn-group > .ant-btn-group:last-child:not(:first-child) > .ant-btn:first-child {\n padding-left: 8px;\n border-top-left-radius: 0;\n border-bottom-left-radius: 0;\n}\n.ant-btn-rtl.ant-btn-group .ant-btn + .ant-btn,\n.ant-btn-rtl.ant-btn + .ant-btn-group,\n.ant-btn-rtl.ant-btn-group span + .ant-btn,\n.ant-btn-rtl.ant-btn-group .ant-btn + span,\n.ant-btn-rtl.ant-btn-group > span + span,\n.ant-btn-rtl.ant-btn-group + .ant-btn,\n.ant-btn-rtl.ant-btn-group + .ant-btn-group,\n.ant-btn-group-rtl.ant-btn-group .ant-btn + .ant-btn,\n.ant-btn-group-rtl.ant-btn + .ant-btn-group,\n.ant-btn-group-rtl.ant-btn-group span + .ant-btn,\n.ant-btn-group-rtl.ant-btn-group .ant-btn + span,\n.ant-btn-group-rtl.ant-btn-group > span + span,\n.ant-btn-group-rtl.ant-btn-group + .ant-btn,\n.ant-btn-group-rtl.ant-btn-group + .ant-btn-group {\n margin-right: -1px;\n margin-left: auto;\n}\n.ant-btn-group.ant-btn-group-rtl {\n direction: rtl;\n}\n.ant-btn-group-rtl.ant-btn-group > .ant-btn:first-child:not(:last-child),\n.ant-btn-group-rtl.ant-btn-group > span:first-child:not(:last-child) > .ant-btn {\n border-top-left-radius: 0;\n border-top-right-radius: 2px;\n border-bottom-right-radius: 2px;\n border-bottom-left-radius: 0;\n}\n.ant-btn-group-rtl.ant-btn-group > .ant-btn:last-child:not(:first-child),\n.ant-btn-group-rtl.ant-btn-group > span:last-child:not(:first-child) > .ant-btn {\n border-top-left-radius: 2px;\n border-top-right-radius: 0;\n border-bottom-right-radius: 0;\n border-bottom-left-radius: 2px;\n}\n.ant-btn-group-rtl.ant-btn-group-sm > .ant-btn:first-child:not(:last-child),\n.ant-btn-group-rtl.ant-btn-group-sm > span:first-child:not(:last-child) > .ant-btn {\n border-top-left-radius: 0;\n border-top-right-radius: 2px;\n border-bottom-right-radius: 2px;\n border-bottom-left-radius: 0;\n}\n.ant-btn-group-rtl.ant-btn-group-sm > 
.ant-btn:last-child:not(:first-child),\n.ant-btn-group-rtl.ant-btn-group-sm > span:last-child:not(:first-child) > .ant-btn {\n border-top-left-radius: 2px;\n border-top-right-radius: 0;\n border-bottom-right-radius: 0;\n border-bottom-left-radius: 2px;\n}\n.ant-btn:focus > span,\n.ant-btn:active > span {\n position: relative;\n}\n.ant-btn > .anticon + span,\n.ant-btn > span + .anticon {\n margin-left: 8px;\n}\n.ant-btn-background-ghost {\n color: #fff;\n background: transparent !important;\n border-color: #fff;\n}\n.ant-btn-background-ghost.ant-btn-primary {\n color: #1890ff;\n background: transparent;\n border-color: #1890ff;\n text-shadow: none;\n}\n.ant-btn-background-ghost.ant-btn-primary > a:only-child {\n color: currentColor;\n}\n.ant-btn-background-ghost.ant-btn-primary > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-background-ghost.ant-btn-primary:hover,\n.ant-btn-background-ghost.ant-btn-primary:focus {\n color: #40a9ff;\n background: transparent;\n border-color: #40a9ff;\n}\n.ant-btn-background-ghost.ant-btn-primary:hover > a:only-child,\n.ant-btn-background-ghost.ant-btn-primary:focus > a:only-child {\n color: currentColor;\n}\n.ant-btn-background-ghost.ant-btn-primary:hover > a:only-child::after,\n.ant-btn-background-ghost.ant-btn-primary:focus > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-background-ghost.ant-btn-primary:active {\n color: #096dd9;\n background: transparent;\n border-color: #096dd9;\n}\n.ant-btn-background-ghost.ant-btn-primary:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-background-ghost.ant-btn-primary:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: 
'';\n}\n.ant-btn-background-ghost.ant-btn-primary[disabled],\n.ant-btn-background-ghost.ant-btn-primary[disabled]:hover,\n.ant-btn-background-ghost.ant-btn-primary[disabled]:focus,\n.ant-btn-background-ghost.ant-btn-primary[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n border-color: #d9d9d9;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn-background-ghost.ant-btn-primary[disabled] > a:only-child,\n.ant-btn-background-ghost.ant-btn-primary[disabled]:hover > a:only-child,\n.ant-btn-background-ghost.ant-btn-primary[disabled]:focus > a:only-child,\n.ant-btn-background-ghost.ant-btn-primary[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-background-ghost.ant-btn-primary[disabled] > a:only-child::after,\n.ant-btn-background-ghost.ant-btn-primary[disabled]:hover > a:only-child::after,\n.ant-btn-background-ghost.ant-btn-primary[disabled]:focus > a:only-child::after,\n.ant-btn-background-ghost.ant-btn-primary[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-background-ghost.ant-btn-danger {\n color: #ff4d4f;\n background: transparent;\n border-color: #ff4d4f;\n text-shadow: none;\n}\n.ant-btn-background-ghost.ant-btn-danger > a:only-child {\n color: currentColor;\n}\n.ant-btn-background-ghost.ant-btn-danger > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-background-ghost.ant-btn-danger:hover,\n.ant-btn-background-ghost.ant-btn-danger:focus {\n color: #ff7875;\n background: transparent;\n border-color: #ff7875;\n}\n.ant-btn-background-ghost.ant-btn-danger:hover > a:only-child,\n.ant-btn-background-ghost.ant-btn-danger:focus > a:only-child {\n color: currentColor;\n}\n.ant-btn-background-ghost.ant-btn-danger:hover > a:only-child::after,\n.ant-btn-background-ghost.ant-btn-danger:focus > a:only-child::after {\n 
position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-background-ghost.ant-btn-danger:active {\n color: #d9363e;\n background: transparent;\n border-color: #d9363e;\n}\n.ant-btn-background-ghost.ant-btn-danger:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-background-ghost.ant-btn-danger:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-background-ghost.ant-btn-danger[disabled],\n.ant-btn-background-ghost.ant-btn-danger[disabled]:hover,\n.ant-btn-background-ghost.ant-btn-danger[disabled]:focus,\n.ant-btn-background-ghost.ant-btn-danger[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n border-color: #d9d9d9;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn-background-ghost.ant-btn-danger[disabled] > a:only-child,\n.ant-btn-background-ghost.ant-btn-danger[disabled]:hover > a:only-child,\n.ant-btn-background-ghost.ant-btn-danger[disabled]:focus > a:only-child,\n.ant-btn-background-ghost.ant-btn-danger[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-background-ghost.ant-btn-danger[disabled] > a:only-child::after,\n.ant-btn-background-ghost.ant-btn-danger[disabled]:hover > a:only-child::after,\n.ant-btn-background-ghost.ant-btn-danger[disabled]:focus > a:only-child::after,\n.ant-btn-background-ghost.ant-btn-danger[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-background-ghost.ant-btn-dangerous {\n color: #ff4d4f;\n background: transparent;\n border-color: #ff4d4f;\n text-shadow: none;\n}\n.ant-btn-background-ghost.ant-btn-dangerous > a:only-child {\n color: currentColor;\n}\n.ant-btn-background-ghost.ant-btn-dangerous > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: 
transparent;\n content: '';\n}\n.ant-btn-background-ghost.ant-btn-dangerous:hover,\n.ant-btn-background-ghost.ant-btn-dangerous:focus {\n color: #ff7875;\n background: transparent;\n border-color: #ff7875;\n}\n.ant-btn-background-ghost.ant-btn-dangerous:hover > a:only-child,\n.ant-btn-background-ghost.ant-btn-dangerous:focus > a:only-child {\n color: currentColor;\n}\n.ant-btn-background-ghost.ant-btn-dangerous:hover > a:only-child::after,\n.ant-btn-background-ghost.ant-btn-dangerous:focus > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-background-ghost.ant-btn-dangerous:active {\n color: #d9363e;\n background: transparent;\n border-color: #d9363e;\n}\n.ant-btn-background-ghost.ant-btn-dangerous:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-background-ghost.ant-btn-dangerous:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-background-ghost.ant-btn-dangerous[disabled],\n.ant-btn-background-ghost.ant-btn-dangerous[disabled]:hover,\n.ant-btn-background-ghost.ant-btn-dangerous[disabled]:focus,\n.ant-btn-background-ghost.ant-btn-dangerous[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n border-color: #d9d9d9;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn-background-ghost.ant-btn-dangerous[disabled] > a:only-child,\n.ant-btn-background-ghost.ant-btn-dangerous[disabled]:hover > a:only-child,\n.ant-btn-background-ghost.ant-btn-dangerous[disabled]:focus > a:only-child,\n.ant-btn-background-ghost.ant-btn-dangerous[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-background-ghost.ant-btn-dangerous[disabled] > a:only-child::after,\n.ant-btn-background-ghost.ant-btn-dangerous[disabled]:hover > a:only-child::after,\n.ant-btn-background-ghost.ant-btn-dangerous[disabled]:focus > 
a:only-child::after,\n.ant-btn-background-ghost.ant-btn-dangerous[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link {\n color: #ff4d4f;\n background: transparent;\n border-color: transparent;\n text-shadow: none;\n}\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link > a:only-child {\n color: currentColor;\n}\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link:hover,\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link:focus {\n color: #ff7875;\n background: transparent;\n border-color: transparent;\n}\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link:hover > a:only-child,\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link:focus > a:only-child {\n color: currentColor;\n}\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link:hover > a:only-child::after,\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link:focus > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link:active {\n color: #d9363e;\n background: transparent;\n border-color: transparent;\n}\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: 
'';\n}\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link[disabled],\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link[disabled]:hover,\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link[disabled]:focus,\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link[disabled]:active {\n color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n border-color: #d9d9d9;\n text-shadow: none;\n box-shadow: none;\n}\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link[disabled] > a:only-child,\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link[disabled]:hover > a:only-child,\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link[disabled]:focus > a:only-child,\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link[disabled]:active > a:only-child {\n color: currentColor;\n}\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link[disabled] > a:only-child::after,\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link[disabled]:hover > a:only-child::after,\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link[disabled]:focus > a:only-child::after,\n.ant-btn-background-ghost.ant-btn-dangerous.ant-btn-link[disabled]:active > a:only-child::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n background: transparent;\n content: '';\n}\n.ant-btn-two-chinese-chars::first-letter {\n letter-spacing: 0.34em;\n}\n.ant-btn-two-chinese-chars > *:not(.anticon) {\n margin-right: -0.34em;\n letter-spacing: 0.34em;\n}\n.ant-btn-block {\n width: 100%;\n}\n.ant-btn:empty {\n display: inline-block;\n width: 0;\n visibility: hidden;\n content: '\\a0';\n}\na.ant-btn {\n padding-top: 0.01px !important;\n line-height: 30px;\n}\na.ant-btn-lg {\n line-height: 38px;\n}\na.ant-btn-sm {\n line-height: 22px;\n}\n.ant-btn-rtl {\n direction: rtl;\n}\n.ant-btn-group-rtl.ant-btn-group .ant-btn-primary:last-child:not(:first-child),\n.ant-btn-group-rtl.ant-btn-group .ant-btn-primary + .ant-btn-primary {\n border-right-color: #40a9ff;\n 
border-left-color: #d9d9d9;\n}\n.ant-btn-group-rtl.ant-btn-group .ant-btn-primary:last-child:not(:first-child)[disabled],\n.ant-btn-group-rtl.ant-btn-group .ant-btn-primary + .ant-btn-primary[disabled] {\n border-right-color: #d9d9d9;\n border-left-color: #40a9ff;\n}\n.ant-btn-rtl.ant-btn > .ant-btn-loading-icon .anticon {\n padding-right: 0;\n padding-left: 8px;\n}\n.ant-btn > .ant-btn-loading-icon:only-child .anticon {\n padding-right: 0;\n padding-left: 0;\n}\n.ant-btn-rtl.ant-btn > .anticon + span,\n.ant-btn-rtl.ant-btn > span + .anticon {\n margin-right: 8px;\n margin-left: 0;\n}\n",""]);const a=o},4078:(n,e,t)=>{"use strict";t.d(e,{Z:()=>a});var r=t(3645),o=t.n(r)()((function(n){return n[1]}));o.push([n.id,"/* stylelint-disable at-rule-empty-line-before,at-rule-name-space-after,at-rule-no-unknown */\n/* stylelint-disable no-duplicate-selectors */\n/* stylelint-disable */\n/* stylelint-disable declaration-bang-space-before,no-duplicate-selectors,string-no-newline */\n@-webkit-keyframes antCheckboxEffect {\n 0% {\n transform: scale(1);\n opacity: 0.5;\n }\n 100% {\n transform: scale(1.6);\n opacity: 0;\n }\n}\n@keyframes antCheckboxEffect {\n 0% {\n transform: scale(1);\n opacity: 0.5;\n }\n 100% {\n transform: scale(1.6);\n opacity: 0;\n }\n}\n.ant-checkbox {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n color: rgba(0, 0, 0, 0.85);\n font-size: 14px;\n font-variant: tabular-nums;\n line-height: 1.5715;\n list-style: none;\n font-feature-settings: 'tnum';\n position: relative;\n top: 0.2em;\n line-height: 1;\n white-space: nowrap;\n outline: none;\n cursor: pointer;\n}\n.ant-checkbox-wrapper:hover .ant-checkbox-inner,\n.ant-checkbox:hover .ant-checkbox-inner,\n.ant-checkbox-input:focus + .ant-checkbox-inner {\n border-color: #1890ff;\n}\n.ant-checkbox-checked::after {\n position: absolute;\n top: 0;\n left: 0;\n width: 100%;\n height: 100%;\n border: 1px solid #1890ff;\n border-radius: 2px;\n visibility: hidden;\n -webkit-animation: antCheckboxEffect 
0.36s ease-in-out;\n animation: antCheckboxEffect 0.36s ease-in-out;\n -webkit-animation-fill-mode: backwards;\n animation-fill-mode: backwards;\n content: '';\n}\n.ant-checkbox:hover::after,\n.ant-checkbox-wrapper:hover .ant-checkbox::after {\n visibility: visible;\n}\n.ant-checkbox-inner {\n position: relative;\n top: 0;\n left: 0;\n display: block;\n width: 16px;\n height: 16px;\n direction: ltr;\n background-color: #fff;\n border: 1px solid #d9d9d9;\n border-radius: 2px;\n border-collapse: separate;\n transition: all 0.3s;\n}\n.ant-checkbox-inner::after {\n position: absolute;\n top: 50%;\n left: 22%;\n display: table;\n width: 5.71428571px;\n height: 9.14285714px;\n border: 2px solid #fff;\n border-top: 0;\n border-left: 0;\n transform: rotate(45deg) scale(0) translate(-50%, -50%);\n opacity: 0;\n transition: all 0.1s cubic-bezier(0.71, -0.46, 0.88, 0.6), opacity 0.1s;\n content: ' ';\n}\n.ant-checkbox-input {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n z-index: 1;\n width: 100%;\n height: 100%;\n cursor: pointer;\n opacity: 0;\n}\n.ant-checkbox-checked .ant-checkbox-inner::after {\n position: absolute;\n display: table;\n border: 2px solid #fff;\n border-top: 0;\n border-left: 0;\n transform: rotate(45deg) scale(1) translate(-50%, -50%);\n opacity: 1;\n transition: all 0.2s cubic-bezier(0.12, 0.4, 0.29, 1.46) 0.1s;\n content: ' ';\n}\n.ant-checkbox-checked .ant-checkbox-inner {\n background-color: #1890ff;\n border-color: #1890ff;\n}\n.ant-checkbox-disabled {\n cursor: not-allowed;\n}\n.ant-checkbox-disabled.ant-checkbox-checked .ant-checkbox-inner::after {\n border-color: rgba(0, 0, 0, 0.25);\n -webkit-animation-name: none;\n animation-name: none;\n}\n.ant-checkbox-disabled .ant-checkbox-input {\n cursor: not-allowed;\n}\n.ant-checkbox-disabled .ant-checkbox-inner {\n background-color: #f5f5f5;\n border-color: #d9d9d9 !important;\n}\n.ant-checkbox-disabled .ant-checkbox-inner::after {\n border-color: #f5f5f5;\n border-collapse: 
separate;\n -webkit-animation-name: none;\n animation-name: none;\n}\n.ant-checkbox-disabled + span {\n color: rgba(0, 0, 0, 0.25);\n cursor: not-allowed;\n}\n.ant-checkbox-disabled:hover::after,\n.ant-checkbox-wrapper:hover .ant-checkbox-disabled::after {\n visibility: hidden;\n}\n.ant-checkbox-wrapper {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n color: rgba(0, 0, 0, 0.85);\n font-size: 14px;\n font-variant: tabular-nums;\n line-height: 1.5715;\n list-style: none;\n font-feature-settings: 'tnum';\n display: inline-flex;\n align-items: baseline;\n line-height: unset;\n cursor: pointer;\n}\n.ant-checkbox-wrapper.ant-checkbox-wrapper-disabled {\n cursor: not-allowed;\n}\n.ant-checkbox-wrapper + .ant-checkbox-wrapper {\n margin-left: 8px;\n}\n.ant-checkbox + span {\n padding-right: 8px;\n padding-left: 8px;\n}\n.ant-checkbox-group {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n color: rgba(0, 0, 0, 0.85);\n font-size: 14px;\n font-variant: tabular-nums;\n line-height: 1.5715;\n list-style: none;\n font-feature-settings: 'tnum';\n display: inline-block;\n}\n.ant-checkbox-group-item {\n margin-right: 8px;\n}\n.ant-checkbox-group-item:last-child {\n margin-right: 0;\n}\n.ant-checkbox-group-item + .ant-checkbox-group-item {\n margin-left: 0;\n}\n.ant-checkbox-indeterminate .ant-checkbox-inner {\n background-color: #fff;\n border-color: #d9d9d9;\n}\n.ant-checkbox-indeterminate .ant-checkbox-inner::after {\n top: 50%;\n left: 50%;\n width: 8px;\n height: 8px;\n background-color: #1890ff;\n border: 0;\n transform: translate(-50%, -50%) scale(1);\n opacity: 1;\n content: ' ';\n}\n.ant-checkbox-indeterminate.ant-checkbox-disabled .ant-checkbox-inner::after {\n background-color: rgba(0, 0, 0, 0.25);\n border-color: rgba(0, 0, 0, 0.25);\n}\n.ant-checkbox-rtl {\n direction: rtl;\n}\n.ant-checkbox-group-rtl .ant-checkbox-group-item {\n margin-right: 0;\n margin-left: 8px;\n}\n.ant-checkbox-group-rtl .ant-checkbox-group-item:last-child {\n margin-left: 0 
!important;\n}\n.ant-checkbox-group-rtl .ant-checkbox-group-item + .ant-checkbox-group-item {\n margin-left: 8px;\n}\n",""]);const a=o},6096:(n,e,t)=>{"use strict";t.d(e,{Z:()=>a});var r=t(3645),o=t.n(r)()((function(n){return n[1]}));o.push([n.id,"/* stylelint-disable at-rule-empty-line-before,at-rule-name-space-after,at-rule-no-unknown */\n/* stylelint-disable no-duplicate-selectors */\n/* stylelint-disable */\n/* stylelint-disable declaration-bang-space-before,no-duplicate-selectors,string-no-newline */\n.ant-dropdown-menu-item.ant-dropdown-menu-item-danger {\n color: #ff4d4f;\n}\n.ant-dropdown-menu-item.ant-dropdown-menu-item-danger:hover {\n color: #fff;\n background-color: #ff4d4f;\n}\n.ant-dropdown {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n color: rgba(0, 0, 0, 0.85);\n font-size: 14px;\n font-variant: tabular-nums;\n line-height: 1.5715;\n list-style: none;\n font-feature-settings: 'tnum';\n position: absolute;\n top: -9999px;\n left: -9999px;\n z-index: 1050;\n display: block;\n}\n.ant-dropdown::before {\n position: absolute;\n top: -4px;\n right: 0;\n bottom: -4px;\n left: -7px;\n z-index: -9999;\n opacity: 0.0001;\n content: ' ';\n}\n.ant-dropdown-wrap {\n position: relative;\n}\n.ant-dropdown-wrap .ant-btn > .anticon-down {\n font-size: 10px;\n}\n.ant-dropdown-wrap .anticon-down::before {\n transition: transform 0.2s;\n}\n.ant-dropdown-wrap-open .anticon-down::before {\n transform: rotate(180deg);\n}\n.ant-dropdown-hidden,\n.ant-dropdown-menu-hidden {\n display: none;\n}\n.ant-dropdown-show-arrow.ant-dropdown-placement-topCenter,\n.ant-dropdown-show-arrow.ant-dropdown-placement-topLeft,\n.ant-dropdown-show-arrow.ant-dropdown-placement-topRight {\n padding-bottom: 10px;\n}\n.ant-dropdown-show-arrow.ant-dropdown-placement-bottomCenter,\n.ant-dropdown-show-arrow.ant-dropdown-placement-bottomLeft,\n.ant-dropdown-show-arrow.ant-dropdown-placement-bottomRight {\n padding-top: 10px;\n}\n.ant-dropdown-arrow {\n position: absolute;\n z-index: 1;\n 
display: block;\n width: 8.48528137px;\n height: 8.48528137px;\n background: transparent;\n border-style: solid;\n border-width: 4.24264069px;\n transform: rotate(45deg);\n}\n.ant-dropdown-placement-topCenter > .ant-dropdown-arrow,\n.ant-dropdown-placement-topLeft > .ant-dropdown-arrow,\n.ant-dropdown-placement-topRight > .ant-dropdown-arrow {\n bottom: 6.2px;\n border-top-color: transparent;\n border-right-color: #fff;\n border-bottom-color: #fff;\n border-left-color: transparent;\n box-shadow: 3px 3px 7px rgba(0, 0, 0, 0.07);\n}\n.ant-dropdown-placement-topCenter > .ant-dropdown-arrow {\n left: 50%;\n transform: translateX(-50%) rotate(45deg);\n}\n.ant-dropdown-placement-topLeft > .ant-dropdown-arrow {\n left: 16px;\n}\n.ant-dropdown-placement-topRight > .ant-dropdown-arrow {\n right: 16px;\n}\n.ant-dropdown-placement-bottomCenter > .ant-dropdown-arrow,\n.ant-dropdown-placement-bottomLeft > .ant-dropdown-arrow,\n.ant-dropdown-placement-bottomRight > .ant-dropdown-arrow {\n top: 6px;\n border-top-color: #fff;\n border-right-color: transparent;\n border-bottom-color: transparent;\n border-left-color: #fff;\n box-shadow: -2px -2px 5px rgba(0, 0, 0, 0.06);\n}\n.ant-dropdown-placement-bottomCenter > .ant-dropdown-arrow {\n left: 50%;\n transform: translateX(-50%) rotate(45deg);\n}\n.ant-dropdown-placement-bottomLeft > .ant-dropdown-arrow {\n left: 16px;\n}\n.ant-dropdown-placement-bottomRight > .ant-dropdown-arrow {\n right: 16px;\n}\n.ant-dropdown-menu {\n position: relative;\n margin: 0;\n padding: 4px 0;\n text-align: left;\n list-style-type: none;\n background-color: #fff;\n background-clip: padding-box;\n border-radius: 2px;\n outline: none;\n box-shadow: 0 3px 6px -4px rgba(0, 0, 0, 0.12), 0 6px 16px 0 rgba(0, 0, 0, 0.08), 0 9px 28px 8px rgba(0, 0, 0, 0.05);\n}\n.ant-dropdown-menu-item-group-title {\n padding: 5px 12px;\n color: rgba(0, 0, 0, 0.45);\n transition: all 0.3s;\n}\n.ant-dropdown-menu-submenu-popup {\n position: absolute;\n z-index: 1050;\n 
background: transparent;\n box-shadow: none;\n transform-origin: 0 0;\n}\n.ant-dropdown-menu-submenu-popup ul,\n.ant-dropdown-menu-submenu-popup li {\n list-style: none;\n}\n.ant-dropdown-menu-submenu-popup ul {\n margin-right: 0.3em;\n margin-left: 0.3em;\n}\n.ant-dropdown-menu-item,\n.ant-dropdown-menu-submenu-title {\n clear: both;\n margin: 0;\n padding: 5px 12px;\n color: rgba(0, 0, 0, 0.85);\n font-weight: normal;\n font-size: 14px;\n line-height: 22px;\n white-space: nowrap;\n cursor: pointer;\n transition: all 0.3s;\n}\n.ant-dropdown-menu-item > .anticon:first-child,\n.ant-dropdown-menu-submenu-title > .anticon:first-child,\n.ant-dropdown-menu-item > a > .anticon:first-child,\n.ant-dropdown-menu-submenu-title > a > .anticon:first-child,\n.ant-dropdown-menu-item > span > .anticon:first-child,\n.ant-dropdown-menu-submenu-title > span > .anticon:first-child {\n min-width: 12px;\n margin-right: 8px;\n font-size: 12px;\n vertical-align: -0.1em;\n}\n.ant-dropdown-menu-item > a,\n.ant-dropdown-menu-submenu-title > a {\n display: block;\n margin: -5px -12px;\n padding: 5px 12px;\n color: rgba(0, 0, 0, 0.85);\n transition: all 0.3s;\n}\n.ant-dropdown-menu-item > a:hover,\n.ant-dropdown-menu-submenu-title > a:hover {\n color: rgba(0, 0, 0, 0.85);\n}\n.ant-dropdown-menu-item > .anticon + span > a,\n.ant-dropdown-menu-submenu-title > .anticon + span > a {\n color: rgba(0, 0, 0, 0.85);\n transition: all 0.3s;\n}\n.ant-dropdown-menu-item > .anticon + span > a:hover,\n.ant-dropdown-menu-submenu-title > .anticon + span > a:hover {\n color: rgba(0, 0, 0, 0.85);\n}\n.ant-dropdown-menu-item-selected,\n.ant-dropdown-menu-submenu-title-selected,\n.ant-dropdown-menu-item-selected > a,\n.ant-dropdown-menu-submenu-title-selected > a {\n color: #1890ff;\n background-color: #e6f7ff;\n}\n.ant-dropdown-menu-item:hover,\n.ant-dropdown-menu-submenu-title:hover {\n background-color: #f5f5f5;\n}\n.ant-dropdown-menu-item-disabled,\n.ant-dropdown-menu-submenu-title-disabled {\n color: 
rgba(0, 0, 0, 0.25);\n cursor: not-allowed;\n}\n.ant-dropdown-menu-item-disabled:hover,\n.ant-dropdown-menu-submenu-title-disabled:hover {\n color: rgba(0, 0, 0, 0.25);\n background-color: #fff;\n cursor: not-allowed;\n}\n.ant-dropdown-menu-item-disabled > .anticon + span > a,\n.ant-dropdown-menu-submenu-title-disabled > .anticon + span > a,\n.ant-dropdown-menu-item-disabled > a,\n.ant-dropdown-menu-submenu-title-disabled > a {\n position: relative;\n color: rgba(0, 0, 0, 0.25);\n pointer-events: none;\n}\n.ant-dropdown-menu-item-disabled > .anticon + span > a::after,\n.ant-dropdown-menu-submenu-title-disabled > .anticon + span > a::after,\n.ant-dropdown-menu-item-disabled > a::after,\n.ant-dropdown-menu-submenu-title-disabled > a::after {\n position: absolute;\n top: 0;\n left: 0;\n width: 100%;\n height: 100%;\n cursor: not-allowed;\n content: '';\n}\n.ant-dropdown-menu-item-divider,\n.ant-dropdown-menu-submenu-title-divider {\n height: 1px;\n margin: 4px 0;\n overflow: hidden;\n line-height: 0;\n background-color: #f0f0f0;\n}\n.ant-dropdown-menu-item .ant-dropdown-menu-submenu-expand-icon,\n.ant-dropdown-menu-submenu-title .ant-dropdown-menu-submenu-expand-icon {\n position: absolute;\n right: 8px;\n}\n.ant-dropdown-menu-item .ant-dropdown-menu-submenu-expand-icon .ant-dropdown-menu-submenu-arrow-icon,\n.ant-dropdown-menu-submenu-title .ant-dropdown-menu-submenu-expand-icon .ant-dropdown-menu-submenu-arrow-icon {\n margin-right: 0 !important;\n color: rgba(0, 0, 0, 0.45);\n font-size: 10px;\n font-style: normal;\n}\n.ant-dropdown-menu-item-group-list {\n margin: 0 8px;\n padding: 0;\n list-style: none;\n}\n.ant-dropdown-menu-submenu-title {\n padding-right: 24px;\n}\n.ant-dropdown-menu-submenu-vertical {\n position: relative;\n}\n.ant-dropdown-menu-submenu-vertical > .ant-dropdown-menu {\n position: absolute;\n top: 0;\n left: 100%;\n min-width: 100%;\n margin-left: 4px;\n transform-origin: 0 0;\n}\n.ant-dropdown-menu-submenu.ant-dropdown-menu-submenu-disabled 
.ant-dropdown-menu-submenu-title,\n.ant-dropdown-menu-submenu.ant-dropdown-menu-submenu-disabled .ant-dropdown-menu-submenu-title .ant-dropdown-menu-submenu-arrow-icon {\n color: rgba(0, 0, 0, 0.25);\n background-color: #fff;\n cursor: not-allowed;\n}\n.ant-dropdown-menu-submenu-selected .ant-dropdown-menu-submenu-title {\n color: #1890ff;\n}\n.ant-dropdown.slide-down-enter.slide-down-enter-active.ant-dropdown-placement-bottomLeft,\n.ant-dropdown.slide-down-appear.slide-down-appear-active.ant-dropdown-placement-bottomLeft,\n.ant-dropdown.slide-down-enter.slide-down-enter-active.ant-dropdown-placement-bottomCenter,\n.ant-dropdown.slide-down-appear.slide-down-appear-active.ant-dropdown-placement-bottomCenter,\n.ant-dropdown.slide-down-enter.slide-down-enter-active.ant-dropdown-placement-bottomRight,\n.ant-dropdown.slide-down-appear.slide-down-appear-active.ant-dropdown-placement-bottomRight {\n -webkit-animation-name: antSlideUpIn;\n animation-name: antSlideUpIn;\n}\n.ant-dropdown.slide-up-enter.slide-up-enter-active.ant-dropdown-placement-topLeft,\n.ant-dropdown.slide-up-appear.slide-up-appear-active.ant-dropdown-placement-topLeft,\n.ant-dropdown.slide-up-enter.slide-up-enter-active.ant-dropdown-placement-topCenter,\n.ant-dropdown.slide-up-appear.slide-up-appear-active.ant-dropdown-placement-topCenter,\n.ant-dropdown.slide-up-enter.slide-up-enter-active.ant-dropdown-placement-topRight,\n.ant-dropdown.slide-up-appear.slide-up-appear-active.ant-dropdown-placement-topRight {\n -webkit-animation-name: antSlideDownIn;\n animation-name: antSlideDownIn;\n}\n.ant-dropdown.slide-down-leave.slide-down-leave-active.ant-dropdown-placement-bottomLeft,\n.ant-dropdown.slide-down-leave.slide-down-leave-active.ant-dropdown-placement-bottomCenter,\n.ant-dropdown.slide-down-leave.slide-down-leave-active.ant-dropdown-placement-bottomRight {\n -webkit-animation-name: antSlideUpOut;\n animation-name: 
antSlideUpOut;\n}\n.ant-dropdown.slide-up-leave.slide-up-leave-active.ant-dropdown-placement-topLeft,\n.ant-dropdown.slide-up-leave.slide-up-leave-active.ant-dropdown-placement-topCenter,\n.ant-dropdown.slide-up-leave.slide-up-leave-active.ant-dropdown-placement-topRight {\n -webkit-animation-name: antSlideDownOut;\n animation-name: antSlideDownOut;\n}\n.ant-dropdown-trigger > .anticon.anticon-down,\n.ant-dropdown-link > .anticon.anticon-down,\n.ant-dropdown-button > .anticon.anticon-down {\n font-size: 10px;\n vertical-align: baseline;\n}\n.ant-dropdown-button {\n white-space: nowrap;\n}\n.ant-dropdown-button.ant-btn-group > .ant-btn:last-child:not(:first-child):not(.ant-btn-icon-only) {\n padding-right: 8px;\n padding-left: 8px;\n}\n.ant-dropdown-menu-dark,\n.ant-dropdown-menu-dark .ant-dropdown-menu {\n background: #001529;\n}\n.ant-dropdown-menu-dark .ant-dropdown-menu-item,\n.ant-dropdown-menu-dark .ant-dropdown-menu-submenu-title,\n.ant-dropdown-menu-dark .ant-dropdown-menu-item > a,\n.ant-dropdown-menu-dark .ant-dropdown-menu-item > .anticon + span > a {\n color: rgba(255, 255, 255, 0.65);\n}\n.ant-dropdown-menu-dark .ant-dropdown-menu-item .ant-dropdown-menu-submenu-arrow::after,\n.ant-dropdown-menu-dark .ant-dropdown-menu-submenu-title .ant-dropdown-menu-submenu-arrow::after,\n.ant-dropdown-menu-dark .ant-dropdown-menu-item > a .ant-dropdown-menu-submenu-arrow::after,\n.ant-dropdown-menu-dark .ant-dropdown-menu-item > .anticon + span > a .ant-dropdown-menu-submenu-arrow::after {\n color: rgba(255, 255, 255, 0.65);\n}\n.ant-dropdown-menu-dark .ant-dropdown-menu-item:hover,\n.ant-dropdown-menu-dark .ant-dropdown-menu-submenu-title:hover,\n.ant-dropdown-menu-dark .ant-dropdown-menu-item > a:hover,\n.ant-dropdown-menu-dark .ant-dropdown-menu-item > .anticon + span > a:hover {\n color: #fff;\n background: transparent;\n}\n.ant-dropdown-menu-dark .ant-dropdown-menu-item-selected,\n.ant-dropdown-menu-dark 
.ant-dropdown-menu-item-selected:hover,\n.ant-dropdown-menu-dark .ant-dropdown-menu-item-selected > a {\n color: #fff;\n background: #1890ff;\n}\n.ant-dropdown-rtl {\n direction: rtl;\n}\n.ant-dropdown-rtl.ant-dropdown::before {\n right: -7px;\n left: 0;\n}\n.ant-dropdown-menu.ant-dropdown-menu-rtl {\n direction: rtl;\n text-align: right;\n}\n.ant-dropdown-rtl .ant-dropdown-menu-item-group-title {\n direction: rtl;\n text-align: right;\n}\n.ant-dropdown-menu-submenu-popup.ant-dropdown-menu-submenu-rtl {\n transform-origin: 100% 0;\n}\n.ant-dropdown-rtl .ant-dropdown-menu-submenu-popup ul,\n.ant-dropdown-rtl .ant-dropdown-menu-submenu-popup li {\n text-align: right;\n}\n.ant-dropdown-rtl .ant-dropdown-menu-item,\n.ant-dropdown-rtl .ant-dropdown-menu-submenu-title {\n text-align: right;\n}\n.ant-dropdown-rtl .ant-dropdown-menu-item > .anticon:first-child,\n.ant-dropdown-rtl .ant-dropdown-menu-submenu-title > .anticon:first-child,\n.ant-dropdown-rtl .ant-dropdown-menu-item > span > .anticon:first-child,\n.ant-dropdown-rtl .ant-dropdown-menu-submenu-title > span > .anticon:first-child {\n margin-right: 0;\n margin-left: 8px;\n}\n.ant-dropdown-rtl .ant-dropdown-menu-item .ant-dropdown-menu-submenu-arrow,\n.ant-dropdown-rtl .ant-dropdown-menu-submenu-title .ant-dropdown-menu-submenu-arrow {\n right: auto;\n left: 8px;\n}\n.ant-dropdown-rtl .ant-dropdown-menu-item .ant-dropdown-menu-submenu-arrow-icon,\n.ant-dropdown-rtl .ant-dropdown-menu-submenu-title .ant-dropdown-menu-submenu-arrow-icon {\n margin-left: 0 !important;\n transform: scaleX(-1);\n}\n.ant-dropdown-rtl .ant-dropdown-menu-submenu-title {\n padding-right: 12px;\n padding-left: 24px;\n}\n.ant-dropdown-rtl .ant-dropdown-menu-submenu-vertical > .ant-dropdown-menu {\n right: 100%;\n left: 0;\n margin-right: 4px;\n margin-left: 0;\n}\n",""]);const a=o},4730:(n,e,t)=>{"use strict";t.d(e,{Z:()=>a});var r=t(3645),o=t.n(r)()((function(n){return n[1]}));o.push([n.id,"/* stylelint-disable 
at-rule-empty-line-before,at-rule-name-space-after,at-rule-no-unknown */\n/* stylelint-disable no-duplicate-selectors */\n/* stylelint-disable */\n/* stylelint-disable declaration-bang-space-before,no-duplicate-selectors,string-no-newline */\n.ant-empty {\n margin: 0 8px;\n font-size: 14px;\n line-height: 1.5715;\n text-align: center;\n}\n.ant-empty-image {\n height: 100px;\n margin-bottom: 8px;\n}\n.ant-empty-image img {\n height: 100%;\n}\n.ant-empty-image svg {\n height: 100%;\n margin: auto;\n}\n.ant-empty-footer {\n margin-top: 16px;\n}\n.ant-empty-normal {\n margin: 32px 0;\n color: rgba(0, 0, 0, 0.25);\n}\n.ant-empty-normal .ant-empty-image {\n height: 40px;\n}\n.ant-empty-small {\n margin: 8px 0;\n color: rgba(0, 0, 0, 0.25);\n}\n.ant-empty-small .ant-empty-image {\n height: 35px;\n}\n.ant-empty-img-default-ellipse {\n fill: #f5f5f5;\n fill-opacity: 0.8;\n}\n.ant-empty-img-default-path-1 {\n fill: #aeb8c2;\n}\n.ant-empty-img-default-path-2 {\n fill: url(#linearGradient-1);\n}\n.ant-empty-img-default-path-3 {\n fill: #f5f5f7;\n}\n.ant-empty-img-default-path-4 {\n fill: #dce0e6;\n}\n.ant-empty-img-default-path-5 {\n fill: #dce0e6;\n}\n.ant-empty-img-default-g {\n fill: #fff;\n}\n.ant-empty-img-simple-ellipse {\n fill: #f5f5f5;\n}\n.ant-empty-img-simple-g {\n stroke: #d9d9d9;\n}\n.ant-empty-img-simple-path {\n fill: #fafafa;\n}\n.ant-empty-rtl {\n direction: rtl;\n}\n",""]);const a=o},6173:(n,e,t)=>{"use strict";t.d(e,{Z:()=>a});var r=t(3645),o=t.n(r)()((function(n){return n[1]}));o.push([n.id,"/* stylelint-disable at-rule-empty-line-before,at-rule-name-space-after,at-rule-no-unknown */\n/* stylelint-disable no-duplicate-selectors */\n/* stylelint-disable */\n/* stylelint-disable declaration-bang-space-before,no-duplicate-selectors,string-no-newline */\n.ant-row {\n display: flex;\n flex-flow: row wrap;\n}\n.ant-row::before,\n.ant-row::after {\n display: flex;\n}\n.ant-row-no-wrap {\n flex-wrap: nowrap;\n}\n.ant-row-start {\n justify-content: 
flex-start;\n}\n.ant-row-center {\n justify-content: center;\n}\n.ant-row-end {\n justify-content: flex-end;\n}\n.ant-row-space-between {\n justify-content: space-between;\n}\n.ant-row-space-around {\n justify-content: space-around;\n}\n.ant-row-top {\n align-items: flex-start;\n}\n.ant-row-middle {\n align-items: center;\n}\n.ant-row-bottom {\n align-items: flex-end;\n}\n.ant-col {\n position: relative;\n max-width: 100%;\n min-height: 1px;\n}\n.ant-col-24 {\n display: block;\n flex: 0 0 100%;\n max-width: 100%;\n}\n.ant-col-push-24 {\n left: 100%;\n}\n.ant-col-pull-24 {\n right: 100%;\n}\n.ant-col-offset-24 {\n margin-left: 100%;\n}\n.ant-col-order-24 {\n order: 24;\n}\n.ant-col-23 {\n display: block;\n flex: 0 0 95.83333333%;\n max-width: 95.83333333%;\n}\n.ant-col-push-23 {\n left: 95.83333333%;\n}\n.ant-col-pull-23 {\n right: 95.83333333%;\n}\n.ant-col-offset-23 {\n margin-left: 95.83333333%;\n}\n.ant-col-order-23 {\n order: 23;\n}\n.ant-col-22 {\n display: block;\n flex: 0 0 91.66666667%;\n max-width: 91.66666667%;\n}\n.ant-col-push-22 {\n left: 91.66666667%;\n}\n.ant-col-pull-22 {\n right: 91.66666667%;\n}\n.ant-col-offset-22 {\n margin-left: 91.66666667%;\n}\n.ant-col-order-22 {\n order: 22;\n}\n.ant-col-21 {\n display: block;\n flex: 0 0 87.5%;\n max-width: 87.5%;\n}\n.ant-col-push-21 {\n left: 87.5%;\n}\n.ant-col-pull-21 {\n right: 87.5%;\n}\n.ant-col-offset-21 {\n margin-left: 87.5%;\n}\n.ant-col-order-21 {\n order: 21;\n}\n.ant-col-20 {\n display: block;\n flex: 0 0 83.33333333%;\n max-width: 83.33333333%;\n}\n.ant-col-push-20 {\n left: 83.33333333%;\n}\n.ant-col-pull-20 {\n right: 83.33333333%;\n}\n.ant-col-offset-20 {\n margin-left: 83.33333333%;\n}\n.ant-col-order-20 {\n order: 20;\n}\n.ant-col-19 {\n display: block;\n flex: 0 0 79.16666667%;\n max-width: 79.16666667%;\n}\n.ant-col-push-19 {\n left: 79.16666667%;\n}\n.ant-col-pull-19 {\n right: 79.16666667%;\n}\n.ant-col-offset-19 {\n margin-left: 79.16666667%;\n}\n.ant-col-order-19 {\n order: 
19;\n}\n.ant-col-18 {\n display: block;\n flex: 0 0 75%;\n max-width: 75%;\n}\n.ant-col-push-18 {\n left: 75%;\n}\n.ant-col-pull-18 {\n right: 75%;\n}\n.ant-col-offset-18 {\n margin-left: 75%;\n}\n.ant-col-order-18 {\n order: 18;\n}\n.ant-col-17 {\n display: block;\n flex: 0 0 70.83333333%;\n max-width: 70.83333333%;\n}\n.ant-col-push-17 {\n left: 70.83333333%;\n}\n.ant-col-pull-17 {\n right: 70.83333333%;\n}\n.ant-col-offset-17 {\n margin-left: 70.83333333%;\n}\n.ant-col-order-17 {\n order: 17;\n}\n.ant-col-16 {\n display: block;\n flex: 0 0 66.66666667%;\n max-width: 66.66666667%;\n}\n.ant-col-push-16 {\n left: 66.66666667%;\n}\n.ant-col-pull-16 {\n right: 66.66666667%;\n}\n.ant-col-offset-16 {\n margin-left: 66.66666667%;\n}\n.ant-col-order-16 {\n order: 16;\n}\n.ant-col-15 {\n display: block;\n flex: 0 0 62.5%;\n max-width: 62.5%;\n}\n.ant-col-push-15 {\n left: 62.5%;\n}\n.ant-col-pull-15 {\n right: 62.5%;\n}\n.ant-col-offset-15 {\n margin-left: 62.5%;\n}\n.ant-col-order-15 {\n order: 15;\n}\n.ant-col-14 {\n display: block;\n flex: 0 0 58.33333333%;\n max-width: 58.33333333%;\n}\n.ant-col-push-14 {\n left: 58.33333333%;\n}\n.ant-col-pull-14 {\n right: 58.33333333%;\n}\n.ant-col-offset-14 {\n margin-left: 58.33333333%;\n}\n.ant-col-order-14 {\n order: 14;\n}\n.ant-col-13 {\n display: block;\n flex: 0 0 54.16666667%;\n max-width: 54.16666667%;\n}\n.ant-col-push-13 {\n left: 54.16666667%;\n}\n.ant-col-pull-13 {\n right: 54.16666667%;\n}\n.ant-col-offset-13 {\n margin-left: 54.16666667%;\n}\n.ant-col-order-13 {\n order: 13;\n}\n.ant-col-12 {\n display: block;\n flex: 0 0 50%;\n max-width: 50%;\n}\n.ant-col-push-12 {\n left: 50%;\n}\n.ant-col-pull-12 {\n right: 50%;\n}\n.ant-col-offset-12 {\n margin-left: 50%;\n}\n.ant-col-order-12 {\n order: 12;\n}\n.ant-col-11 {\n display: block;\n flex: 0 0 45.83333333%;\n max-width: 45.83333333%;\n}\n.ant-col-push-11 {\n left: 45.83333333%;\n}\n.ant-col-pull-11 {\n right: 45.83333333%;\n}\n.ant-col-offset-11 {\n margin-left: 
45.83333333%;\n}\n.ant-col-order-11 {\n order: 11;\n}\n.ant-col-10 {\n display: block;\n flex: 0 0 41.66666667%;\n max-width: 41.66666667%;\n}\n.ant-col-push-10 {\n left: 41.66666667%;\n}\n.ant-col-pull-10 {\n right: 41.66666667%;\n}\n.ant-col-offset-10 {\n margin-left: 41.66666667%;\n}\n.ant-col-order-10 {\n order: 10;\n}\n.ant-col-9 {\n display: block;\n flex: 0 0 37.5%;\n max-width: 37.5%;\n}\n.ant-col-push-9 {\n left: 37.5%;\n}\n.ant-col-pull-9 {\n right: 37.5%;\n}\n.ant-col-offset-9 {\n margin-left: 37.5%;\n}\n.ant-col-order-9 {\n order: 9;\n}\n.ant-col-8 {\n display: block;\n flex: 0 0 33.33333333%;\n max-width: 33.33333333%;\n}\n.ant-col-push-8 {\n left: 33.33333333%;\n}\n.ant-col-pull-8 {\n right: 33.33333333%;\n}\n.ant-col-offset-8 {\n margin-left: 33.33333333%;\n}\n.ant-col-order-8 {\n order: 8;\n}\n.ant-col-7 {\n display: block;\n flex: 0 0 29.16666667%;\n max-width: 29.16666667%;\n}\n.ant-col-push-7 {\n left: 29.16666667%;\n}\n.ant-col-pull-7 {\n right: 29.16666667%;\n}\n.ant-col-offset-7 {\n margin-left: 29.16666667%;\n}\n.ant-col-order-7 {\n order: 7;\n}\n.ant-col-6 {\n display: block;\n flex: 0 0 25%;\n max-width: 25%;\n}\n.ant-col-push-6 {\n left: 25%;\n}\n.ant-col-pull-6 {\n right: 25%;\n}\n.ant-col-offset-6 {\n margin-left: 25%;\n}\n.ant-col-order-6 {\n order: 6;\n}\n.ant-col-5 {\n display: block;\n flex: 0 0 20.83333333%;\n max-width: 20.83333333%;\n}\n.ant-col-push-5 {\n left: 20.83333333%;\n}\n.ant-col-pull-5 {\n right: 20.83333333%;\n}\n.ant-col-offset-5 {\n margin-left: 20.83333333%;\n}\n.ant-col-order-5 {\n order: 5;\n}\n.ant-col-4 {\n display: block;\n flex: 0 0 16.66666667%;\n max-width: 16.66666667%;\n}\n.ant-col-push-4 {\n left: 16.66666667%;\n}\n.ant-col-pull-4 {\n right: 16.66666667%;\n}\n.ant-col-offset-4 {\n margin-left: 16.66666667%;\n}\n.ant-col-order-4 {\n order: 4;\n}\n.ant-col-3 {\n display: block;\n flex: 0 0 12.5%;\n max-width: 12.5%;\n}\n.ant-col-push-3 {\n left: 12.5%;\n}\n.ant-col-pull-3 {\n right: 
12.5%;\n}\n.ant-col-offset-3 {\n margin-left: 12.5%;\n}\n.ant-col-order-3 {\n order: 3;\n}\n.ant-col-2 {\n display: block;\n flex: 0 0 8.33333333%;\n max-width: 8.33333333%;\n}\n.ant-col-push-2 {\n left: 8.33333333%;\n}\n.ant-col-pull-2 {\n right: 8.33333333%;\n}\n.ant-col-offset-2 {\n margin-left: 8.33333333%;\n}\n.ant-col-order-2 {\n order: 2;\n}\n.ant-col-1 {\n display: block;\n flex: 0 0 4.16666667%;\n max-width: 4.16666667%;\n}\n.ant-col-push-1 {\n left: 4.16666667%;\n}\n.ant-col-pull-1 {\n right: 4.16666667%;\n}\n.ant-col-offset-1 {\n margin-left: 4.16666667%;\n}\n.ant-col-order-1 {\n order: 1;\n}\n.ant-col-0 {\n display: none;\n}\n.ant-col-push-0 {\n left: auto;\n}\n.ant-col-pull-0 {\n right: auto;\n}\n.ant-col-push-0 {\n left: auto;\n}\n.ant-col-pull-0 {\n right: auto;\n}\n.ant-col-offset-0 {\n margin-left: 0;\n}\n.ant-col-order-0 {\n order: 0;\n}\n.ant-col-push-0.ant-col-rtl {\n right: auto;\n}\n.ant-col-pull-0.ant-col-rtl {\n left: auto;\n}\n.ant-col-push-0.ant-col-rtl {\n right: auto;\n}\n.ant-col-pull-0.ant-col-rtl {\n left: auto;\n}\n.ant-col-offset-0.ant-col-rtl {\n margin-right: 0;\n}\n.ant-col-push-1.ant-col-rtl {\n right: 4.16666667%;\n left: auto;\n}\n.ant-col-pull-1.ant-col-rtl {\n right: auto;\n left: 4.16666667%;\n}\n.ant-col-offset-1.ant-col-rtl {\n margin-right: 4.16666667%;\n margin-left: 0;\n}\n.ant-col-push-2.ant-col-rtl {\n right: 8.33333333%;\n left: auto;\n}\n.ant-col-pull-2.ant-col-rtl {\n right: auto;\n left: 8.33333333%;\n}\n.ant-col-offset-2.ant-col-rtl {\n margin-right: 8.33333333%;\n margin-left: 0;\n}\n.ant-col-push-3.ant-col-rtl {\n right: 12.5%;\n left: auto;\n}\n.ant-col-pull-3.ant-col-rtl {\n right: auto;\n left: 12.5%;\n}\n.ant-col-offset-3.ant-col-rtl {\n margin-right: 12.5%;\n margin-left: 0;\n}\n.ant-col-push-4.ant-col-rtl {\n right: 16.66666667%;\n left: auto;\n}\n.ant-col-pull-4.ant-col-rtl {\n right: auto;\n left: 16.66666667%;\n}\n.ant-col-offset-4.ant-col-rtl {\n margin-right: 16.66666667%;\n margin-left: 
0;\n}\n.ant-col-push-5.ant-col-rtl {\n right: 20.83333333%;\n left: auto;\n}\n.ant-col-pull-5.ant-col-rtl {\n right: auto;\n left: 20.83333333%;\n}\n.ant-col-offset-5.ant-col-rtl {\n margin-right: 20.83333333%;\n margin-left: 0;\n}\n.ant-col-push-6.ant-col-rtl {\n right: 25%;\n left: auto;\n}\n.ant-col-pull-6.ant-col-rtl {\n right: auto;\n left: 25%;\n}\n.ant-col-offset-6.ant-col-rtl {\n margin-right: 25%;\n margin-left: 0;\n}\n.ant-col-push-7.ant-col-rtl {\n right: 29.16666667%;\n left: auto;\n}\n.ant-col-pull-7.ant-col-rtl {\n right: auto;\n left: 29.16666667%;\n}\n.ant-col-offset-7.ant-col-rtl {\n margin-right: 29.16666667%;\n margin-left: 0;\n}\n.ant-col-push-8.ant-col-rtl {\n right: 33.33333333%;\n left: auto;\n}\n.ant-col-pull-8.ant-col-rtl {\n right: auto;\n left: 33.33333333%;\n}\n.ant-col-offset-8.ant-col-rtl {\n margin-right: 33.33333333%;\n margin-left: 0;\n}\n.ant-col-push-9.ant-col-rtl {\n right: 37.5%;\n left: auto;\n}\n.ant-col-pull-9.ant-col-rtl {\n right: auto;\n left: 37.5%;\n}\n.ant-col-offset-9.ant-col-rtl {\n margin-right: 37.5%;\n margin-left: 0;\n}\n.ant-col-push-10.ant-col-rtl {\n right: 41.66666667%;\n left: auto;\n}\n.ant-col-pull-10.ant-col-rtl {\n right: auto;\n left: 41.66666667%;\n}\n.ant-col-offset-10.ant-col-rtl {\n margin-right: 41.66666667%;\n margin-left: 0;\n}\n.ant-col-push-11.ant-col-rtl {\n right: 45.83333333%;\n left: auto;\n}\n.ant-col-pull-11.ant-col-rtl {\n right: auto;\n left: 45.83333333%;\n}\n.ant-col-offset-11.ant-col-rtl {\n margin-right: 45.83333333%;\n margin-left: 0;\n}\n.ant-col-push-12.ant-col-rtl {\n right: 50%;\n left: auto;\n}\n.ant-col-pull-12.ant-col-rtl {\n right: auto;\n left: 50%;\n}\n.ant-col-offset-12.ant-col-rtl {\n margin-right: 50%;\n margin-left: 0;\n}\n.ant-col-push-13.ant-col-rtl {\n right: 54.16666667%;\n left: auto;\n}\n.ant-col-pull-13.ant-col-rtl {\n right: auto;\n left: 54.16666667%;\n}\n.ant-col-offset-13.ant-col-rtl {\n margin-right: 54.16666667%;\n margin-left: 
0;\n}\n.ant-col-push-14.ant-col-rtl {\n right: 58.33333333%;\n left: auto;\n}\n.ant-col-pull-14.ant-col-rtl {\n right: auto;\n left: 58.33333333%;\n}\n.ant-col-offset-14.ant-col-rtl {\n margin-right: 58.33333333%;\n margin-left: 0;\n}\n.ant-col-push-15.ant-col-rtl {\n right: 62.5%;\n left: auto;\n}\n.ant-col-pull-15.ant-col-rtl {\n right: auto;\n left: 62.5%;\n}\n.ant-col-offset-15.ant-col-rtl {\n margin-right: 62.5%;\n margin-left: 0;\n}\n.ant-col-push-16.ant-col-rtl {\n right: 66.66666667%;\n left: auto;\n}\n.ant-col-pull-16.ant-col-rtl {\n right: auto;\n left: 66.66666667%;\n}\n.ant-col-offset-16.ant-col-rtl {\n margin-right: 66.66666667%;\n margin-left: 0;\n}\n.ant-col-push-17.ant-col-rtl {\n right: 70.83333333%;\n left: auto;\n}\n.ant-col-pull-17.ant-col-rtl {\n right: auto;\n left: 70.83333333%;\n}\n.ant-col-offset-17.ant-col-rtl {\n margin-right: 70.83333333%;\n margin-left: 0;\n}\n.ant-col-push-18.ant-col-rtl {\n right: 75%;\n left: auto;\n}\n.ant-col-pull-18.ant-col-rtl {\n right: auto;\n left: 75%;\n}\n.ant-col-offset-18.ant-col-rtl {\n margin-right: 75%;\n margin-left: 0;\n}\n.ant-col-push-19.ant-col-rtl {\n right: 79.16666667%;\n left: auto;\n}\n.ant-col-pull-19.ant-col-rtl {\n right: auto;\n left: 79.16666667%;\n}\n.ant-col-offset-19.ant-col-rtl {\n margin-right: 79.16666667%;\n margin-left: 0;\n}\n.ant-col-push-20.ant-col-rtl {\n right: 83.33333333%;\n left: auto;\n}\n.ant-col-pull-20.ant-col-rtl {\n right: auto;\n left: 83.33333333%;\n}\n.ant-col-offset-20.ant-col-rtl {\n margin-right: 83.33333333%;\n margin-left: 0;\n}\n.ant-col-push-21.ant-col-rtl {\n right: 87.5%;\n left: auto;\n}\n.ant-col-pull-21.ant-col-rtl {\n right: auto;\n left: 87.5%;\n}\n.ant-col-offset-21.ant-col-rtl {\n margin-right: 87.5%;\n margin-left: 0;\n}\n.ant-col-push-22.ant-col-rtl {\n right: 91.66666667%;\n left: auto;\n}\n.ant-col-pull-22.ant-col-rtl {\n right: auto;\n left: 91.66666667%;\n}\n.ant-col-offset-22.ant-col-rtl {\n margin-right: 91.66666667%;\n margin-left: 
0;\n}\n.ant-col-push-23.ant-col-rtl {\n right: 95.83333333%;\n left: auto;\n}\n.ant-col-pull-23.ant-col-rtl {\n right: auto;\n left: 95.83333333%;\n}\n.ant-col-offset-23.ant-col-rtl {\n margin-right: 95.83333333%;\n margin-left: 0;\n}\n.ant-col-push-24.ant-col-rtl {\n right: 100%;\n left: auto;\n}\n.ant-col-pull-24.ant-col-rtl {\n right: auto;\n left: 100%;\n}\n.ant-col-offset-24.ant-col-rtl {\n margin-right: 100%;\n margin-left: 0;\n}\n.ant-col-xs-24 {\n display: block;\n flex: 0 0 100%;\n max-width: 100%;\n}\n.ant-col-xs-push-24 {\n left: 100%;\n}\n.ant-col-xs-pull-24 {\n right: 100%;\n}\n.ant-col-xs-offset-24 {\n margin-left: 100%;\n}\n.ant-col-xs-order-24 {\n order: 24;\n}\n.ant-col-xs-23 {\n display: block;\n flex: 0 0 95.83333333%;\n max-width: 95.83333333%;\n}\n.ant-col-xs-push-23 {\n left: 95.83333333%;\n}\n.ant-col-xs-pull-23 {\n right: 95.83333333%;\n}\n.ant-col-xs-offset-23 {\n margin-left: 95.83333333%;\n}\n.ant-col-xs-order-23 {\n order: 23;\n}\n.ant-col-xs-22 {\n display: block;\n flex: 0 0 91.66666667%;\n max-width: 91.66666667%;\n}\n.ant-col-xs-push-22 {\n left: 91.66666667%;\n}\n.ant-col-xs-pull-22 {\n right: 91.66666667%;\n}\n.ant-col-xs-offset-22 {\n margin-left: 91.66666667%;\n}\n.ant-col-xs-order-22 {\n order: 22;\n}\n.ant-col-xs-21 {\n display: block;\n flex: 0 0 87.5%;\n max-width: 87.5%;\n}\n.ant-col-xs-push-21 {\n left: 87.5%;\n}\n.ant-col-xs-pull-21 {\n right: 87.5%;\n}\n.ant-col-xs-offset-21 {\n margin-left: 87.5%;\n}\n.ant-col-xs-order-21 {\n order: 21;\n}\n.ant-col-xs-20 {\n display: block;\n flex: 0 0 83.33333333%;\n max-width: 83.33333333%;\n}\n.ant-col-xs-push-20 {\n left: 83.33333333%;\n}\n.ant-col-xs-pull-20 {\n right: 83.33333333%;\n}\n.ant-col-xs-offset-20 {\n margin-left: 83.33333333%;\n}\n.ant-col-xs-order-20 {\n order: 20;\n}\n.ant-col-xs-19 {\n display: block;\n flex: 0 0 79.16666667%;\n max-width: 79.16666667%;\n}\n.ant-col-xs-push-19 {\n left: 79.16666667%;\n}\n.ant-col-xs-pull-19 {\n right: 
79.16666667%;\n}\n.ant-col-xs-offset-19 {\n margin-left: 79.16666667%;\n}\n.ant-col-xs-order-19 {\n order: 19;\n}\n.ant-col-xs-18 {\n display: block;\n flex: 0 0 75%;\n max-width: 75%;\n}\n.ant-col-xs-push-18 {\n left: 75%;\n}\n.ant-col-xs-pull-18 {\n right: 75%;\n}\n.ant-col-xs-offset-18 {\n margin-left: 75%;\n}\n.ant-col-xs-order-18 {\n order: 18;\n}\n.ant-col-xs-17 {\n display: block;\n flex: 0 0 70.83333333%;\n max-width: 70.83333333%;\n}\n.ant-col-xs-push-17 {\n left: 70.83333333%;\n}\n.ant-col-xs-pull-17 {\n right: 70.83333333%;\n}\n.ant-col-xs-offset-17 {\n margin-left: 70.83333333%;\n}\n.ant-col-xs-order-17 {\n order: 17;\n}\n.ant-col-xs-16 {\n display: block;\n flex: 0 0 66.66666667%;\n max-width: 66.66666667%;\n}\n.ant-col-xs-push-16 {\n left: 66.66666667%;\n}\n.ant-col-xs-pull-16 {\n right: 66.66666667%;\n}\n.ant-col-xs-offset-16 {\n margin-left: 66.66666667%;\n}\n.ant-col-xs-order-16 {\n order: 16;\n}\n.ant-col-xs-15 {\n display: block;\n flex: 0 0 62.5%;\n max-width: 62.5%;\n}\n.ant-col-xs-push-15 {\n left: 62.5%;\n}\n.ant-col-xs-pull-15 {\n right: 62.5%;\n}\n.ant-col-xs-offset-15 {\n margin-left: 62.5%;\n}\n.ant-col-xs-order-15 {\n order: 15;\n}\n.ant-col-xs-14 {\n display: block;\n flex: 0 0 58.33333333%;\n max-width: 58.33333333%;\n}\n.ant-col-xs-push-14 {\n left: 58.33333333%;\n}\n.ant-col-xs-pull-14 {\n right: 58.33333333%;\n}\n.ant-col-xs-offset-14 {\n margin-left: 58.33333333%;\n}\n.ant-col-xs-order-14 {\n order: 14;\n}\n.ant-col-xs-13 {\n display: block;\n flex: 0 0 54.16666667%;\n max-width: 54.16666667%;\n}\n.ant-col-xs-push-13 {\n left: 54.16666667%;\n}\n.ant-col-xs-pull-13 {\n right: 54.16666667%;\n}\n.ant-col-xs-offset-13 {\n margin-left: 54.16666667%;\n}\n.ant-col-xs-order-13 {\n order: 13;\n}\n.ant-col-xs-12 {\n display: block;\n flex: 0 0 50%;\n max-width: 50%;\n}\n.ant-col-xs-push-12 {\n left: 50%;\n}\n.ant-col-xs-pull-12 {\n right: 50%;\n}\n.ant-col-xs-offset-12 {\n margin-left: 50%;\n}\n.ant-col-xs-order-12 {\n order: 
12;\n}\n.ant-col-xs-11 {\n display: block;\n flex: 0 0 45.83333333%;\n max-width: 45.83333333%;\n}\n.ant-col-xs-push-11 {\n left: 45.83333333%;\n}\n.ant-col-xs-pull-11 {\n right: 45.83333333%;\n}\n.ant-col-xs-offset-11 {\n margin-left: 45.83333333%;\n}\n.ant-col-xs-order-11 {\n order: 11;\n}\n.ant-col-xs-10 {\n display: block;\n flex: 0 0 41.66666667%;\n max-width: 41.66666667%;\n}\n.ant-col-xs-push-10 {\n left: 41.66666667%;\n}\n.ant-col-xs-pull-10 {\n right: 41.66666667%;\n}\n.ant-col-xs-offset-10 {\n margin-left: 41.66666667%;\n}\n.ant-col-xs-order-10 {\n order: 10;\n}\n.ant-col-xs-9 {\n display: block;\n flex: 0 0 37.5%;\n max-width: 37.5%;\n}\n.ant-col-xs-push-9 {\n left: 37.5%;\n}\n.ant-col-xs-pull-9 {\n right: 37.5%;\n}\n.ant-col-xs-offset-9 {\n margin-left: 37.5%;\n}\n.ant-col-xs-order-9 {\n order: 9;\n}\n.ant-col-xs-8 {\n display: block;\n flex: 0 0 33.33333333%;\n max-width: 33.33333333%;\n}\n.ant-col-xs-push-8 {\n left: 33.33333333%;\n}\n.ant-col-xs-pull-8 {\n right: 33.33333333%;\n}\n.ant-col-xs-offset-8 {\n margin-left: 33.33333333%;\n}\n.ant-col-xs-order-8 {\n order: 8;\n}\n.ant-col-xs-7 {\n display: block;\n flex: 0 0 29.16666667%;\n max-width: 29.16666667%;\n}\n.ant-col-xs-push-7 {\n left: 29.16666667%;\n}\n.ant-col-xs-pull-7 {\n right: 29.16666667%;\n}\n.ant-col-xs-offset-7 {\n margin-left: 29.16666667%;\n}\n.ant-col-xs-order-7 {\n order: 7;\n}\n.ant-col-xs-6 {\n display: block;\n flex: 0 0 25%;\n max-width: 25%;\n}\n.ant-col-xs-push-6 {\n left: 25%;\n}\n.ant-col-xs-pull-6 {\n right: 25%;\n}\n.ant-col-xs-offset-6 {\n margin-left: 25%;\n}\n.ant-col-xs-order-6 {\n order: 6;\n}\n.ant-col-xs-5 {\n display: block;\n flex: 0 0 20.83333333%;\n max-width: 20.83333333%;\n}\n.ant-col-xs-push-5 {\n left: 20.83333333%;\n}\n.ant-col-xs-pull-5 {\n right: 20.83333333%;\n}\n.ant-col-xs-offset-5 {\n margin-left: 20.83333333%;\n}\n.ant-col-xs-order-5 {\n order: 5;\n}\n.ant-col-xs-4 {\n display: block;\n flex: 0 0 16.66666667%;\n max-width: 
16.66666667%;\n}\n.ant-col-xs-push-4 {\n left: 16.66666667%;\n}\n.ant-col-xs-pull-4 {\n right: 16.66666667%;\n}\n.ant-col-xs-offset-4 {\n margin-left: 16.66666667%;\n}\n.ant-col-xs-order-4 {\n order: 4;\n}\n.ant-col-xs-3 {\n display: block;\n flex: 0 0 12.5%;\n max-width: 12.5%;\n}\n.ant-col-xs-push-3 {\n left: 12.5%;\n}\n.ant-col-xs-pull-3 {\n right: 12.5%;\n}\n.ant-col-xs-offset-3 {\n margin-left: 12.5%;\n}\n.ant-col-xs-order-3 {\n order: 3;\n}\n.ant-col-xs-2 {\n display: block;\n flex: 0 0 8.33333333%;\n max-width: 8.33333333%;\n}\n.ant-col-xs-push-2 {\n left: 8.33333333%;\n}\n.ant-col-xs-pull-2 {\n right: 8.33333333%;\n}\n.ant-col-xs-offset-2 {\n margin-left: 8.33333333%;\n}\n.ant-col-xs-order-2 {\n order: 2;\n}\n.ant-col-xs-1 {\n display: block;\n flex: 0 0 4.16666667%;\n max-width: 4.16666667%;\n}\n.ant-col-xs-push-1 {\n left: 4.16666667%;\n}\n.ant-col-xs-pull-1 {\n right: 4.16666667%;\n}\n.ant-col-xs-offset-1 {\n margin-left: 4.16666667%;\n}\n.ant-col-xs-order-1 {\n order: 1;\n}\n.ant-col-xs-0 {\n display: none;\n}\n.ant-col-push-0 {\n left: auto;\n}\n.ant-col-pull-0 {\n right: auto;\n}\n.ant-col-xs-push-0 {\n left: auto;\n}\n.ant-col-xs-pull-0 {\n right: auto;\n}\n.ant-col-xs-offset-0 {\n margin-left: 0;\n}\n.ant-col-xs-order-0 {\n order: 0;\n}\n.ant-col-push-0.ant-col-rtl {\n right: auto;\n}\n.ant-col-pull-0.ant-col-rtl {\n left: auto;\n}\n.ant-col-xs-push-0.ant-col-rtl {\n right: auto;\n}\n.ant-col-xs-pull-0.ant-col-rtl {\n left: auto;\n}\n.ant-col-xs-offset-0.ant-col-rtl {\n margin-right: 0;\n}\n.ant-col-xs-push-1.ant-col-rtl {\n right: 4.16666667%;\n left: auto;\n}\n.ant-col-xs-pull-1.ant-col-rtl {\n right: auto;\n left: 4.16666667%;\n}\n.ant-col-xs-offset-1.ant-col-rtl {\n margin-right: 4.16666667%;\n margin-left: 0;\n}\n.ant-col-xs-push-2.ant-col-rtl {\n right: 8.33333333%;\n left: auto;\n}\n.ant-col-xs-pull-2.ant-col-rtl {\n right: auto;\n left: 8.33333333%;\n}\n.ant-col-xs-offset-2.ant-col-rtl {\n margin-right: 8.33333333%;\n margin-left: 
0;\n}\n.ant-col-xs-push-3.ant-col-rtl {\n right: 12.5%;\n left: auto;\n}\n.ant-col-xs-pull-3.ant-col-rtl {\n right: auto;\n left: 12.5%;\n}\n.ant-col-xs-offset-3.ant-col-rtl {\n margin-right: 12.5%;\n margin-left: 0;\n}\n.ant-col-xs-push-4.ant-col-rtl {\n right: 16.66666667%;\n left: auto;\n}\n.ant-col-xs-pull-4.ant-col-rtl {\n right: auto;\n left: 16.66666667%;\n}\n.ant-col-xs-offset-4.ant-col-rtl {\n margin-right: 16.66666667%;\n margin-left: 0;\n}\n.ant-col-xs-push-5.ant-col-rtl {\n right: 20.83333333%;\n left: auto;\n}\n.ant-col-xs-pull-5.ant-col-rtl {\n right: auto;\n left: 20.83333333%;\n}\n.ant-col-xs-offset-5.ant-col-rtl {\n margin-right: 20.83333333%;\n margin-left: 0;\n}\n.ant-col-xs-push-6.ant-col-rtl {\n right: 25%;\n left: auto;\n}\n.ant-col-xs-pull-6.ant-col-rtl {\n right: auto;\n left: 25%;\n}\n.ant-col-xs-offset-6.ant-col-rtl {\n margin-right: 25%;\n margin-left: 0;\n}\n.ant-col-xs-push-7.ant-col-rtl {\n right: 29.16666667%;\n left: auto;\n}\n.ant-col-xs-pull-7.ant-col-rtl {\n right: auto;\n left: 29.16666667%;\n}\n.ant-col-xs-offset-7.ant-col-rtl {\n margin-right: 29.16666667%;\n margin-left: 0;\n}\n.ant-col-xs-push-8.ant-col-rtl {\n right: 33.33333333%;\n left: auto;\n}\n.ant-col-xs-pull-8.ant-col-rtl {\n right: auto;\n left: 33.33333333%;\n}\n.ant-col-xs-offset-8.ant-col-rtl {\n margin-right: 33.33333333%;\n margin-left: 0;\n}\n.ant-col-xs-push-9.ant-col-rtl {\n right: 37.5%;\n left: auto;\n}\n.ant-col-xs-pull-9.ant-col-rtl {\n right: auto;\n left: 37.5%;\n}\n.ant-col-xs-offset-9.ant-col-rtl {\n margin-right: 37.5%;\n margin-left: 0;\n}\n.ant-col-xs-push-10.ant-col-rtl {\n right: 41.66666667%;\n left: auto;\n}\n.ant-col-xs-pull-10.ant-col-rtl {\n right: auto;\n left: 41.66666667%;\n}\n.ant-col-xs-offset-10.ant-col-rtl {\n margin-right: 41.66666667%;\n margin-left: 0;\n}\n.ant-col-xs-push-11.ant-col-rtl {\n right: 45.83333333%;\n left: auto;\n}\n.ant-col-xs-pull-11.ant-col-rtl {\n right: auto;\n left: 
45.83333333%;\n}\n.ant-col-xs-offset-11.ant-col-rtl {\n margin-right: 45.83333333%;\n margin-left: 0;\n}\n.ant-col-xs-push-12.ant-col-rtl {\n right: 50%;\n left: auto;\n}\n.ant-col-xs-pull-12.ant-col-rtl {\n right: auto;\n left: 50%;\n}\n.ant-col-xs-offset-12.ant-col-rtl {\n margin-right: 50%;\n margin-left: 0;\n}\n.ant-col-xs-push-13.ant-col-rtl {\n right: 54.16666667%;\n left: auto;\n}\n.ant-col-xs-pull-13.ant-col-rtl {\n right: auto;\n left: 54.16666667%;\n}\n.ant-col-xs-offset-13.ant-col-rtl {\n margin-right: 54.16666667%;\n margin-left: 0;\n}\n.ant-col-xs-push-14.ant-col-rtl {\n right: 58.33333333%;\n left: auto;\n}\n.ant-col-xs-pull-14.ant-col-rtl {\n right: auto;\n left: 58.33333333%;\n}\n.ant-col-xs-offset-14.ant-col-rtl {\n margin-right: 58.33333333%;\n margin-left: 0;\n}\n.ant-col-xs-push-15.ant-col-rtl {\n right: 62.5%;\n left: auto;\n}\n.ant-col-xs-pull-15.ant-col-rtl {\n right: auto;\n left: 62.5%;\n}\n.ant-col-xs-offset-15.ant-col-rtl {\n margin-right: 62.5%;\n margin-left: 0;\n}\n.ant-col-xs-push-16.ant-col-rtl {\n right: 66.66666667%;\n left: auto;\n}\n.ant-col-xs-pull-16.ant-col-rtl {\n right: auto;\n left: 66.66666667%;\n}\n.ant-col-xs-offset-16.ant-col-rtl {\n margin-right: 66.66666667%;\n margin-left: 0;\n}\n.ant-col-xs-push-17.ant-col-rtl {\n right: 70.83333333%;\n left: auto;\n}\n.ant-col-xs-pull-17.ant-col-rtl {\n right: auto;\n left: 70.83333333%;\n}\n.ant-col-xs-offset-17.ant-col-rtl {\n margin-right: 70.83333333%;\n margin-left: 0;\n}\n.ant-col-xs-push-18.ant-col-rtl {\n right: 75%;\n left: auto;\n}\n.ant-col-xs-pull-18.ant-col-rtl {\n right: auto;\n left: 75%;\n}\n.ant-col-xs-offset-18.ant-col-rtl {\n margin-right: 75%;\n margin-left: 0;\n}\n.ant-col-xs-push-19.ant-col-rtl {\n right: 79.16666667%;\n left: auto;\n}\n.ant-col-xs-pull-19.ant-col-rtl {\n right: auto;\n left: 79.16666667%;\n}\n.ant-col-xs-offset-19.ant-col-rtl {\n margin-right: 79.16666667%;\n margin-left: 0;\n}\n.ant-col-xs-push-20.ant-col-rtl {\n right: 83.33333333%;\n left: 
auto;\n}\n.ant-col-xs-pull-20.ant-col-rtl {\n right: auto;\n left: 83.33333333%;\n}\n.ant-col-xs-offset-20.ant-col-rtl {\n margin-right: 83.33333333%;\n margin-left: 0;\n}\n.ant-col-xs-push-21.ant-col-rtl {\n right: 87.5%;\n left: auto;\n}\n.ant-col-xs-pull-21.ant-col-rtl {\n right: auto;\n left: 87.5%;\n}\n.ant-col-xs-offset-21.ant-col-rtl {\n margin-right: 87.5%;\n margin-left: 0;\n}\n.ant-col-xs-push-22.ant-col-rtl {\n right: 91.66666667%;\n left: auto;\n}\n.ant-col-xs-pull-22.ant-col-rtl {\n right: auto;\n left: 91.66666667%;\n}\n.ant-col-xs-offset-22.ant-col-rtl {\n margin-right: 91.66666667%;\n margin-left: 0;\n}\n.ant-col-xs-push-23.ant-col-rtl {\n right: 95.83333333%;\n left: auto;\n}\n.ant-col-xs-pull-23.ant-col-rtl {\n right: auto;\n left: 95.83333333%;\n}\n.ant-col-xs-offset-23.ant-col-rtl {\n margin-right: 95.83333333%;\n margin-left: 0;\n}\n.ant-col-xs-push-24.ant-col-rtl {\n right: 100%;\n left: auto;\n}\n.ant-col-xs-pull-24.ant-col-rtl {\n right: auto;\n left: 100%;\n}\n.ant-col-xs-offset-24.ant-col-rtl {\n margin-right: 100%;\n margin-left: 0;\n}\n@media (min-width: 576px) {\n .ant-col-sm-24 {\n display: block;\n flex: 0 0 100%;\n max-width: 100%;\n }\n .ant-col-sm-push-24 {\n left: 100%;\n }\n .ant-col-sm-pull-24 {\n right: 100%;\n }\n .ant-col-sm-offset-24 {\n margin-left: 100%;\n }\n .ant-col-sm-order-24 {\n order: 24;\n }\n .ant-col-sm-23 {\n display: block;\n flex: 0 0 95.83333333%;\n max-width: 95.83333333%;\n }\n .ant-col-sm-push-23 {\n left: 95.83333333%;\n }\n .ant-col-sm-pull-23 {\n right: 95.83333333%;\n }\n .ant-col-sm-offset-23 {\n margin-left: 95.83333333%;\n }\n .ant-col-sm-order-23 {\n order: 23;\n }\n .ant-col-sm-22 {\n display: block;\n flex: 0 0 91.66666667%;\n max-width: 91.66666667%;\n }\n .ant-col-sm-push-22 {\n left: 91.66666667%;\n }\n .ant-col-sm-pull-22 {\n right: 91.66666667%;\n }\n .ant-col-sm-offset-22 {\n margin-left: 91.66666667%;\n }\n .ant-col-sm-order-22 {\n order: 22;\n }\n .ant-col-sm-21 {\n display: block;\n 
flex: 0 0 87.5%;\n max-width: 87.5%;\n }\n .ant-col-sm-push-21 {\n left: 87.5%;\n }\n .ant-col-sm-pull-21 {\n right: 87.5%;\n }\n .ant-col-sm-offset-21 {\n margin-left: 87.5%;\n }\n .ant-col-sm-order-21 {\n order: 21;\n }\n .ant-col-sm-20 {\n display: block;\n flex: 0 0 83.33333333%;\n max-width: 83.33333333%;\n }\n .ant-col-sm-push-20 {\n left: 83.33333333%;\n }\n .ant-col-sm-pull-20 {\n right: 83.33333333%;\n }\n .ant-col-sm-offset-20 {\n margin-left: 83.33333333%;\n }\n .ant-col-sm-order-20 {\n order: 20;\n }\n .ant-col-sm-19 {\n display: block;\n flex: 0 0 79.16666667%;\n max-width: 79.16666667%;\n }\n .ant-col-sm-push-19 {\n left: 79.16666667%;\n }\n .ant-col-sm-pull-19 {\n right: 79.16666667%;\n }\n .ant-col-sm-offset-19 {\n margin-left: 79.16666667%;\n }\n .ant-col-sm-order-19 {\n order: 19;\n }\n .ant-col-sm-18 {\n display: block;\n flex: 0 0 75%;\n max-width: 75%;\n }\n .ant-col-sm-push-18 {\n left: 75%;\n }\n .ant-col-sm-pull-18 {\n right: 75%;\n }\n .ant-col-sm-offset-18 {\n margin-left: 75%;\n }\n .ant-col-sm-order-18 {\n order: 18;\n }\n .ant-col-sm-17 {\n display: block;\n flex: 0 0 70.83333333%;\n max-width: 70.83333333%;\n }\n .ant-col-sm-push-17 {\n left: 70.83333333%;\n }\n .ant-col-sm-pull-17 {\n right: 70.83333333%;\n }\n .ant-col-sm-offset-17 {\n margin-left: 70.83333333%;\n }\n .ant-col-sm-order-17 {\n order: 17;\n }\n .ant-col-sm-16 {\n display: block;\n flex: 0 0 66.66666667%;\n max-width: 66.66666667%;\n }\n .ant-col-sm-push-16 {\n left: 66.66666667%;\n }\n .ant-col-sm-pull-16 {\n right: 66.66666667%;\n }\n .ant-col-sm-offset-16 {\n margin-left: 66.66666667%;\n }\n .ant-col-sm-order-16 {\n order: 16;\n }\n .ant-col-sm-15 {\n display: block;\n flex: 0 0 62.5%;\n max-width: 62.5%;\n }\n .ant-col-sm-push-15 {\n left: 62.5%;\n }\n .ant-col-sm-pull-15 {\n right: 62.5%;\n }\n .ant-col-sm-offset-15 {\n margin-left: 62.5%;\n }\n .ant-col-sm-order-15 {\n order: 15;\n }\n .ant-col-sm-14 {\n display: block;\n flex: 0 0 58.33333333%;\n max-width: 
58.33333333%;\n }\n .ant-col-sm-push-14 {\n left: 58.33333333%;\n }\n .ant-col-sm-pull-14 {\n right: 58.33333333%;\n }\n .ant-col-sm-offset-14 {\n margin-left: 58.33333333%;\n }\n .ant-col-sm-order-14 {\n order: 14;\n }\n .ant-col-sm-13 {\n display: block;\n flex: 0 0 54.16666667%;\n max-width: 54.16666667%;\n }\n .ant-col-sm-push-13 {\n left: 54.16666667%;\n }\n .ant-col-sm-pull-13 {\n right: 54.16666667%;\n }\n .ant-col-sm-offset-13 {\n margin-left: 54.16666667%;\n }\n .ant-col-sm-order-13 {\n order: 13;\n }\n .ant-col-sm-12 {\n display: block;\n flex: 0 0 50%;\n max-width: 50%;\n }\n .ant-col-sm-push-12 {\n left: 50%;\n }\n .ant-col-sm-pull-12 {\n right: 50%;\n }\n .ant-col-sm-offset-12 {\n margin-left: 50%;\n }\n .ant-col-sm-order-12 {\n order: 12;\n }\n .ant-col-sm-11 {\n display: block;\n flex: 0 0 45.83333333%;\n max-width: 45.83333333%;\n }\n .ant-col-sm-push-11 {\n left: 45.83333333%;\n }\n .ant-col-sm-pull-11 {\n right: 45.83333333%;\n }\n .ant-col-sm-offset-11 {\n margin-left: 45.83333333%;\n }\n .ant-col-sm-order-11 {\n order: 11;\n }\n .ant-col-sm-10 {\n display: block;\n flex: 0 0 41.66666667%;\n max-width: 41.66666667%;\n }\n .ant-col-sm-push-10 {\n left: 41.66666667%;\n }\n .ant-col-sm-pull-10 {\n right: 41.66666667%;\n }\n .ant-col-sm-offset-10 {\n margin-left: 41.66666667%;\n }\n .ant-col-sm-order-10 {\n order: 10;\n }\n .ant-col-sm-9 {\n display: block;\n flex: 0 0 37.5%;\n max-width: 37.5%;\n }\n .ant-col-sm-push-9 {\n left: 37.5%;\n }\n .ant-col-sm-pull-9 {\n right: 37.5%;\n }\n .ant-col-sm-offset-9 {\n margin-left: 37.5%;\n }\n .ant-col-sm-order-9 {\n order: 9;\n }\n .ant-col-sm-8 {\n display: block;\n flex: 0 0 33.33333333%;\n max-width: 33.33333333%;\n }\n .ant-col-sm-push-8 {\n left: 33.33333333%;\n }\n .ant-col-sm-pull-8 {\n right: 33.33333333%;\n }\n .ant-col-sm-offset-8 {\n margin-left: 33.33333333%;\n }\n .ant-col-sm-order-8 {\n order: 8;\n }\n .ant-col-sm-7 {\n display: block;\n flex: 0 0 29.16666667%;\n max-width: 29.16666667%;\n }\n 
.ant-col-sm-push-7 {\n left: 29.16666667%;\n }\n .ant-col-sm-pull-7 {\n right: 29.16666667%;\n }\n .ant-col-sm-offset-7 {\n margin-left: 29.16666667%;\n }\n .ant-col-sm-order-7 {\n order: 7;\n }\n .ant-col-sm-6 {\n display: block;\n flex: 0 0 25%;\n max-width: 25%;\n }\n .ant-col-sm-push-6 {\n left: 25%;\n }\n .ant-col-sm-pull-6 {\n right: 25%;\n }\n .ant-col-sm-offset-6 {\n margin-left: 25%;\n }\n .ant-col-sm-order-6 {\n order: 6;\n }\n .ant-col-sm-5 {\n display: block;\n flex: 0 0 20.83333333%;\n max-width: 20.83333333%;\n }\n .ant-col-sm-push-5 {\n left: 20.83333333%;\n }\n .ant-col-sm-pull-5 {\n right: 20.83333333%;\n }\n .ant-col-sm-offset-5 {\n margin-left: 20.83333333%;\n }\n .ant-col-sm-order-5 {\n order: 5;\n }\n .ant-col-sm-4 {\n display: block;\n flex: 0 0 16.66666667%;\n max-width: 16.66666667%;\n }\n .ant-col-sm-push-4 {\n left: 16.66666667%;\n }\n .ant-col-sm-pull-4 {\n right: 16.66666667%;\n }\n .ant-col-sm-offset-4 {\n margin-left: 16.66666667%;\n }\n .ant-col-sm-order-4 {\n order: 4;\n }\n .ant-col-sm-3 {\n display: block;\n flex: 0 0 12.5%;\n max-width: 12.5%;\n }\n .ant-col-sm-push-3 {\n left: 12.5%;\n }\n .ant-col-sm-pull-3 {\n right: 12.5%;\n }\n .ant-col-sm-offset-3 {\n margin-left: 12.5%;\n }\n .ant-col-sm-order-3 {\n order: 3;\n }\n .ant-col-sm-2 {\n display: block;\n flex: 0 0 8.33333333%;\n max-width: 8.33333333%;\n }\n .ant-col-sm-push-2 {\n left: 8.33333333%;\n }\n .ant-col-sm-pull-2 {\n right: 8.33333333%;\n }\n .ant-col-sm-offset-2 {\n margin-left: 8.33333333%;\n }\n .ant-col-sm-order-2 {\n order: 2;\n }\n .ant-col-sm-1 {\n display: block;\n flex: 0 0 4.16666667%;\n max-width: 4.16666667%;\n }\n .ant-col-sm-push-1 {\n left: 4.16666667%;\n }\n .ant-col-sm-pull-1 {\n right: 4.16666667%;\n }\n .ant-col-sm-offset-1 {\n margin-left: 4.16666667%;\n }\n .ant-col-sm-order-1 {\n order: 1;\n }\n .ant-col-sm-0 {\n display: none;\n }\n .ant-col-push-0 {\n left: auto;\n }\n .ant-col-pull-0 {\n right: auto;\n }\n .ant-col-sm-push-0 {\n left: auto;\n 
}\n .ant-col-sm-pull-0 {\n right: auto;\n }\n .ant-col-sm-offset-0 {\n margin-left: 0;\n }\n .ant-col-sm-order-0 {\n order: 0;\n }\n .ant-col-push-0.ant-col-rtl {\n right: auto;\n }\n .ant-col-pull-0.ant-col-rtl {\n left: auto;\n }\n .ant-col-sm-push-0.ant-col-rtl {\n right: auto;\n }\n .ant-col-sm-pull-0.ant-col-rtl {\n left: auto;\n }\n .ant-col-sm-offset-0.ant-col-rtl {\n margin-right: 0;\n }\n .ant-col-sm-push-1.ant-col-rtl {\n right: 4.16666667%;\n left: auto;\n }\n .ant-col-sm-pull-1.ant-col-rtl {\n right: auto;\n left: 4.16666667%;\n }\n .ant-col-sm-offset-1.ant-col-rtl {\n margin-right: 4.16666667%;\n margin-left: 0;\n }\n .ant-col-sm-push-2.ant-col-rtl {\n right: 8.33333333%;\n left: auto;\n }\n .ant-col-sm-pull-2.ant-col-rtl {\n right: auto;\n left: 8.33333333%;\n }\n .ant-col-sm-offset-2.ant-col-rtl {\n margin-right: 8.33333333%;\n margin-left: 0;\n }\n .ant-col-sm-push-3.ant-col-rtl {\n right: 12.5%;\n left: auto;\n }\n .ant-col-sm-pull-3.ant-col-rtl {\n right: auto;\n left: 12.5%;\n }\n .ant-col-sm-offset-3.ant-col-rtl {\n margin-right: 12.5%;\n margin-left: 0;\n }\n .ant-col-sm-push-4.ant-col-rtl {\n right: 16.66666667%;\n left: auto;\n }\n .ant-col-sm-pull-4.ant-col-rtl {\n right: auto;\n left: 16.66666667%;\n }\n .ant-col-sm-offset-4.ant-col-rtl {\n margin-right: 16.66666667%;\n margin-left: 0;\n }\n .ant-col-sm-push-5.ant-col-rtl {\n right: 20.83333333%;\n left: auto;\n }\n .ant-col-sm-pull-5.ant-col-rtl {\n right: auto;\n left: 20.83333333%;\n }\n .ant-col-sm-offset-5.ant-col-rtl {\n margin-right: 20.83333333%;\n margin-left: 0;\n }\n .ant-col-sm-push-6.ant-col-rtl {\n right: 25%;\n left: auto;\n }\n .ant-col-sm-pull-6.ant-col-rtl {\n right: auto;\n left: 25%;\n }\n .ant-col-sm-offset-6.ant-col-rtl {\n margin-right: 25%;\n margin-left: 0;\n }\n .ant-col-sm-push-7.ant-col-rtl {\n right: 29.16666667%;\n left: auto;\n }\n .ant-col-sm-pull-7.ant-col-rtl {\n right: auto;\n left: 29.16666667%;\n }\n .ant-col-sm-offset-7.ant-col-rtl {\n margin-right: 
29.16666667%;\n margin-left: 0;\n }\n .ant-col-sm-push-8.ant-col-rtl {\n right: 33.33333333%;\n left: auto;\n }\n .ant-col-sm-pull-8.ant-col-rtl {\n right: auto;\n left: 33.33333333%;\n }\n .ant-col-sm-offset-8.ant-col-rtl {\n margin-right: 33.33333333%;\n margin-left: 0;\n }\n .ant-col-sm-push-9.ant-col-rtl {\n right: 37.5%;\n left: auto;\n }\n .ant-col-sm-pull-9.ant-col-rtl {\n right: auto;\n left: 37.5%;\n }\n .ant-col-sm-offset-9.ant-col-rtl {\n margin-right: 37.5%;\n margin-left: 0;\n }\n .ant-col-sm-push-10.ant-col-rtl {\n right: 41.66666667%;\n left: auto;\n }\n .ant-col-sm-pull-10.ant-col-rtl {\n right: auto;\n left: 41.66666667%;\n }\n .ant-col-sm-offset-10.ant-col-rtl {\n margin-right: 41.66666667%;\n margin-left: 0;\n }\n .ant-col-sm-push-11.ant-col-rtl {\n right: 45.83333333%;\n left: auto;\n }\n .ant-col-sm-pull-11.ant-col-rtl {\n right: auto;\n left: 45.83333333%;\n }\n .ant-col-sm-offset-11.ant-col-rtl {\n margin-right: 45.83333333%;\n margin-left: 0;\n }\n .ant-col-sm-push-12.ant-col-rtl {\n right: 50%;\n left: auto;\n }\n .ant-col-sm-pull-12.ant-col-rtl {\n right: auto;\n left: 50%;\n }\n .ant-col-sm-offset-12.ant-col-rtl {\n margin-right: 50%;\n margin-left: 0;\n }\n .ant-col-sm-push-13.ant-col-rtl {\n right: 54.16666667%;\n left: auto;\n }\n .ant-col-sm-pull-13.ant-col-rtl {\n right: auto;\n left: 54.16666667%;\n }\n .ant-col-sm-offset-13.ant-col-rtl {\n margin-right: 54.16666667%;\n margin-left: 0;\n }\n .ant-col-sm-push-14.ant-col-rtl {\n right: 58.33333333%;\n left: auto;\n }\n .ant-col-sm-pull-14.ant-col-rtl {\n right: auto;\n left: 58.33333333%;\n }\n .ant-col-sm-offset-14.ant-col-rtl {\n margin-right: 58.33333333%;\n margin-left: 0;\n }\n .ant-col-sm-push-15.ant-col-rtl {\n right: 62.5%;\n left: auto;\n }\n .ant-col-sm-pull-15.ant-col-rtl {\n right: auto;\n left: 62.5%;\n }\n .ant-col-sm-offset-15.ant-col-rtl {\n margin-right: 62.5%;\n margin-left: 0;\n }\n .ant-col-sm-push-16.ant-col-rtl {\n right: 66.66666667%;\n left: auto;\n }\n 
.ant-col-sm-pull-16.ant-col-rtl {\n right: auto;\n left: 66.66666667%;\n }\n .ant-col-sm-offset-16.ant-col-rtl {\n margin-right: 66.66666667%;\n margin-left: 0;\n }\n .ant-col-sm-push-17.ant-col-rtl {\n right: 70.83333333%;\n left: auto;\n }\n .ant-col-sm-pull-17.ant-col-rtl {\n right: auto;\n left: 70.83333333%;\n }\n .ant-col-sm-offset-17.ant-col-rtl {\n margin-right: 70.83333333%;\n margin-left: 0;\n }\n .ant-col-sm-push-18.ant-col-rtl {\n right: 75%;\n left: auto;\n }\n .ant-col-sm-pull-18.ant-col-rtl {\n right: auto;\n left: 75%;\n }\n .ant-col-sm-offset-18.ant-col-rtl {\n margin-right: 75%;\n margin-left: 0;\n }\n .ant-col-sm-push-19.ant-col-rtl {\n right: 79.16666667%;\n left: auto;\n }\n .ant-col-sm-pull-19.ant-col-rtl {\n right: auto;\n left: 79.16666667%;\n }\n .ant-col-sm-offset-19.ant-col-rtl {\n margin-right: 79.16666667%;\n margin-left: 0;\n }\n .ant-col-sm-push-20.ant-col-rtl {\n right: 83.33333333%;\n left: auto;\n }\n .ant-col-sm-pull-20.ant-col-rtl {\n right: auto;\n left: 83.33333333%;\n }\n .ant-col-sm-offset-20.ant-col-rtl {\n margin-right: 83.33333333%;\n margin-left: 0;\n }\n .ant-col-sm-push-21.ant-col-rtl {\n right: 87.5%;\n left: auto;\n }\n .ant-col-sm-pull-21.ant-col-rtl {\n right: auto;\n left: 87.5%;\n }\n .ant-col-sm-offset-21.ant-col-rtl {\n margin-right: 87.5%;\n margin-left: 0;\n }\n .ant-col-sm-push-22.ant-col-rtl {\n right: 91.66666667%;\n left: auto;\n }\n .ant-col-sm-pull-22.ant-col-rtl {\n right: auto;\n left: 91.66666667%;\n }\n .ant-col-sm-offset-22.ant-col-rtl {\n margin-right: 91.66666667%;\n margin-left: 0;\n }\n .ant-col-sm-push-23.ant-col-rtl {\n right: 95.83333333%;\n left: auto;\n }\n .ant-col-sm-pull-23.ant-col-rtl {\n right: auto;\n left: 95.83333333%;\n }\n .ant-col-sm-offset-23.ant-col-rtl {\n margin-right: 95.83333333%;\n margin-left: 0;\n }\n .ant-col-sm-push-24.ant-col-rtl {\n right: 100%;\n left: auto;\n }\n .ant-col-sm-pull-24.ant-col-rtl {\n right: auto;\n left: 100%;\n }\n .ant-col-sm-offset-24.ant-col-rtl 
{\n margin-right: 100%;\n margin-left: 0;\n }\n}\n@media (min-width: 768px) {\n .ant-col-md-24 {\n display: block;\n flex: 0 0 100%;\n max-width: 100%;\n }\n .ant-col-md-push-24 {\n left: 100%;\n }\n .ant-col-md-pull-24 {\n right: 100%;\n }\n .ant-col-md-offset-24 {\n margin-left: 100%;\n }\n .ant-col-md-order-24 {\n order: 24;\n }\n .ant-col-md-23 {\n display: block;\n flex: 0 0 95.83333333%;\n max-width: 95.83333333%;\n }\n .ant-col-md-push-23 {\n left: 95.83333333%;\n }\n .ant-col-md-pull-23 {\n right: 95.83333333%;\n }\n .ant-col-md-offset-23 {\n margin-left: 95.83333333%;\n }\n .ant-col-md-order-23 {\n order: 23;\n }\n .ant-col-md-22 {\n display: block;\n flex: 0 0 91.66666667%;\n max-width: 91.66666667%;\n }\n .ant-col-md-push-22 {\n left: 91.66666667%;\n }\n .ant-col-md-pull-22 {\n right: 91.66666667%;\n }\n .ant-col-md-offset-22 {\n margin-left: 91.66666667%;\n }\n .ant-col-md-order-22 {\n order: 22;\n }\n .ant-col-md-21 {\n display: block;\n flex: 0 0 87.5%;\n max-width: 87.5%;\n }\n .ant-col-md-push-21 {\n left: 87.5%;\n }\n .ant-col-md-pull-21 {\n right: 87.5%;\n }\n .ant-col-md-offset-21 {\n margin-left: 87.5%;\n }\n .ant-col-md-order-21 {\n order: 21;\n }\n .ant-col-md-20 {\n display: block;\n flex: 0 0 83.33333333%;\n max-width: 83.33333333%;\n }\n .ant-col-md-push-20 {\n left: 83.33333333%;\n }\n .ant-col-md-pull-20 {\n right: 83.33333333%;\n }\n .ant-col-md-offset-20 {\n margin-left: 83.33333333%;\n }\n .ant-col-md-order-20 {\n order: 20;\n }\n .ant-col-md-19 {\n display: block;\n flex: 0 0 79.16666667%;\n max-width: 79.16666667%;\n }\n .ant-col-md-push-19 {\n left: 79.16666667%;\n }\n .ant-col-md-pull-19 {\n right: 79.16666667%;\n }\n .ant-col-md-offset-19 {\n margin-left: 79.16666667%;\n }\n .ant-col-md-order-19 {\n order: 19;\n }\n .ant-col-md-18 {\n display: block;\n flex: 0 0 75%;\n max-width: 75%;\n }\n .ant-col-md-push-18 {\n left: 75%;\n }\n .ant-col-md-pull-18 {\n right: 75%;\n }\n .ant-col-md-offset-18 {\n margin-left: 75%;\n }\n 
.ant-col-md-order-18 {\n order: 18;\n }\n .ant-col-md-17 {\n display: block;\n flex: 0 0 70.83333333%;\n max-width: 70.83333333%;\n }\n .ant-col-md-push-17 {\n left: 70.83333333%;\n }\n .ant-col-md-pull-17 {\n right: 70.83333333%;\n }\n .ant-col-md-offset-17 {\n margin-left: 70.83333333%;\n }\n .ant-col-md-order-17 {\n order: 17;\n }\n .ant-col-md-16 {\n display: block;\n flex: 0 0 66.66666667%;\n max-width: 66.66666667%;\n }\n .ant-col-md-push-16 {\n left: 66.66666667%;\n }\n .ant-col-md-pull-16 {\n right: 66.66666667%;\n }\n .ant-col-md-offset-16 {\n margin-left: 66.66666667%;\n }\n .ant-col-md-order-16 {\n order: 16;\n }\n .ant-col-md-15 {\n display: block;\n flex: 0 0 62.5%;\n max-width: 62.5%;\n }\n .ant-col-md-push-15 {\n left: 62.5%;\n }\n .ant-col-md-pull-15 {\n right: 62.5%;\n }\n .ant-col-md-offset-15 {\n margin-left: 62.5%;\n }\n .ant-col-md-order-15 {\n order: 15;\n }\n .ant-col-md-14 {\n display: block;\n flex: 0 0 58.33333333%;\n max-width: 58.33333333%;\n }\n .ant-col-md-push-14 {\n left: 58.33333333%;\n }\n .ant-col-md-pull-14 {\n right: 58.33333333%;\n }\n .ant-col-md-offset-14 {\n margin-left: 58.33333333%;\n }\n .ant-col-md-order-14 {\n order: 14;\n }\n .ant-col-md-13 {\n display: block;\n flex: 0 0 54.16666667%;\n max-width: 54.16666667%;\n }\n .ant-col-md-push-13 {\n left: 54.16666667%;\n }\n .ant-col-md-pull-13 {\n right: 54.16666667%;\n }\n .ant-col-md-offset-13 {\n margin-left: 54.16666667%;\n }\n .ant-col-md-order-13 {\n order: 13;\n }\n .ant-col-md-12 {\n display: block;\n flex: 0 0 50%;\n max-width: 50%;\n }\n .ant-col-md-push-12 {\n left: 50%;\n }\n .ant-col-md-pull-12 {\n right: 50%;\n }\n .ant-col-md-offset-12 {\n margin-left: 50%;\n }\n .ant-col-md-order-12 {\n order: 12;\n }\n .ant-col-md-11 {\n display: block;\n flex: 0 0 45.83333333%;\n max-width: 45.83333333%;\n }\n .ant-col-md-push-11 {\n left: 45.83333333%;\n }\n .ant-col-md-pull-11 {\n right: 45.83333333%;\n }\n .ant-col-md-offset-11 {\n margin-left: 45.83333333%;\n }\n 
.ant-col-md-order-11 {\n order: 11;\n }\n .ant-col-md-10 {\n display: block;\n flex: 0 0 41.66666667%;\n max-width: 41.66666667%;\n }\n .ant-col-md-push-10 {\n left: 41.66666667%;\n }\n .ant-col-md-pull-10 {\n right: 41.66666667%;\n }\n .ant-col-md-offset-10 {\n margin-left: 41.66666667%;\n }\n .ant-col-md-order-10 {\n order: 10;\n }\n .ant-col-md-9 {\n display: block;\n flex: 0 0 37.5%;\n max-width: 37.5%;\n }\n .ant-col-md-push-9 {\n left: 37.5%;\n }\n .ant-col-md-pull-9 {\n right: 37.5%;\n }\n .ant-col-md-offset-9 {\n margin-left: 37.5%;\n }\n .ant-col-md-order-9 {\n order: 9;\n }\n .ant-col-md-8 {\n display: block;\n flex: 0 0 33.33333333%;\n max-width: 33.33333333%;\n }\n .ant-col-md-push-8 {\n left: 33.33333333%;\n }\n .ant-col-md-pull-8 {\n right: 33.33333333%;\n }\n .ant-col-md-offset-8 {\n margin-left: 33.33333333%;\n }\n .ant-col-md-order-8 {\n order: 8;\n }\n .ant-col-md-7 {\n display: block;\n flex: 0 0 29.16666667%;\n max-width: 29.16666667%;\n }\n .ant-col-md-push-7 {\n left: 29.16666667%;\n }\n .ant-col-md-pull-7 {\n right: 29.16666667%;\n }\n .ant-col-md-offset-7 {\n margin-left: 29.16666667%;\n }\n .ant-col-md-order-7 {\n order: 7;\n }\n .ant-col-md-6 {\n display: block;\n flex: 0 0 25%;\n max-width: 25%;\n }\n .ant-col-md-push-6 {\n left: 25%;\n }\n .ant-col-md-pull-6 {\n right: 25%;\n }\n .ant-col-md-offset-6 {\n margin-left: 25%;\n }\n .ant-col-md-order-6 {\n order: 6;\n }\n .ant-col-md-5 {\n display: block;\n flex: 0 0 20.83333333%;\n max-width: 20.83333333%;\n }\n .ant-col-md-push-5 {\n left: 20.83333333%;\n }\n .ant-col-md-pull-5 {\n right: 20.83333333%;\n }\n .ant-col-md-offset-5 {\n margin-left: 20.83333333%;\n }\n .ant-col-md-order-5 {\n order: 5;\n }\n .ant-col-md-4 {\n display: block;\n flex: 0 0 16.66666667%;\n max-width: 16.66666667%;\n }\n .ant-col-md-push-4 {\n left: 16.66666667%;\n }\n .ant-col-md-pull-4 {\n right: 16.66666667%;\n }\n .ant-col-md-offset-4 {\n margin-left: 16.66666667%;\n }\n .ant-col-md-order-4 {\n order: 4;\n }\n 
.ant-col-md-3 {\n display: block;\n flex: 0 0 12.5%;\n max-width: 12.5%;\n }\n .ant-col-md-push-3 {\n left: 12.5%;\n }\n .ant-col-md-pull-3 {\n right: 12.5%;\n }\n .ant-col-md-offset-3 {\n margin-left: 12.5%;\n }\n .ant-col-md-order-3 {\n order: 3;\n }\n .ant-col-md-2 {\n display: block;\n flex: 0 0 8.33333333%;\n max-width: 8.33333333%;\n }\n .ant-col-md-push-2 {\n left: 8.33333333%;\n }\n .ant-col-md-pull-2 {\n right: 8.33333333%;\n }\n .ant-col-md-offset-2 {\n margin-left: 8.33333333%;\n }\n .ant-col-md-order-2 {\n order: 2;\n }\n .ant-col-md-1 {\n display: block;\n flex: 0 0 4.16666667%;\n max-width: 4.16666667%;\n }\n .ant-col-md-push-1 {\n left: 4.16666667%;\n }\n .ant-col-md-pull-1 {\n right: 4.16666667%;\n }\n .ant-col-md-offset-1 {\n margin-left: 4.16666667%;\n }\n .ant-col-md-order-1 {\n order: 1;\n }\n .ant-col-md-0 {\n display: none;\n }\n .ant-col-push-0 {\n left: auto;\n }\n .ant-col-pull-0 {\n right: auto;\n }\n .ant-col-md-push-0 {\n left: auto;\n }\n .ant-col-md-pull-0 {\n right: auto;\n }\n .ant-col-md-offset-0 {\n margin-left: 0;\n }\n .ant-col-md-order-0 {\n order: 0;\n }\n .ant-col-push-0.ant-col-rtl {\n right: auto;\n }\n .ant-col-pull-0.ant-col-rtl {\n left: auto;\n }\n .ant-col-md-push-0.ant-col-rtl {\n right: auto;\n }\n .ant-col-md-pull-0.ant-col-rtl {\n left: auto;\n }\n .ant-col-md-offset-0.ant-col-rtl {\n margin-right: 0;\n }\n .ant-col-md-push-1.ant-col-rtl {\n right: 4.16666667%;\n left: auto;\n }\n .ant-col-md-pull-1.ant-col-rtl {\n right: auto;\n left: 4.16666667%;\n }\n .ant-col-md-offset-1.ant-col-rtl {\n margin-right: 4.16666667%;\n margin-left: 0;\n }\n .ant-col-md-push-2.ant-col-rtl {\n right: 8.33333333%;\n left: auto;\n }\n .ant-col-md-pull-2.ant-col-rtl {\n right: auto;\n left: 8.33333333%;\n }\n .ant-col-md-offset-2.ant-col-rtl {\n margin-right: 8.33333333%;\n margin-left: 0;\n }\n .ant-col-md-push-3.ant-col-rtl {\n right: 12.5%;\n left: auto;\n }\n .ant-col-md-pull-3.ant-col-rtl {\n right: auto;\n left: 12.5%;\n }\n 
.ant-col-md-offset-3.ant-col-rtl {\n margin-right: 12.5%;\n margin-left: 0;\n }\n .ant-col-md-push-4.ant-col-rtl {\n right: 16.66666667%;\n left: auto;\n }\n .ant-col-md-pull-4.ant-col-rtl {\n right: auto;\n left: 16.66666667%;\n }\n .ant-col-md-offset-4.ant-col-rtl {\n margin-right: 16.66666667%;\n margin-left: 0;\n }\n .ant-col-md-push-5.ant-col-rtl {\n right: 20.83333333%;\n left: auto;\n }\n .ant-col-md-pull-5.ant-col-rtl {\n right: auto;\n left: 20.83333333%;\n }\n .ant-col-md-offset-5.ant-col-rtl {\n margin-right: 20.83333333%;\n margin-left: 0;\n }\n .ant-col-md-push-6.ant-col-rtl {\n right: 25%;\n left: auto;\n }\n .ant-col-md-pull-6.ant-col-rtl {\n right: auto;\n left: 25%;\n }\n .ant-col-md-offset-6.ant-col-rtl {\n margin-right: 25%;\n margin-left: 0;\n }\n .ant-col-md-push-7.ant-col-rtl {\n right: 29.16666667%;\n left: auto;\n }\n .ant-col-md-pull-7.ant-col-rtl {\n right: auto;\n left: 29.16666667%;\n }\n .ant-col-md-offset-7.ant-col-rtl {\n margin-right: 29.16666667%;\n margin-left: 0;\n }\n .ant-col-md-push-8.ant-col-rtl {\n right: 33.33333333%;\n left: auto;\n }\n .ant-col-md-pull-8.ant-col-rtl {\n right: auto;\n left: 33.33333333%;\n }\n .ant-col-md-offset-8.ant-col-rtl {\n margin-right: 33.33333333%;\n margin-left: 0;\n }\n .ant-col-md-push-9.ant-col-rtl {\n right: 37.5%;\n left: auto;\n }\n .ant-col-md-pull-9.ant-col-rtl {\n right: auto;\n left: 37.5%;\n }\n .ant-col-md-offset-9.ant-col-rtl {\n margin-right: 37.5%;\n margin-left: 0;\n }\n .ant-col-md-push-10.ant-col-rtl {\n right: 41.66666667%;\n left: auto;\n }\n .ant-col-md-pull-10.ant-col-rtl {\n right: auto;\n left: 41.66666667%;\n }\n .ant-col-md-offset-10.ant-col-rtl {\n margin-right: 41.66666667%;\n margin-left: 0;\n }\n .ant-col-md-push-11.ant-col-rtl {\n right: 45.83333333%;\n left: auto;\n }\n .ant-col-md-pull-11.ant-col-rtl {\n right: auto;\n left: 45.83333333%;\n }\n .ant-col-md-offset-11.ant-col-rtl {\n margin-right: 45.83333333%;\n margin-left: 0;\n }\n .ant-col-md-push-12.ant-col-rtl 
{\n right: 50%;\n left: auto;\n }\n .ant-col-md-pull-12.ant-col-rtl {\n right: auto;\n left: 50%;\n }\n .ant-col-md-offset-12.ant-col-rtl {\n margin-right: 50%;\n margin-left: 0;\n }\n .ant-col-md-push-13.ant-col-rtl {\n right: 54.16666667%;\n left: auto;\n }\n .ant-col-md-pull-13.ant-col-rtl {\n right: auto;\n left: 54.16666667%;\n }\n .ant-col-md-offset-13.ant-col-rtl {\n margin-right: 54.16666667%;\n margin-left: 0;\n }\n .ant-col-md-push-14.ant-col-rtl {\n right: 58.33333333%;\n left: auto;\n }\n .ant-col-md-pull-14.ant-col-rtl {\n right: auto;\n left: 58.33333333%;\n }\n .ant-col-md-offset-14.ant-col-rtl {\n margin-right: 58.33333333%;\n margin-left: 0;\n }\n .ant-col-md-push-15.ant-col-rtl {\n right: 62.5%;\n left: auto;\n }\n .ant-col-md-pull-15.ant-col-rtl {\n right: auto;\n left: 62.5%;\n }\n .ant-col-md-offset-15.ant-col-rtl {\n margin-right: 62.5%;\n margin-left: 0;\n }\n .ant-col-md-push-16.ant-col-rtl {\n right: 66.66666667%;\n left: auto;\n }\n .ant-col-md-pull-16.ant-col-rtl {\n right: auto;\n left: 66.66666667%;\n }\n .ant-col-md-offset-16.ant-col-rtl {\n margin-right: 66.66666667%;\n margin-left: 0;\n }\n .ant-col-md-push-17.ant-col-rtl {\n right: 70.83333333%;\n left: auto;\n }\n .ant-col-md-pull-17.ant-col-rtl {\n right: auto;\n left: 70.83333333%;\n }\n .ant-col-md-offset-17.ant-col-rtl {\n margin-right: 70.83333333%;\n margin-left: 0;\n }\n .ant-col-md-push-18.ant-col-rtl {\n right: 75%;\n left: auto;\n }\n .ant-col-md-pull-18.ant-col-rtl {\n right: auto;\n left: 75%;\n }\n .ant-col-md-offset-18.ant-col-rtl {\n margin-right: 75%;\n margin-left: 0;\n }\n .ant-col-md-push-19.ant-col-rtl {\n right: 79.16666667%;\n left: auto;\n }\n .ant-col-md-pull-19.ant-col-rtl {\n right: auto;\n left: 79.16666667%;\n }\n .ant-col-md-offset-19.ant-col-rtl {\n margin-right: 79.16666667%;\n margin-left: 0;\n }\n .ant-col-md-push-20.ant-col-rtl {\n right: 83.33333333%;\n left: auto;\n }\n .ant-col-md-pull-20.ant-col-rtl {\n right: auto;\n left: 83.33333333%;\n }\n 
.ant-col-md-offset-20.ant-col-rtl {\n margin-right: 83.33333333%;\n margin-left: 0;\n }\n .ant-col-md-push-21.ant-col-rtl {\n right: 87.5%;\n left: auto;\n }\n .ant-col-md-pull-21.ant-col-rtl {\n right: auto;\n left: 87.5%;\n }\n .ant-col-md-offset-21.ant-col-rtl {\n margin-right: 87.5%;\n margin-left: 0;\n }\n .ant-col-md-push-22.ant-col-rtl {\n right: 91.66666667%;\n left: auto;\n }\n .ant-col-md-pull-22.ant-col-rtl {\n right: auto;\n left: 91.66666667%;\n }\n .ant-col-md-offset-22.ant-col-rtl {\n margin-right: 91.66666667%;\n margin-left: 0;\n }\n .ant-col-md-push-23.ant-col-rtl {\n right: 95.83333333%;\n left: auto;\n }\n .ant-col-md-pull-23.ant-col-rtl {\n right: auto;\n left: 95.83333333%;\n }\n .ant-col-md-offset-23.ant-col-rtl {\n margin-right: 95.83333333%;\n margin-left: 0;\n }\n .ant-col-md-push-24.ant-col-rtl {\n right: 100%;\n left: auto;\n }\n .ant-col-md-pull-24.ant-col-rtl {\n right: auto;\n left: 100%;\n }\n .ant-col-md-offset-24.ant-col-rtl {\n margin-right: 100%;\n margin-left: 0;\n }\n}\n@media (min-width: 992px) {\n .ant-col-lg-24 {\n display: block;\n flex: 0 0 100%;\n max-width: 100%;\n }\n .ant-col-lg-push-24 {\n left: 100%;\n }\n .ant-col-lg-pull-24 {\n right: 100%;\n }\n .ant-col-lg-offset-24 {\n margin-left: 100%;\n }\n .ant-col-lg-order-24 {\n order: 24;\n }\n .ant-col-lg-23 {\n display: block;\n flex: 0 0 95.83333333%;\n max-width: 95.83333333%;\n }\n .ant-col-lg-push-23 {\n left: 95.83333333%;\n }\n .ant-col-lg-pull-23 {\n right: 95.83333333%;\n }\n .ant-col-lg-offset-23 {\n margin-left: 95.83333333%;\n }\n .ant-col-lg-order-23 {\n order: 23;\n }\n .ant-col-lg-22 {\n display: block;\n flex: 0 0 91.66666667%;\n max-width: 91.66666667%;\n }\n .ant-col-lg-push-22 {\n left: 91.66666667%;\n }\n .ant-col-lg-pull-22 {\n right: 91.66666667%;\n }\n .ant-col-lg-offset-22 {\n margin-left: 91.66666667%;\n }\n .ant-col-lg-order-22 {\n order: 22;\n }\n .ant-col-lg-21 {\n display: block;\n flex: 0 0 87.5%;\n max-width: 87.5%;\n }\n 
.ant-col-lg-push-21 {\n left: 87.5%;\n }\n .ant-col-lg-pull-21 {\n right: 87.5%;\n }\n .ant-col-lg-offset-21 {\n margin-left: 87.5%;\n }\n .ant-col-lg-order-21 {\n order: 21;\n }\n .ant-col-lg-20 {\n display: block;\n flex: 0 0 83.33333333%;\n max-width: 83.33333333%;\n }\n .ant-col-lg-push-20 {\n left: 83.33333333%;\n }\n .ant-col-lg-pull-20 {\n right: 83.33333333%;\n }\n .ant-col-lg-offset-20 {\n margin-left: 83.33333333%;\n }\n .ant-col-lg-order-20 {\n order: 20;\n }\n .ant-col-lg-19 {\n display: block;\n flex: 0 0 79.16666667%;\n max-width: 79.16666667%;\n }\n .ant-col-lg-push-19 {\n left: 79.16666667%;\n }\n .ant-col-lg-pull-19 {\n right: 79.16666667%;\n }\n .ant-col-lg-offset-19 {\n margin-left: 79.16666667%;\n }\n .ant-col-lg-order-19 {\n order: 19;\n }\n .ant-col-lg-18 {\n display: block;\n flex: 0 0 75%;\n max-width: 75%;\n }\n .ant-col-lg-push-18 {\n left: 75%;\n }\n .ant-col-lg-pull-18 {\n right: 75%;\n }\n .ant-col-lg-offset-18 {\n margin-left: 75%;\n }\n .ant-col-lg-order-18 {\n order: 18;\n }\n .ant-col-lg-17 {\n display: block;\n flex: 0 0 70.83333333%;\n max-width: 70.83333333%;\n }\n .ant-col-lg-push-17 {\n left: 70.83333333%;\n }\n .ant-col-lg-pull-17 {\n right: 70.83333333%;\n }\n .ant-col-lg-offset-17 {\n margin-left: 70.83333333%;\n }\n .ant-col-lg-order-17 {\n order: 17;\n }\n .ant-col-lg-16 {\n display: block;\n flex: 0 0 66.66666667%;\n max-width: 66.66666667%;\n }\n .ant-col-lg-push-16 {\n left: 66.66666667%;\n }\n .ant-col-lg-pull-16 {\n right: 66.66666667%;\n }\n .ant-col-lg-offset-16 {\n margin-left: 66.66666667%;\n }\n .ant-col-lg-order-16 {\n order: 16;\n }\n .ant-col-lg-15 {\n display: block;\n flex: 0 0 62.5%;\n max-width: 62.5%;\n }\n .ant-col-lg-push-15 {\n left: 62.5%;\n }\n .ant-col-lg-pull-15 {\n right: 62.5%;\n }\n .ant-col-lg-offset-15 {\n margin-left: 62.5%;\n }\n .ant-col-lg-order-15 {\n order: 15;\n }\n .ant-col-lg-14 {\n display: block;\n flex: 0 0 58.33333333%;\n max-width: 58.33333333%;\n }\n .ant-col-lg-push-14 {\n 
left: 58.33333333%;\n }\n .ant-col-lg-pull-14 {\n right: 58.33333333%;\n }\n .ant-col-lg-offset-14 {\n margin-left: 58.33333333%;\n }\n .ant-col-lg-order-14 {\n order: 14;\n }\n .ant-col-lg-13 {\n display: block;\n flex: 0 0 54.16666667%;\n max-width: 54.16666667%;\n }\n .ant-col-lg-push-13 {\n left: 54.16666667%;\n }\n .ant-col-lg-pull-13 {\n right: 54.16666667%;\n }\n .ant-col-lg-offset-13 {\n margin-left: 54.16666667%;\n }\n .ant-col-lg-order-13 {\n order: 13;\n }\n .ant-col-lg-12 {\n display: block;\n flex: 0 0 50%;\n max-width: 50%;\n }\n .ant-col-lg-push-12 {\n left: 50%;\n }\n .ant-col-lg-pull-12 {\n right: 50%;\n }\n .ant-col-lg-offset-12 {\n margin-left: 50%;\n }\n .ant-col-lg-order-12 {\n order: 12;\n }\n .ant-col-lg-11 {\n display: block;\n flex: 0 0 45.83333333%;\n max-width: 45.83333333%;\n }\n .ant-col-lg-push-11 {\n left: 45.83333333%;\n }\n .ant-col-lg-pull-11 {\n right: 45.83333333%;\n }\n .ant-col-lg-offset-11 {\n margin-left: 45.83333333%;\n }\n .ant-col-lg-order-11 {\n order: 11;\n }\n .ant-col-lg-10 {\n display: block;\n flex: 0 0 41.66666667%;\n max-width: 41.66666667%;\n }\n .ant-col-lg-push-10 {\n left: 41.66666667%;\n }\n .ant-col-lg-pull-10 {\n right: 41.66666667%;\n }\n .ant-col-lg-offset-10 {\n margin-left: 41.66666667%;\n }\n .ant-col-lg-order-10 {\n order: 10;\n }\n .ant-col-lg-9 {\n display: block;\n flex: 0 0 37.5%;\n max-width: 37.5%;\n }\n .ant-col-lg-push-9 {\n left: 37.5%;\n }\n .ant-col-lg-pull-9 {\n right: 37.5%;\n }\n .ant-col-lg-offset-9 {\n margin-left: 37.5%;\n }\n .ant-col-lg-order-9 {\n order: 9;\n }\n .ant-col-lg-8 {\n display: block;\n flex: 0 0 33.33333333%;\n max-width: 33.33333333%;\n }\n .ant-col-lg-push-8 {\n left: 33.33333333%;\n }\n .ant-col-lg-pull-8 {\n right: 33.33333333%;\n }\n .ant-col-lg-offset-8 {\n margin-left: 33.33333333%;\n }\n .ant-col-lg-order-8 {\n order: 8;\n }\n .ant-col-lg-7 {\n display: block;\n flex: 0 0 29.16666667%;\n max-width: 29.16666667%;\n }\n .ant-col-lg-push-7 {\n left: 29.16666667%;\n 
}\n .ant-col-lg-pull-7 {\n right: 29.16666667%;\n }\n .ant-col-lg-offset-7 {\n margin-left: 29.16666667%;\n }\n .ant-col-lg-order-7 {\n order: 7;\n }\n .ant-col-lg-6 {\n display: block;\n flex: 0 0 25%;\n max-width: 25%;\n }\n .ant-col-lg-push-6 {\n left: 25%;\n }\n .ant-col-lg-pull-6 {\n right: 25%;\n }\n .ant-col-lg-offset-6 {\n margin-left: 25%;\n }\n .ant-col-lg-order-6 {\n order: 6;\n }\n .ant-col-lg-5 {\n display: block;\n flex: 0 0 20.83333333%;\n max-width: 20.83333333%;\n }\n .ant-col-lg-push-5 {\n left: 20.83333333%;\n }\n .ant-col-lg-pull-5 {\n right: 20.83333333%;\n }\n .ant-col-lg-offset-5 {\n margin-left: 20.83333333%;\n }\n .ant-col-lg-order-5 {\n order: 5;\n }\n .ant-col-lg-4 {\n display: block;\n flex: 0 0 16.66666667%;\n max-width: 16.66666667%;\n }\n .ant-col-lg-push-4 {\n left: 16.66666667%;\n }\n .ant-col-lg-pull-4 {\n right: 16.66666667%;\n }\n .ant-col-lg-offset-4 {\n margin-left: 16.66666667%;\n }\n .ant-col-lg-order-4 {\n order: 4;\n }\n .ant-col-lg-3 {\n display: block;\n flex: 0 0 12.5%;\n max-width: 12.5%;\n }\n .ant-col-lg-push-3 {\n left: 12.5%;\n }\n .ant-col-lg-pull-3 {\n right: 12.5%;\n }\n .ant-col-lg-offset-3 {\n margin-left: 12.5%;\n }\n .ant-col-lg-order-3 {\n order: 3;\n }\n .ant-col-lg-2 {\n display: block;\n flex: 0 0 8.33333333%;\n max-width: 8.33333333%;\n }\n .ant-col-lg-push-2 {\n left: 8.33333333%;\n }\n .ant-col-lg-pull-2 {\n right: 8.33333333%;\n }\n .ant-col-lg-offset-2 {\n margin-left: 8.33333333%;\n }\n .ant-col-lg-order-2 {\n order: 2;\n }\n .ant-col-lg-1 {\n display: block;\n flex: 0 0 4.16666667%;\n max-width: 4.16666667%;\n }\n .ant-col-lg-push-1 {\n left: 4.16666667%;\n }\n .ant-col-lg-pull-1 {\n right: 4.16666667%;\n }\n .ant-col-lg-offset-1 {\n margin-left: 4.16666667%;\n }\n .ant-col-lg-order-1 {\n order: 1;\n }\n .ant-col-lg-0 {\n display: none;\n }\n .ant-col-push-0 {\n left: auto;\n }\n .ant-col-pull-0 {\n right: auto;\n }\n .ant-col-lg-push-0 {\n left: auto;\n }\n .ant-col-lg-pull-0 {\n right: auto;\n 
}\n .ant-col-lg-offset-0 {\n margin-left: 0;\n }\n .ant-col-lg-order-0 {\n order: 0;\n }\n .ant-col-push-0.ant-col-rtl {\n right: auto;\n }\n .ant-col-pull-0.ant-col-rtl {\n left: auto;\n }\n .ant-col-lg-push-0.ant-col-rtl {\n right: auto;\n }\n .ant-col-lg-pull-0.ant-col-rtl {\n left: auto;\n }\n .ant-col-lg-offset-0.ant-col-rtl {\n margin-right: 0;\n }\n .ant-col-lg-push-1.ant-col-rtl {\n right: 4.16666667%;\n left: auto;\n }\n .ant-col-lg-pull-1.ant-col-rtl {\n right: auto;\n left: 4.16666667%;\n }\n .ant-col-lg-offset-1.ant-col-rtl {\n margin-right: 4.16666667%;\n margin-left: 0;\n }\n .ant-col-lg-push-2.ant-col-rtl {\n right: 8.33333333%;\n left: auto;\n }\n .ant-col-lg-pull-2.ant-col-rtl {\n right: auto;\n left: 8.33333333%;\n }\n .ant-col-lg-offset-2.ant-col-rtl {\n margin-right: 8.33333333%;\n margin-left: 0;\n }\n .ant-col-lg-push-3.ant-col-rtl {\n right: 12.5%;\n left: auto;\n }\n .ant-col-lg-pull-3.ant-col-rtl {\n right: auto;\n left: 12.5%;\n }\n .ant-col-lg-offset-3.ant-col-rtl {\n margin-right: 12.5%;\n margin-left: 0;\n }\n .ant-col-lg-push-4.ant-col-rtl {\n right: 16.66666667%;\n left: auto;\n }\n .ant-col-lg-pull-4.ant-col-rtl {\n right: auto;\n left: 16.66666667%;\n }\n .ant-col-lg-offset-4.ant-col-rtl {\n margin-right: 16.66666667%;\n margin-left: 0;\n }\n .ant-col-lg-push-5.ant-col-rtl {\n right: 20.83333333%;\n left: auto;\n }\n .ant-col-lg-pull-5.ant-col-rtl {\n right: auto;\n left: 20.83333333%;\n }\n .ant-col-lg-offset-5.ant-col-rtl {\n margin-right: 20.83333333%;\n margin-left: 0;\n }\n .ant-col-lg-push-6.ant-col-rtl {\n right: 25%;\n left: auto;\n }\n .ant-col-lg-pull-6.ant-col-rtl {\n right: auto;\n left: 25%;\n }\n .ant-col-lg-offset-6.ant-col-rtl {\n margin-right: 25%;\n margin-left: 0;\n }\n .ant-col-lg-push-7.ant-col-rtl {\n right: 29.16666667%;\n left: auto;\n }\n .ant-col-lg-pull-7.ant-col-rtl {\n right: auto;\n left: 29.16666667%;\n }\n .ant-col-lg-offset-7.ant-col-rtl {\n margin-right: 29.16666667%;\n margin-left: 0;\n }\n 
.ant-col-lg-push-8.ant-col-rtl {\n right: 33.33333333%;\n left: auto;\n }\n .ant-col-lg-pull-8.ant-col-rtl {\n right: auto;\n left: 33.33333333%;\n }\n .ant-col-lg-offset-8.ant-col-rtl {\n margin-right: 33.33333333%;\n margin-left: 0;\n }\n .ant-col-lg-push-9.ant-col-rtl {\n right: 37.5%;\n left: auto;\n }\n .ant-col-lg-pull-9.ant-col-rtl {\n right: auto;\n left: 37.5%;\n }\n .ant-col-lg-offset-9.ant-col-rtl {\n margin-right: 37.5%;\n margin-left: 0;\n }\n .ant-col-lg-push-10.ant-col-rtl {\n right: 41.66666667%;\n left: auto;\n }\n .ant-col-lg-pull-10.ant-col-rtl {\n right: auto;\n left: 41.66666667%;\n }\n .ant-col-lg-offset-10.ant-col-rtl {\n margin-right: 41.66666667%;\n margin-left: 0;\n }\n .ant-col-lg-push-11.ant-col-rtl {\n right: 45.83333333%;\n left: auto;\n }\n .ant-col-lg-pull-11.ant-col-rtl {\n right: auto;\n left: 45.83333333%;\n }\n .ant-col-lg-offset-11.ant-col-rtl {\n margin-right: 45.83333333%;\n margin-left: 0;\n }\n .ant-col-lg-push-12.ant-col-rtl {\n right: 50%;\n left: auto;\n }\n .ant-col-lg-pull-12.ant-col-rtl {\n right: auto;\n left: 50%;\n }\n .ant-col-lg-offset-12.ant-col-rtl {\n margin-right: 50%;\n margin-left: 0;\n }\n .ant-col-lg-push-13.ant-col-rtl {\n right: 54.16666667%;\n left: auto;\n }\n .ant-col-lg-pull-13.ant-col-rtl {\n right: auto;\n left: 54.16666667%;\n }\n .ant-col-lg-offset-13.ant-col-rtl {\n margin-right: 54.16666667%;\n margin-left: 0;\n }\n .ant-col-lg-push-14.ant-col-rtl {\n right: 58.33333333%;\n left: auto;\n }\n .ant-col-lg-pull-14.ant-col-rtl {\n right: auto;\n left: 58.33333333%;\n }\n .ant-col-lg-offset-14.ant-col-rtl {\n margin-right: 58.33333333%;\n margin-left: 0;\n }\n .ant-col-lg-push-15.ant-col-rtl {\n right: 62.5%;\n left: auto;\n }\n .ant-col-lg-pull-15.ant-col-rtl {\n right: auto;\n left: 62.5%;\n }\n .ant-col-lg-offset-15.ant-col-rtl {\n margin-right: 62.5%;\n margin-left: 0;\n }\n .ant-col-lg-push-16.ant-col-rtl {\n right: 66.66666667%;\n left: auto;\n }\n .ant-col-lg-pull-16.ant-col-rtl {\n right: 
auto;\n left: 66.66666667%;\n }\n .ant-col-lg-offset-16.ant-col-rtl {\n margin-right: 66.66666667%;\n margin-left: 0;\n }\n .ant-col-lg-push-17.ant-col-rtl {\n right: 70.83333333%;\n left: auto;\n }\n .ant-col-lg-pull-17.ant-col-rtl {\n right: auto;\n left: 70.83333333%;\n }\n .ant-col-lg-offset-17.ant-col-rtl {\n margin-right: 70.83333333%;\n margin-left: 0;\n }\n .ant-col-lg-push-18.ant-col-rtl {\n right: 75%;\n left: auto;\n }\n .ant-col-lg-pull-18.ant-col-rtl {\n right: auto;\n left: 75%;\n }\n .ant-col-lg-offset-18.ant-col-rtl {\n margin-right: 75%;\n margin-left: 0;\n }\n .ant-col-lg-push-19.ant-col-rtl {\n right: 79.16666667%;\n left: auto;\n }\n .ant-col-lg-pull-19.ant-col-rtl {\n right: auto;\n left: 79.16666667%;\n }\n .ant-col-lg-offset-19.ant-col-rtl {\n margin-right: 79.16666667%;\n margin-left: 0;\n }\n .ant-col-lg-push-20.ant-col-rtl {\n right: 83.33333333%;\n left: auto;\n }\n .ant-col-lg-pull-20.ant-col-rtl {\n right: auto;\n left: 83.33333333%;\n }\n .ant-col-lg-offset-20.ant-col-rtl {\n margin-right: 83.33333333%;\n margin-left: 0;\n }\n .ant-col-lg-push-21.ant-col-rtl {\n right: 87.5%;\n left: auto;\n }\n .ant-col-lg-pull-21.ant-col-rtl {\n right: auto;\n left: 87.5%;\n }\n .ant-col-lg-offset-21.ant-col-rtl {\n margin-right: 87.5%;\n margin-left: 0;\n }\n .ant-col-lg-push-22.ant-col-rtl {\n right: 91.66666667%;\n left: auto;\n }\n .ant-col-lg-pull-22.ant-col-rtl {\n right: auto;\n left: 91.66666667%;\n }\n .ant-col-lg-offset-22.ant-col-rtl {\n margin-right: 91.66666667%;\n margin-left: 0;\n }\n .ant-col-lg-push-23.ant-col-rtl {\n right: 95.83333333%;\n left: auto;\n }\n .ant-col-lg-pull-23.ant-col-rtl {\n right: auto;\n left: 95.83333333%;\n }\n .ant-col-lg-offset-23.ant-col-rtl {\n margin-right: 95.83333333%;\n margin-left: 0;\n }\n .ant-col-lg-push-24.ant-col-rtl {\n right: 100%;\n left: auto;\n }\n .ant-col-lg-pull-24.ant-col-rtl {\n right: auto;\n left: 100%;\n }\n .ant-col-lg-offset-24.ant-col-rtl {\n margin-right: 100%;\n margin-left: 0;\n 
}\n}\n@media (min-width: 1200px) {\n .ant-col-xl-24 {\n display: block;\n flex: 0 0 100%;\n max-width: 100%;\n }\n .ant-col-xl-push-24 {\n left: 100%;\n }\n .ant-col-xl-pull-24 {\n right: 100%;\n }\n .ant-col-xl-offset-24 {\n margin-left: 100%;\n }\n .ant-col-xl-order-24 {\n order: 24;\n }\n .ant-col-xl-23 {\n display: block;\n flex: 0 0 95.83333333%;\n max-width: 95.83333333%;\n }\n .ant-col-xl-push-23 {\n left: 95.83333333%;\n }\n .ant-col-xl-pull-23 {\n right: 95.83333333%;\n }\n .ant-col-xl-offset-23 {\n margin-left: 95.83333333%;\n }\n .ant-col-xl-order-23 {\n order: 23;\n }\n .ant-col-xl-22 {\n display: block;\n flex: 0 0 91.66666667%;\n max-width: 91.66666667%;\n }\n .ant-col-xl-push-22 {\n left: 91.66666667%;\n }\n .ant-col-xl-pull-22 {\n right: 91.66666667%;\n }\n .ant-col-xl-offset-22 {\n margin-left: 91.66666667%;\n }\n .ant-col-xl-order-22 {\n order: 22;\n }\n .ant-col-xl-21 {\n display: block;\n flex: 0 0 87.5%;\n max-width: 87.5%;\n }\n .ant-col-xl-push-21 {\n left: 87.5%;\n }\n .ant-col-xl-pull-21 {\n right: 87.5%;\n }\n .ant-col-xl-offset-21 {\n margin-left: 87.5%;\n }\n .ant-col-xl-order-21 {\n order: 21;\n }\n .ant-col-xl-20 {\n display: block;\n flex: 0 0 83.33333333%;\n max-width: 83.33333333%;\n }\n .ant-col-xl-push-20 {\n left: 83.33333333%;\n }\n .ant-col-xl-pull-20 {\n right: 83.33333333%;\n }\n .ant-col-xl-offset-20 {\n margin-left: 83.33333333%;\n }\n .ant-col-xl-order-20 {\n order: 20;\n }\n .ant-col-xl-19 {\n display: block;\n flex: 0 0 79.16666667%;\n max-width: 79.16666667%;\n }\n .ant-col-xl-push-19 {\n left: 79.16666667%;\n }\n .ant-col-xl-pull-19 {\n right: 79.16666667%;\n }\n .ant-col-xl-offset-19 {\n margin-left: 79.16666667%;\n }\n .ant-col-xl-order-19 {\n order: 19;\n }\n .ant-col-xl-18 {\n display: block;\n flex: 0 0 75%;\n max-width: 75%;\n }\n .ant-col-xl-push-18 {\n left: 75%;\n }\n .ant-col-xl-pull-18 {\n right: 75%;\n }\n .ant-col-xl-offset-18 {\n margin-left: 75%;\n }\n .ant-col-xl-order-18 {\n order: 18;\n }\n 
.ant-col-xl-17 {\n display: block;\n flex: 0 0 70.83333333%;\n max-width: 70.83333333%;\n }\n .ant-col-xl-push-17 {\n left: 70.83333333%;\n }\n .ant-col-xl-pull-17 {\n right: 70.83333333%;\n }\n .ant-col-xl-offset-17 {\n margin-left: 70.83333333%;\n }\n .ant-col-xl-order-17 {\n order: 17;\n }\n .ant-col-xl-16 {\n display: block;\n flex: 0 0 66.66666667%;\n max-width: 66.66666667%;\n }\n .ant-col-xl-push-16 {\n left: 66.66666667%;\n }\n .ant-col-xl-pull-16 {\n right: 66.66666667%;\n }\n .ant-col-xl-offset-16 {\n margin-left: 66.66666667%;\n }\n .ant-col-xl-order-16 {\n order: 16;\n }\n .ant-col-xl-15 {\n display: block;\n flex: 0 0 62.5%;\n max-width: 62.5%;\n }\n .ant-col-xl-push-15 {\n left: 62.5%;\n }\n .ant-col-xl-pull-15 {\n right: 62.5%;\n }\n .ant-col-xl-offset-15 {\n margin-left: 62.5%;\n }\n .ant-col-xl-order-15 {\n order: 15;\n }\n .ant-col-xl-14 {\n display: block;\n flex: 0 0 58.33333333%;\n max-width: 58.33333333%;\n }\n .ant-col-xl-push-14 {\n left: 58.33333333%;\n }\n .ant-col-xl-pull-14 {\n right: 58.33333333%;\n }\n .ant-col-xl-offset-14 {\n margin-left: 58.33333333%;\n }\n .ant-col-xl-order-14 {\n order: 14;\n }\n .ant-col-xl-13 {\n display: block;\n flex: 0 0 54.16666667%;\n max-width: 54.16666667%;\n }\n .ant-col-xl-push-13 {\n left: 54.16666667%;\n }\n .ant-col-xl-pull-13 {\n right: 54.16666667%;\n }\n .ant-col-xl-offset-13 {\n margin-left: 54.16666667%;\n }\n .ant-col-xl-order-13 {\n order: 13;\n }\n .ant-col-xl-12 {\n display: block;\n flex: 0 0 50%;\n max-width: 50%;\n }\n .ant-col-xl-push-12 {\n left: 50%;\n }\n .ant-col-xl-pull-12 {\n right: 50%;\n }\n .ant-col-xl-offset-12 {\n margin-left: 50%;\n }\n .ant-col-xl-order-12 {\n order: 12;\n }\n .ant-col-xl-11 {\n display: block;\n flex: 0 0 45.83333333%;\n max-width: 45.83333333%;\n }\n .ant-col-xl-push-11 {\n left: 45.83333333%;\n }\n .ant-col-xl-pull-11 {\n right: 45.83333333%;\n }\n .ant-col-xl-offset-11 {\n margin-left: 45.83333333%;\n }\n .ant-col-xl-order-11 {\n order: 11;\n }\n 
.ant-col-xl-10 {\n display: block;\n flex: 0 0 41.66666667%;\n max-width: 41.66666667%;\n }\n .ant-col-xl-push-10 {\n left: 41.66666667%;\n }\n .ant-col-xl-pull-10 {\n right: 41.66666667%;\n }\n .ant-col-xl-offset-10 {\n margin-left: 41.66666667%;\n }\n .ant-col-xl-order-10 {\n order: 10;\n }\n .ant-col-xl-9 {\n display: block;\n flex: 0 0 37.5%;\n max-width: 37.5%;\n }\n .ant-col-xl-push-9 {\n left: 37.5%;\n }\n .ant-col-xl-pull-9 {\n right: 37.5%;\n }\n .ant-col-xl-offset-9 {\n margin-left: 37.5%;\n }\n .ant-col-xl-order-9 {\n order: 9;\n }\n .ant-col-xl-8 {\n display: block;\n flex: 0 0 33.33333333%;\n max-width: 33.33333333%;\n }\n .ant-col-xl-push-8 {\n left: 33.33333333%;\n }\n .ant-col-xl-pull-8 {\n right: 33.33333333%;\n }\n .ant-col-xl-offset-8 {\n margin-left: 33.33333333%;\n }\n .ant-col-xl-order-8 {\n order: 8;\n }\n .ant-col-xl-7 {\n display: block;\n flex: 0 0 29.16666667%;\n max-width: 29.16666667%;\n }\n .ant-col-xl-push-7 {\n left: 29.16666667%;\n }\n .ant-col-xl-pull-7 {\n right: 29.16666667%;\n }\n .ant-col-xl-offset-7 {\n margin-left: 29.16666667%;\n }\n .ant-col-xl-order-7 {\n order: 7;\n }\n .ant-col-xl-6 {\n display: block;\n flex: 0 0 25%;\n max-width: 25%;\n }\n .ant-col-xl-push-6 {\n left: 25%;\n }\n .ant-col-xl-pull-6 {\n right: 25%;\n }\n .ant-col-xl-offset-6 {\n margin-left: 25%;\n }\n .ant-col-xl-order-6 {\n order: 6;\n }\n .ant-col-xl-5 {\n display: block;\n flex: 0 0 20.83333333%;\n max-width: 20.83333333%;\n }\n .ant-col-xl-push-5 {\n left: 20.83333333%;\n }\n .ant-col-xl-pull-5 {\n right: 20.83333333%;\n }\n .ant-col-xl-offset-5 {\n margin-left: 20.83333333%;\n }\n .ant-col-xl-order-5 {\n order: 5;\n }\n .ant-col-xl-4 {\n display: block;\n flex: 0 0 16.66666667%;\n max-width: 16.66666667%;\n }\n .ant-col-xl-push-4 {\n left: 16.66666667%;\n }\n .ant-col-xl-pull-4 {\n right: 16.66666667%;\n }\n .ant-col-xl-offset-4 {\n margin-left: 16.66666667%;\n }\n .ant-col-xl-order-4 {\n order: 4;\n }\n .ant-col-xl-3 {\n display: block;\n flex: 0 
0 12.5%;\n max-width: 12.5%;\n }\n .ant-col-xl-push-3 {\n left: 12.5%;\n }\n .ant-col-xl-pull-3 {\n right: 12.5%;\n }\n .ant-col-xl-offset-3 {\n margin-left: 12.5%;\n }\n .ant-col-xl-order-3 {\n order: 3;\n }\n .ant-col-xl-2 {\n display: block;\n flex: 0 0 8.33333333%;\n max-width: 8.33333333%;\n }\n .ant-col-xl-push-2 {\n left: 8.33333333%;\n }\n .ant-col-xl-pull-2 {\n right: 8.33333333%;\n }\n .ant-col-xl-offset-2 {\n margin-left: 8.33333333%;\n }\n .ant-col-xl-order-2 {\n order: 2;\n }\n .ant-col-xl-1 {\n display: block;\n flex: 0 0 4.16666667%;\n max-width: 4.16666667%;\n }\n .ant-col-xl-push-1 {\n left: 4.16666667%;\n }\n .ant-col-xl-pull-1 {\n right: 4.16666667%;\n }\n .ant-col-xl-offset-1 {\n margin-left: 4.16666667%;\n }\n .ant-col-xl-order-1 {\n order: 1;\n }\n .ant-col-xl-0 {\n display: none;\n }\n .ant-col-push-0 {\n left: auto;\n }\n .ant-col-pull-0 {\n right: auto;\n }\n .ant-col-xl-push-0 {\n left: auto;\n }\n .ant-col-xl-pull-0 {\n right: auto;\n }\n .ant-col-xl-offset-0 {\n margin-left: 0;\n }\n .ant-col-xl-order-0 {\n order: 0;\n }\n .ant-col-push-0.ant-col-rtl {\n right: auto;\n }\n .ant-col-pull-0.ant-col-rtl {\n left: auto;\n }\n .ant-col-xl-push-0.ant-col-rtl {\n right: auto;\n }\n .ant-col-xl-pull-0.ant-col-rtl {\n left: auto;\n }\n .ant-col-xl-offset-0.ant-col-rtl {\n margin-right: 0;\n }\n .ant-col-xl-push-1.ant-col-rtl {\n right: 4.16666667%;\n left: auto;\n }\n .ant-col-xl-pull-1.ant-col-rtl {\n right: auto;\n left: 4.16666667%;\n }\n .ant-col-xl-offset-1.ant-col-rtl {\n margin-right: 4.16666667%;\n margin-left: 0;\n }\n .ant-col-xl-push-2.ant-col-rtl {\n right: 8.33333333%;\n left: auto;\n }\n .ant-col-xl-pull-2.ant-col-rtl {\n right: auto;\n left: 8.33333333%;\n }\n .ant-col-xl-offset-2.ant-col-rtl {\n margin-right: 8.33333333%;\n margin-left: 0;\n }\n .ant-col-xl-push-3.ant-col-rtl {\n right: 12.5%;\n left: auto;\n }\n .ant-col-xl-pull-3.ant-col-rtl {\n right: auto;\n left: 12.5%;\n }\n .ant-col-xl-offset-3.ant-col-rtl {\n margin-right: 
12.5%;\n margin-left: 0;\n }\n .ant-col-xl-push-4.ant-col-rtl {\n right: 16.66666667%;\n left: auto;\n }\n .ant-col-xl-pull-4.ant-col-rtl {\n right: auto;\n left: 16.66666667%;\n }\n .ant-col-xl-offset-4.ant-col-rtl {\n margin-right: 16.66666667%;\n margin-left: 0;\n }\n .ant-col-xl-push-5.ant-col-rtl {\n right: 20.83333333%;\n left: auto;\n }\n .ant-col-xl-pull-5.ant-col-rtl {\n right: auto;\n left: 20.83333333%;\n }\n .ant-col-xl-offset-5.ant-col-rtl {\n margin-right: 20.83333333%;\n margin-left: 0;\n }\n .ant-col-xl-push-6.ant-col-rtl {\n right: 25%;\n left: auto;\n }\n .ant-col-xl-pull-6.ant-col-rtl {\n right: auto;\n left: 25%;\n }\n .ant-col-xl-offset-6.ant-col-rtl {\n margin-right: 25%;\n margin-left: 0;\n }\n .ant-col-xl-push-7.ant-col-rtl {\n right: 29.16666667%;\n left: auto;\n }\n .ant-col-xl-pull-7.ant-col-rtl {\n right: auto;\n left: 29.16666667%;\n }\n .ant-col-xl-offset-7.ant-col-rtl {\n margin-right: 29.16666667%;\n margin-left: 0;\n }\n .ant-col-xl-push-8.ant-col-rtl {\n right: 33.33333333%;\n left: auto;\n }\n .ant-col-xl-pull-8.ant-col-rtl {\n right: auto;\n left: 33.33333333%;\n }\n .ant-col-xl-offset-8.ant-col-rtl {\n margin-right: 33.33333333%;\n margin-left: 0;\n }\n .ant-col-xl-push-9.ant-col-rtl {\n right: 37.5%;\n left: auto;\n }\n .ant-col-xl-pull-9.ant-col-rtl {\n right: auto;\n left: 37.5%;\n }\n .ant-col-xl-offset-9.ant-col-rtl {\n margin-right: 37.5%;\n margin-left: 0;\n }\n .ant-col-xl-push-10.ant-col-rtl {\n right: 41.66666667%;\n left: auto;\n }\n .ant-col-xl-pull-10.ant-col-rtl {\n right: auto;\n left: 41.66666667%;\n }\n .ant-col-xl-offset-10.ant-col-rtl {\n margin-right: 41.66666667%;\n margin-left: 0;\n }\n .ant-col-xl-push-11.ant-col-rtl {\n right: 45.83333333%;\n left: auto;\n }\n .ant-col-xl-pull-11.ant-col-rtl {\n right: auto;\n left: 45.83333333%;\n }\n .ant-col-xl-offset-11.ant-col-rtl {\n margin-right: 45.83333333%;\n margin-left: 0;\n }\n .ant-col-xl-push-12.ant-col-rtl {\n right: 50%;\n left: auto;\n }\n 
.ant-col-xl-pull-12.ant-col-rtl {\n right: auto;\n left: 50%;\n }\n .ant-col-xl-offset-12.ant-col-rtl {\n margin-right: 50%;\n margin-left: 0;\n }\n .ant-col-xl-push-13.ant-col-rtl {\n right: 54.16666667%;\n left: auto;\n }\n .ant-col-xl-pull-13.ant-col-rtl {\n right: auto;\n left: 54.16666667%;\n }\n .ant-col-xl-offset-13.ant-col-rtl {\n margin-right: 54.16666667%;\n margin-left: 0;\n }\n .ant-col-xl-push-14.ant-col-rtl {\n right: 58.33333333%;\n left: auto;\n }\n .ant-col-xl-pull-14.ant-col-rtl {\n right: auto;\n left: 58.33333333%;\n }\n .ant-col-xl-offset-14.ant-col-rtl {\n margin-right: 58.33333333%;\n margin-left: 0;\n }\n .ant-col-xl-push-15.ant-col-rtl {\n right: 62.5%;\n left: auto;\n }\n .ant-col-xl-pull-15.ant-col-rtl {\n right: auto;\n left: 62.5%;\n }\n .ant-col-xl-offset-15.ant-col-rtl {\n margin-right: 62.5%;\n margin-left: 0;\n }\n .ant-col-xl-push-16.ant-col-rtl {\n right: 66.66666667%;\n left: auto;\n }\n .ant-col-xl-pull-16.ant-col-rtl {\n right: auto;\n left: 66.66666667%;\n }\n .ant-col-xl-offset-16.ant-col-rtl {\n margin-right: 66.66666667%;\n margin-left: 0;\n }\n .ant-col-xl-push-17.ant-col-rtl {\n right: 70.83333333%;\n left: auto;\n }\n .ant-col-xl-pull-17.ant-col-rtl {\n right: auto;\n left: 70.83333333%;\n }\n .ant-col-xl-offset-17.ant-col-rtl {\n margin-right: 70.83333333%;\n margin-left: 0;\n }\n .ant-col-xl-push-18.ant-col-rtl {\n right: 75%;\n left: auto;\n }\n .ant-col-xl-pull-18.ant-col-rtl {\n right: auto;\n left: 75%;\n }\n .ant-col-xl-offset-18.ant-col-rtl {\n margin-right: 75%;\n margin-left: 0;\n }\n .ant-col-xl-push-19.ant-col-rtl {\n right: 79.16666667%;\n left: auto;\n }\n .ant-col-xl-pull-19.ant-col-rtl {\n right: auto;\n left: 79.16666667%;\n }\n .ant-col-xl-offset-19.ant-col-rtl {\n margin-right: 79.16666667%;\n margin-left: 0;\n }\n .ant-col-xl-push-20.ant-col-rtl {\n right: 83.33333333%;\n left: auto;\n }\n .ant-col-xl-pull-20.ant-col-rtl {\n right: auto;\n left: 83.33333333%;\n }\n .ant-col-xl-offset-20.ant-col-rtl 
{\n margin-right: 83.33333333%;\n margin-left: 0;\n }\n .ant-col-xl-push-21.ant-col-rtl {\n right: 87.5%;\n left: auto;\n }\n .ant-col-xl-pull-21.ant-col-rtl {\n right: auto;\n left: 87.5%;\n }\n .ant-col-xl-offset-21.ant-col-rtl {\n margin-right: 87.5%;\n margin-left: 0;\n }\n .ant-col-xl-push-22.ant-col-rtl {\n right: 91.66666667%;\n left: auto;\n }\n .ant-col-xl-pull-22.ant-col-rtl {\n right: auto;\n left: 91.66666667%;\n }\n .ant-col-xl-offset-22.ant-col-rtl {\n margin-right: 91.66666667%;\n margin-left: 0;\n }\n .ant-col-xl-push-23.ant-col-rtl {\n right: 95.83333333%;\n left: auto;\n }\n .ant-col-xl-pull-23.ant-col-rtl {\n right: auto;\n left: 95.83333333%;\n }\n .ant-col-xl-offset-23.ant-col-rtl {\n margin-right: 95.83333333%;\n margin-left: 0;\n }\n .ant-col-xl-push-24.ant-col-rtl {\n right: 100%;\n left: auto;\n }\n .ant-col-xl-pull-24.ant-col-rtl {\n right: auto;\n left: 100%;\n }\n .ant-col-xl-offset-24.ant-col-rtl {\n margin-right: 100%;\n margin-left: 0;\n }\n}\n@media (min-width: 1600px) {\n .ant-col-xxl-24 {\n display: block;\n flex: 0 0 100%;\n max-width: 100%;\n }\n .ant-col-xxl-push-24 {\n left: 100%;\n }\n .ant-col-xxl-pull-24 {\n right: 100%;\n }\n .ant-col-xxl-offset-24 {\n margin-left: 100%;\n }\n .ant-col-xxl-order-24 {\n order: 24;\n }\n .ant-col-xxl-23 {\n display: block;\n flex: 0 0 95.83333333%;\n max-width: 95.83333333%;\n }\n .ant-col-xxl-push-23 {\n left: 95.83333333%;\n }\n .ant-col-xxl-pull-23 {\n right: 95.83333333%;\n }\n .ant-col-xxl-offset-23 {\n margin-left: 95.83333333%;\n }\n .ant-col-xxl-order-23 {\n order: 23;\n }\n .ant-col-xxl-22 {\n display: block;\n flex: 0 0 91.66666667%;\n max-width: 91.66666667%;\n }\n .ant-col-xxl-push-22 {\n left: 91.66666667%;\n }\n .ant-col-xxl-pull-22 {\n right: 91.66666667%;\n }\n .ant-col-xxl-offset-22 {\n margin-left: 91.66666667%;\n }\n .ant-col-xxl-order-22 {\n order: 22;\n }\n .ant-col-xxl-21 {\n display: block;\n flex: 0 0 87.5%;\n max-width: 87.5%;\n }\n .ant-col-xxl-push-21 {\n left: 
87.5%;\n }\n .ant-col-xxl-pull-21 {\n right: 87.5%;\n }\n .ant-col-xxl-offset-21 {\n margin-left: 87.5%;\n }\n .ant-col-xxl-order-21 {\n order: 21;\n }\n .ant-col-xxl-20 {\n display: block;\n flex: 0 0 83.33333333%;\n max-width: 83.33333333%;\n }\n .ant-col-xxl-push-20 {\n left: 83.33333333%;\n }\n .ant-col-xxl-pull-20 {\n right: 83.33333333%;\n }\n .ant-col-xxl-offset-20 {\n margin-left: 83.33333333%;\n }\n .ant-col-xxl-order-20 {\n order: 20;\n }\n .ant-col-xxl-19 {\n display: block;\n flex: 0 0 79.16666667%;\n max-width: 79.16666667%;\n }\n .ant-col-xxl-push-19 {\n left: 79.16666667%;\n }\n .ant-col-xxl-pull-19 {\n right: 79.16666667%;\n }\n .ant-col-xxl-offset-19 {\n margin-left: 79.16666667%;\n }\n .ant-col-xxl-order-19 {\n order: 19;\n }\n .ant-col-xxl-18 {\n display: block;\n flex: 0 0 75%;\n max-width: 75%;\n }\n .ant-col-xxl-push-18 {\n left: 75%;\n }\n .ant-col-xxl-pull-18 {\n right: 75%;\n }\n .ant-col-xxl-offset-18 {\n margin-left: 75%;\n }\n .ant-col-xxl-order-18 {\n order: 18;\n }\n .ant-col-xxl-17 {\n display: block;\n flex: 0 0 70.83333333%;\n max-width: 70.83333333%;\n }\n .ant-col-xxl-push-17 {\n left: 70.83333333%;\n }\n .ant-col-xxl-pull-17 {\n right: 70.83333333%;\n }\n .ant-col-xxl-offset-17 {\n margin-left: 70.83333333%;\n }\n .ant-col-xxl-order-17 {\n order: 17;\n }\n .ant-col-xxl-16 {\n display: block;\n flex: 0 0 66.66666667%;\n max-width: 66.66666667%;\n }\n .ant-col-xxl-push-16 {\n left: 66.66666667%;\n }\n .ant-col-xxl-pull-16 {\n right: 66.66666667%;\n }\n .ant-col-xxl-offset-16 {\n margin-left: 66.66666667%;\n }\n .ant-col-xxl-order-16 {\n order: 16;\n }\n .ant-col-xxl-15 {\n display: block;\n flex: 0 0 62.5%;\n max-width: 62.5%;\n }\n .ant-col-xxl-push-15 {\n left: 62.5%;\n }\n .ant-col-xxl-pull-15 {\n right: 62.5%;\n }\n .ant-col-xxl-offset-15 {\n margin-left: 62.5%;\n }\n .ant-col-xxl-order-15 {\n order: 15;\n }\n .ant-col-xxl-14 {\n display: block;\n flex: 0 0 58.33333333%;\n max-width: 58.33333333%;\n }\n .ant-col-xxl-push-14 {\n 
left: 58.33333333%;\n }\n .ant-col-xxl-pull-14 {\n right: 58.33333333%;\n }\n .ant-col-xxl-offset-14 {\n margin-left: 58.33333333%;\n }\n .ant-col-xxl-order-14 {\n order: 14;\n }\n .ant-col-xxl-13 {\n display: block;\n flex: 0 0 54.16666667%;\n max-width: 54.16666667%;\n }\n .ant-col-xxl-push-13 {\n left: 54.16666667%;\n }\n .ant-col-xxl-pull-13 {\n right: 54.16666667%;\n }\n .ant-col-xxl-offset-13 {\n margin-left: 54.16666667%;\n }\n .ant-col-xxl-order-13 {\n order: 13;\n }\n .ant-col-xxl-12 {\n display: block;\n flex: 0 0 50%;\n max-width: 50%;\n }\n .ant-col-xxl-push-12 {\n left: 50%;\n }\n .ant-col-xxl-pull-12 {\n right: 50%;\n }\n .ant-col-xxl-offset-12 {\n margin-left: 50%;\n }\n .ant-col-xxl-order-12 {\n order: 12;\n }\n .ant-col-xxl-11 {\n display: block;\n flex: 0 0 45.83333333%;\n max-width: 45.83333333%;\n }\n .ant-col-xxl-push-11 {\n left: 45.83333333%;\n }\n .ant-col-xxl-pull-11 {\n right: 45.83333333%;\n }\n .ant-col-xxl-offset-11 {\n margin-left: 45.83333333%;\n }\n .ant-col-xxl-order-11 {\n order: 11;\n }\n .ant-col-xxl-10 {\n display: block;\n flex: 0 0 41.66666667%;\n max-width: 41.66666667%;\n }\n .ant-col-xxl-push-10 {\n left: 41.66666667%;\n }\n .ant-col-xxl-pull-10 {\n right: 41.66666667%;\n }\n .ant-col-xxl-offset-10 {\n margin-left: 41.66666667%;\n }\n .ant-col-xxl-order-10 {\n order: 10;\n }\n .ant-col-xxl-9 {\n display: block;\n flex: 0 0 37.5%;\n max-width: 37.5%;\n }\n .ant-col-xxl-push-9 {\n left: 37.5%;\n }\n .ant-col-xxl-pull-9 {\n right: 37.5%;\n }\n .ant-col-xxl-offset-9 {\n margin-left: 37.5%;\n }\n .ant-col-xxl-order-9 {\n order: 9;\n }\n .ant-col-xxl-8 {\n display: block;\n flex: 0 0 33.33333333%;\n max-width: 33.33333333%;\n }\n .ant-col-xxl-push-8 {\n left: 33.33333333%;\n }\n .ant-col-xxl-pull-8 {\n right: 33.33333333%;\n }\n .ant-col-xxl-offset-8 {\n margin-left: 33.33333333%;\n }\n .ant-col-xxl-order-8 {\n order: 8;\n }\n .ant-col-xxl-7 {\n display: block;\n flex: 0 0 29.16666667%;\n max-width: 29.16666667%;\n }\n 
.ant-col-xxl-push-7 {\n left: 29.16666667%;\n }\n .ant-col-xxl-pull-7 {\n right: 29.16666667%;\n }\n .ant-col-xxl-offset-7 {\n margin-left: 29.16666667%;\n }\n .ant-col-xxl-order-7 {\n order: 7;\n }\n .ant-col-xxl-6 {\n display: block;\n flex: 0 0 25%;\n max-width: 25%;\n }\n .ant-col-xxl-push-6 {\n left: 25%;\n }\n .ant-col-xxl-pull-6 {\n right: 25%;\n }\n .ant-col-xxl-offset-6 {\n margin-left: 25%;\n }\n .ant-col-xxl-order-6 {\n order: 6;\n }\n .ant-col-xxl-5 {\n display: block;\n flex: 0 0 20.83333333%;\n max-width: 20.83333333%;\n }\n .ant-col-xxl-push-5 {\n left: 20.83333333%;\n }\n .ant-col-xxl-pull-5 {\n right: 20.83333333%;\n }\n .ant-col-xxl-offset-5 {\n margin-left: 20.83333333%;\n }\n .ant-col-xxl-order-5 {\n order: 5;\n }\n .ant-col-xxl-4 {\n display: block;\n flex: 0 0 16.66666667%;\n max-width: 16.66666667%;\n }\n .ant-col-xxl-push-4 {\n left: 16.66666667%;\n }\n .ant-col-xxl-pull-4 {\n right: 16.66666667%;\n }\n .ant-col-xxl-offset-4 {\n margin-left: 16.66666667%;\n }\n .ant-col-xxl-order-4 {\n order: 4;\n }\n .ant-col-xxl-3 {\n display: block;\n flex: 0 0 12.5%;\n max-width: 12.5%;\n }\n .ant-col-xxl-push-3 {\n left: 12.5%;\n }\n .ant-col-xxl-pull-3 {\n right: 12.5%;\n }\n .ant-col-xxl-offset-3 {\n margin-left: 12.5%;\n }\n .ant-col-xxl-order-3 {\n order: 3;\n }\n .ant-col-xxl-2 {\n display: block;\n flex: 0 0 8.33333333%;\n max-width: 8.33333333%;\n }\n .ant-col-xxl-push-2 {\n left: 8.33333333%;\n }\n .ant-col-xxl-pull-2 {\n right: 8.33333333%;\n }\n .ant-col-xxl-offset-2 {\n margin-left: 8.33333333%;\n }\n .ant-col-xxl-order-2 {\n order: 2;\n }\n .ant-col-xxl-1 {\n display: block;\n flex: 0 0 4.16666667%;\n max-width: 4.16666667%;\n }\n .ant-col-xxl-push-1 {\n left: 4.16666667%;\n }\n .ant-col-xxl-pull-1 {\n right: 4.16666667%;\n }\n .ant-col-xxl-offset-1 {\n margin-left: 4.16666667%;\n }\n .ant-col-xxl-order-1 {\n order: 1;\n }\n .ant-col-xxl-0 {\n display: none;\n }\n .ant-col-push-0 {\n left: auto;\n }\n .ant-col-pull-0 {\n right: auto;\n }\n 
.ant-col-xxl-push-0 {\n left: auto;\n }\n .ant-col-xxl-pull-0 {\n right: auto;\n }\n .ant-col-xxl-offset-0 {\n margin-left: 0;\n }\n .ant-col-xxl-order-0 {\n order: 0;\n }\n .ant-col-push-0.ant-col-rtl {\n right: auto;\n }\n .ant-col-pull-0.ant-col-rtl {\n left: auto;\n }\n .ant-col-xxl-push-0.ant-col-rtl {\n right: auto;\n }\n .ant-col-xxl-pull-0.ant-col-rtl {\n left: auto;\n }\n .ant-col-xxl-offset-0.ant-col-rtl {\n margin-right: 0;\n }\n .ant-col-xxl-push-1.ant-col-rtl {\n right: 4.16666667%;\n left: auto;\n }\n .ant-col-xxl-pull-1.ant-col-rtl {\n right: auto;\n left: 4.16666667%;\n }\n .ant-col-xxl-offset-1.ant-col-rtl {\n margin-right: 4.16666667%;\n margin-left: 0;\n }\n .ant-col-xxl-push-2.ant-col-rtl {\n right: 8.33333333%;\n left: auto;\n }\n .ant-col-xxl-pull-2.ant-col-rtl {\n right: auto;\n left: 8.33333333%;\n }\n .ant-col-xxl-offset-2.ant-col-rtl {\n margin-right: 8.33333333%;\n margin-left: 0;\n }\n .ant-col-xxl-push-3.ant-col-rtl {\n right: 12.5%;\n left: auto;\n }\n .ant-col-xxl-pull-3.ant-col-rtl {\n right: auto;\n left: 12.5%;\n }\n .ant-col-xxl-offset-3.ant-col-rtl {\n margin-right: 12.5%;\n margin-left: 0;\n }\n .ant-col-xxl-push-4.ant-col-rtl {\n right: 16.66666667%;\n left: auto;\n }\n .ant-col-xxl-pull-4.ant-col-rtl {\n right: auto;\n left: 16.66666667%;\n }\n .ant-col-xxl-offset-4.ant-col-rtl {\n margin-right: 16.66666667%;\n margin-left: 0;\n }\n .ant-col-xxl-push-5.ant-col-rtl {\n right: 20.83333333%;\n left: auto;\n }\n .ant-col-xxl-pull-5.ant-col-rtl {\n right: auto;\n left: 20.83333333%;\n }\n .ant-col-xxl-offset-5.ant-col-rtl {\n margin-right: 20.83333333%;\n margin-left: 0;\n }\n .ant-col-xxl-push-6.ant-col-rtl {\n right: 25%;\n left: auto;\n }\n .ant-col-xxl-pull-6.ant-col-rtl {\n right: auto;\n left: 25%;\n }\n .ant-col-xxl-offset-6.ant-col-rtl {\n margin-right: 25%;\n margin-left: 0;\n }\n .ant-col-xxl-push-7.ant-col-rtl {\n right: 29.16666667%;\n left: auto;\n }\n .ant-col-xxl-pull-7.ant-col-rtl {\n right: auto;\n left: 
29.16666667%;\n }\n .ant-col-xxl-offset-7.ant-col-rtl {\n margin-right: 29.16666667%;\n margin-left: 0;\n }\n .ant-col-xxl-push-8.ant-col-rtl {\n right: 33.33333333%;\n left: auto;\n }\n .ant-col-xxl-pull-8.ant-col-rtl {\n right: auto;\n left: 33.33333333%;\n }\n .ant-col-xxl-offset-8.ant-col-rtl {\n margin-right: 33.33333333%;\n margin-left: 0;\n }\n .ant-col-xxl-push-9.ant-col-rtl {\n right: 37.5%;\n left: auto;\n }\n .ant-col-xxl-pull-9.ant-col-rtl {\n right: auto;\n left: 37.5%;\n }\n .ant-col-xxl-offset-9.ant-col-rtl {\n margin-right: 37.5%;\n margin-left: 0;\n }\n .ant-col-xxl-push-10.ant-col-rtl {\n right: 41.66666667%;\n left: auto;\n }\n .ant-col-xxl-pull-10.ant-col-rtl {\n right: auto;\n left: 41.66666667%;\n }\n .ant-col-xxl-offset-10.ant-col-rtl {\n margin-right: 41.66666667%;\n margin-left: 0;\n }\n .ant-col-xxl-push-11.ant-col-rtl {\n right: 45.83333333%;\n left: auto;\n }\n .ant-col-xxl-pull-11.ant-col-rtl {\n right: auto;\n left: 45.83333333%;\n }\n .ant-col-xxl-offset-11.ant-col-rtl {\n margin-right: 45.83333333%;\n margin-left: 0;\n }\n .ant-col-xxl-push-12.ant-col-rtl {\n right: 50%;\n left: auto;\n }\n .ant-col-xxl-pull-12.ant-col-rtl {\n right: auto;\n left: 50%;\n }\n .ant-col-xxl-offset-12.ant-col-rtl {\n margin-right: 50%;\n margin-left: 0;\n }\n .ant-col-xxl-push-13.ant-col-rtl {\n right: 54.16666667%;\n left: auto;\n }\n .ant-col-xxl-pull-13.ant-col-rtl {\n right: auto;\n left: 54.16666667%;\n }\n .ant-col-xxl-offset-13.ant-col-rtl {\n margin-right: 54.16666667%;\n margin-left: 0;\n }\n .ant-col-xxl-push-14.ant-col-rtl {\n right: 58.33333333%;\n left: auto;\n }\n .ant-col-xxl-pull-14.ant-col-rtl {\n right: auto;\n left: 58.33333333%;\n }\n .ant-col-xxl-offset-14.ant-col-rtl {\n margin-right: 58.33333333%;\n margin-left: 0;\n }\n .ant-col-xxl-push-15.ant-col-rtl {\n right: 62.5%;\n left: auto;\n }\n .ant-col-xxl-pull-15.ant-col-rtl {\n right: auto;\n left: 62.5%;\n }\n .ant-col-xxl-offset-15.ant-col-rtl {\n margin-right: 62.5%;\n 
margin-left: 0;\n }\n .ant-col-xxl-push-16.ant-col-rtl {\n right: 66.66666667%;\n left: auto;\n }\n .ant-col-xxl-pull-16.ant-col-rtl {\n right: auto;\n left: 66.66666667%;\n }\n .ant-col-xxl-offset-16.ant-col-rtl {\n margin-right: 66.66666667%;\n margin-left: 0;\n }\n .ant-col-xxl-push-17.ant-col-rtl {\n right: 70.83333333%;\n left: auto;\n }\n .ant-col-xxl-pull-17.ant-col-rtl {\n right: auto;\n left: 70.83333333%;\n }\n .ant-col-xxl-offset-17.ant-col-rtl {\n margin-right: 70.83333333%;\n margin-left: 0;\n }\n .ant-col-xxl-push-18.ant-col-rtl {\n right: 75%;\n left: auto;\n }\n .ant-col-xxl-pull-18.ant-col-rtl {\n right: auto;\n left: 75%;\n }\n .ant-col-xxl-offset-18.ant-col-rtl {\n margin-right: 75%;\n margin-left: 0;\n }\n .ant-col-xxl-push-19.ant-col-rtl {\n right: 79.16666667%;\n left: auto;\n }\n .ant-col-xxl-pull-19.ant-col-rtl {\n right: auto;\n left: 79.16666667%;\n }\n .ant-col-xxl-offset-19.ant-col-rtl {\n margin-right: 79.16666667%;\n margin-left: 0;\n }\n .ant-col-xxl-push-20.ant-col-rtl {\n right: 83.33333333%;\n left: auto;\n }\n .ant-col-xxl-pull-20.ant-col-rtl {\n right: auto;\n left: 83.33333333%;\n }\n .ant-col-xxl-offset-20.ant-col-rtl {\n margin-right: 83.33333333%;\n margin-left: 0;\n }\n .ant-col-xxl-push-21.ant-col-rtl {\n right: 87.5%;\n left: auto;\n }\n .ant-col-xxl-pull-21.ant-col-rtl {\n right: auto;\n left: 87.5%;\n }\n .ant-col-xxl-offset-21.ant-col-rtl {\n margin-right: 87.5%;\n margin-left: 0;\n }\n .ant-col-xxl-push-22.ant-col-rtl {\n right: 91.66666667%;\n left: auto;\n }\n .ant-col-xxl-pull-22.ant-col-rtl {\n right: auto;\n left: 91.66666667%;\n }\n .ant-col-xxl-offset-22.ant-col-rtl {\n margin-right: 91.66666667%;\n margin-left: 0;\n }\n .ant-col-xxl-push-23.ant-col-rtl {\n right: 95.83333333%;\n left: auto;\n }\n .ant-col-xxl-pull-23.ant-col-rtl {\n right: auto;\n left: 95.83333333%;\n }\n .ant-col-xxl-offset-23.ant-col-rtl {\n margin-right: 95.83333333%;\n margin-left: 0;\n }\n .ant-col-xxl-push-24.ant-col-rtl {\n right: 
100%;\n left: auto;\n }\n .ant-col-xxl-pull-24.ant-col-rtl {\n right: auto;\n left: 100%;\n }\n .ant-col-xxl-offset-24.ant-col-rtl {\n margin-right: 100%;\n margin-left: 0;\n }\n}\n.ant-row-rtl {\n direction: rtl;\n}\n",""]);const a=o},1958:(n,e,t)=>{"use strict";t.d(e,{Z:()=>a});var r=t(3645),o=t.n(r)()((function(n){return n[1]}));o.push([n.id,"/* stylelint-disable at-rule-empty-line-before,at-rule-name-space-after,at-rule-no-unknown */\n/* stylelint-disable no-duplicate-selectors */\n/* stylelint-disable */\n/* stylelint-disable declaration-bang-space-before,no-duplicate-selectors,string-no-newline */\n.ant-list {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n color: rgba(0, 0, 0, 0.85);\n font-size: 14px;\n font-variant: tabular-nums;\n line-height: 1.5715;\n list-style: none;\n font-feature-settings: 'tnum';\n position: relative;\n}\n.ant-list * {\n outline: none;\n}\n.ant-list-pagination {\n margin-top: 24px;\n text-align: right;\n}\n.ant-list-pagination .ant-pagination-options {\n text-align: left;\n}\n.ant-list-more {\n margin-top: 12px;\n text-align: center;\n}\n.ant-list-more button {\n padding-right: 32px;\n padding-left: 32px;\n}\n.ant-list-spin {\n min-height: 40px;\n text-align: center;\n}\n.ant-list-empty-text {\n padding: 16px;\n color: rgba(0, 0, 0, 0.25);\n font-size: 14px;\n text-align: center;\n}\n.ant-list-items {\n margin: 0;\n padding: 0;\n list-style: none;\n}\n.ant-list-item {\n display: flex;\n align-items: center;\n justify-content: space-between;\n padding: 12px 0;\n color: rgba(0, 0, 0, 0.85);\n}\n.ant-list-item-meta {\n display: flex;\n flex: 1;\n align-items: flex-start;\n max-width: 100%;\n}\n.ant-list-item-meta-avatar {\n margin-right: 16px;\n}\n.ant-list-item-meta-content {\n flex: 1 0;\n width: 0;\n color: rgba(0, 0, 0, 0.85);\n}\n.ant-list-item-meta-title {\n margin-bottom: 4px;\n color: rgba(0, 0, 0, 0.85);\n font-size: 14px;\n line-height: 1.5715;\n}\n.ant-list-item-meta-title > a {\n color: rgba(0, 0, 0, 0.85);\n 
transition: all 0.3s;\n}\n.ant-list-item-meta-title > a:hover {\n color: #1890ff;\n}\n.ant-list-item-meta-description {\n color: rgba(0, 0, 0, 0.45);\n font-size: 14px;\n line-height: 1.5715;\n}\n.ant-list-item-action {\n flex: 0 0 auto;\n margin-left: 48px;\n padding: 0;\n font-size: 0;\n list-style: none;\n}\n.ant-list-item-action > li {\n position: relative;\n display: inline-block;\n padding: 0 8px;\n color: rgba(0, 0, 0, 0.45);\n font-size: 14px;\n line-height: 1.5715;\n text-align: center;\n}\n.ant-list-item-action > li:first-child {\n padding-left: 0;\n}\n.ant-list-item-action-split {\n position: absolute;\n top: 50%;\n right: 0;\n width: 1px;\n height: 14px;\n margin-top: -7px;\n background-color: #f0f0f0;\n}\n.ant-list-header {\n background: transparent;\n}\n.ant-list-footer {\n background: transparent;\n}\n.ant-list-header,\n.ant-list-footer {\n padding-top: 12px;\n padding-bottom: 12px;\n}\n.ant-list-empty {\n padding: 16px 0;\n color: rgba(0, 0, 0, 0.45);\n font-size: 12px;\n text-align: center;\n}\n.ant-list-split .ant-list-item {\n border-bottom: 1px solid #f0f0f0;\n}\n.ant-list-split .ant-list-item:last-child {\n border-bottom: none;\n}\n.ant-list-split .ant-list-header {\n border-bottom: 1px solid #f0f0f0;\n}\n.ant-list-split.ant-list-empty .ant-list-footer {\n border-top: 1px solid #f0f0f0;\n}\n.ant-list-loading .ant-list-spin-nested-loading {\n min-height: 32px;\n}\n.ant-list-split.ant-list-something-after-last-item .ant-spin-container > .ant-list-items > .ant-list-item:last-child {\n border-bottom: 1px solid #f0f0f0;\n}\n.ant-list-lg .ant-list-item {\n padding: 16px 24px;\n}\n.ant-list-sm .ant-list-item {\n padding: 8px 16px;\n}\n.ant-list-vertical .ant-list-item {\n align-items: initial;\n}\n.ant-list-vertical .ant-list-item-main {\n display: block;\n flex: 1;\n}\n.ant-list-vertical .ant-list-item-extra {\n margin-left: 40px;\n}\n.ant-list-vertical .ant-list-item-meta {\n margin-bottom: 16px;\n}\n.ant-list-vertical .ant-list-item-meta-title {\n 
margin-bottom: 12px;\n color: rgba(0, 0, 0, 0.85);\n font-size: 16px;\n line-height: 24px;\n}\n.ant-list-vertical .ant-list-item-action {\n margin-top: 16px;\n margin-left: auto;\n}\n.ant-list-vertical .ant-list-item-action > li {\n padding: 0 16px;\n}\n.ant-list-vertical .ant-list-item-action > li:first-child {\n padding-left: 0;\n}\n.ant-list-grid .ant-col > .ant-list-item {\n display: block;\n max-width: 100%;\n margin-bottom: 16px;\n padding-top: 0;\n padding-bottom: 0;\n border-bottom: none;\n}\n.ant-list-item-no-flex {\n display: block;\n}\n.ant-list:not(.ant-list-vertical) .ant-list-item-no-flex .ant-list-item-action {\n float: right;\n}\n.ant-list-bordered {\n border: 1px solid #d9d9d9;\n border-radius: 2px;\n}\n.ant-list-bordered .ant-list-header {\n padding-right: 24px;\n padding-left: 24px;\n}\n.ant-list-bordered .ant-list-footer {\n padding-right: 24px;\n padding-left: 24px;\n}\n.ant-list-bordered .ant-list-item {\n padding-right: 24px;\n padding-left: 24px;\n}\n.ant-list-bordered .ant-list-pagination {\n margin: 16px 24px;\n}\n.ant-list-bordered.ant-list-sm .ant-list-item {\n padding: 8px 16px;\n}\n.ant-list-bordered.ant-list-sm .ant-list-header,\n.ant-list-bordered.ant-list-sm .ant-list-footer {\n padding: 8px 16px;\n}\n.ant-list-bordered.ant-list-lg .ant-list-item {\n padding: 16px 24px;\n}\n.ant-list-bordered.ant-list-lg .ant-list-header,\n.ant-list-bordered.ant-list-lg .ant-list-footer {\n padding: 16px 24px;\n}\n@media screen and (max-width: 768px) {\n .ant-list-item-action {\n margin-left: 24px;\n }\n .ant-list-vertical .ant-list-item-extra {\n margin-left: 24px;\n }\n}\n@media screen and (max-width: 576px) {\n .ant-list-item {\n flex-wrap: wrap;\n }\n .ant-list-item-action {\n margin-left: 12px;\n }\n .ant-list-vertical .ant-list-item {\n flex-wrap: wrap-reverse;\n }\n .ant-list-vertical .ant-list-item-main {\n min-width: 220px;\n }\n .ant-list-vertical .ant-list-item-extra {\n margin: auto auto 16px;\n }\n}\n.ant-list-rtl {\n direction: rtl;\n 
text-align: right;\n}\n.ant-list-rtl .ReactVirtualized__List .ant-list-item {\n direction: rtl;\n}\n.ant-list-rtl .ant-list-pagination {\n text-align: left;\n}\n.ant-list-rtl .ant-list-item-meta-avatar {\n margin-right: 0;\n margin-left: 16px;\n}\n.ant-list-rtl .ant-list-item-action {\n margin-right: 48px;\n margin-left: 0;\n}\n.ant-list.ant-list-rtl .ant-list-item-action > li:first-child {\n padding-right: 0;\n padding-left: 16px;\n}\n.ant-list-rtl .ant-list-item-action-split {\n right: auto;\n left: 0;\n}\n.ant-list-rtl.ant-list-vertical .ant-list-item-extra {\n margin-right: 40px;\n margin-left: 0;\n}\n.ant-list-rtl.ant-list-vertical .ant-list-item-action {\n margin-right: auto;\n}\n.ant-list-rtl .ant-list-vertical .ant-list-item-action > li:first-child {\n padding-right: 0;\n padding-left: 16px;\n}\n.ant-list-rtl .ant-list:not(.ant-list-vertical) .ant-list-item-no-flex .ant-list-item-action {\n float: left;\n}\n@media screen and (max-width: 768px) {\n .ant-list-rtl .ant-list-item-action {\n margin-right: 24px;\n margin-left: 0;\n }\n .ant-list-rtl .ant-list-vertical .ant-list-item-extra {\n margin-right: 24px;\n margin-left: 0;\n }\n}\n@media screen and (max-width: 576px) {\n .ant-list-rtl .ant-list-item-action {\n margin-right: 22px;\n margin-left: 0;\n }\n .ant-list-rtl.ant-list-vertical .ant-list-item-extra {\n margin: auto auto 16px;\n }\n}\n",""]);const a=o},6761:(n,e,t)=>{"use strict";t.d(e,{Z:()=>a});var r=t(3645),o=t.n(r)()((function(n){return n[1]}));o.push([n.id,"/* stylelint-disable at-rule-empty-line-before,at-rule-name-space-after,at-rule-no-unknown */\n/* stylelint-disable no-duplicate-selectors */\n/* stylelint-disable */\n/* stylelint-disable declaration-bang-space-before,no-duplicate-selectors,string-no-newline */\n.ant-pagination {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n color: rgba(0, 0, 0, 0.85);\n font-size: 14px;\n font-variant: tabular-nums;\n line-height: 1.5715;\n list-style: none;\n font-feature-settings: 
'tnum';\n}\n.ant-pagination ul,\n.ant-pagination ol {\n margin: 0;\n padding: 0;\n list-style: none;\n}\n.ant-pagination::after {\n display: block;\n clear: both;\n height: 0;\n overflow: hidden;\n visibility: hidden;\n content: ' ';\n}\n.ant-pagination-total-text {\n display: inline-block;\n height: 32px;\n margin-right: 8px;\n line-height: 30px;\n vertical-align: middle;\n}\n.ant-pagination-item {\n display: inline-block;\n min-width: 32px;\n height: 32px;\n margin-right: 8px;\n font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, 'Noto Sans', sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol', 'Noto Color Emoji';\n line-height: 30px;\n text-align: center;\n vertical-align: middle;\n list-style: none;\n background-color: #fff;\n border: 1px solid #d9d9d9;\n border-radius: 2px;\n outline: 0;\n cursor: pointer;\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n}\n.ant-pagination-item a {\n display: block;\n padding: 0 6px;\n color: rgba(0, 0, 0, 0.85);\n transition: none;\n}\n.ant-pagination-item a:hover {\n text-decoration: none;\n}\n.ant-pagination-item:focus-visible,\n.ant-pagination-item:hover {\n border-color: #1890ff;\n transition: all 0.3s;\n}\n.ant-pagination-item:focus-visible a,\n.ant-pagination-item:hover a {\n color: #1890ff;\n}\n.ant-pagination-item-active {\n font-weight: 500;\n background: #fff;\n border-color: #1890ff;\n}\n.ant-pagination-item-active a {\n color: #1890ff;\n}\n.ant-pagination-item-active:focus-visible,\n.ant-pagination-item-active:hover {\n border-color: #40a9ff;\n}\n.ant-pagination-item-active:focus-visible a,\n.ant-pagination-item-active:hover a {\n color: #40a9ff;\n}\n.ant-pagination-jump-prev,\n.ant-pagination-jump-next {\n outline: 0;\n}\n.ant-pagination-jump-prev .ant-pagination-item-container,\n.ant-pagination-jump-next .ant-pagination-item-container {\n position: relative;\n}\n.ant-pagination-jump-prev 
.ant-pagination-item-container .ant-pagination-item-link-icon,\n.ant-pagination-jump-next .ant-pagination-item-container .ant-pagination-item-link-icon {\n color: #1890ff;\n font-size: 12px;\n letter-spacing: -1px;\n opacity: 0;\n transition: all 0.2s;\n}\n.ant-pagination-jump-prev .ant-pagination-item-container .ant-pagination-item-link-icon-svg,\n.ant-pagination-jump-next .ant-pagination-item-container .ant-pagination-item-link-icon-svg {\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n margin: auto;\n}\n.ant-pagination-jump-prev .ant-pagination-item-container .ant-pagination-item-ellipsis,\n.ant-pagination-jump-next .ant-pagination-item-container .ant-pagination-item-ellipsis {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n display: block;\n margin: auto;\n color: rgba(0, 0, 0, 0.25);\n font-family: Arial, Helvetica, sans-serif;\n letter-spacing: 2px;\n text-align: center;\n text-indent: 0.13em;\n opacity: 1;\n transition: all 0.2s;\n}\n.ant-pagination-jump-prev:focus-visible .ant-pagination-item-link-icon,\n.ant-pagination-jump-next:focus-visible .ant-pagination-item-link-icon,\n.ant-pagination-jump-prev:hover .ant-pagination-item-link-icon,\n.ant-pagination-jump-next:hover .ant-pagination-item-link-icon {\n opacity: 1;\n}\n.ant-pagination-jump-prev:focus-visible .ant-pagination-item-ellipsis,\n.ant-pagination-jump-next:focus-visible .ant-pagination-item-ellipsis,\n.ant-pagination-jump-prev:hover .ant-pagination-item-ellipsis,\n.ant-pagination-jump-next:hover .ant-pagination-item-ellipsis {\n opacity: 0;\n}\n.ant-pagination-prev,\n.ant-pagination-jump-prev,\n.ant-pagination-jump-next {\n margin-right: 8px;\n}\n.ant-pagination-prev,\n.ant-pagination-next,\n.ant-pagination-jump-prev,\n.ant-pagination-jump-next {\n display: inline-block;\n min-width: 32px;\n height: 32px;\n color: rgba(0, 0, 0, 0.85);\n font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, 'Noto Sans', sans-serif, 'Apple Color Emoji', 
'Segoe UI Emoji', 'Segoe UI Symbol', 'Noto Color Emoji';\n line-height: 32px;\n text-align: center;\n vertical-align: middle;\n list-style: none;\n border-radius: 2px;\n cursor: pointer;\n transition: all 0.3s;\n}\n.ant-pagination-prev,\n.ant-pagination-next {\n font-family: Arial, Helvetica, sans-serif;\n outline: 0;\n}\n.ant-pagination-prev button,\n.ant-pagination-next button {\n color: rgba(0, 0, 0, 0.85);\n cursor: pointer;\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n}\n.ant-pagination-prev:hover button,\n.ant-pagination-next:hover button {\n border-color: #40a9ff;\n}\n.ant-pagination-prev .ant-pagination-item-link,\n.ant-pagination-next .ant-pagination-item-link {\n display: block;\n width: 100%;\n height: 100%;\n padding: 0;\n font-size: 12px;\n text-align: center;\n background-color: #fff;\n border: 1px solid #d9d9d9;\n border-radius: 2px;\n outline: none;\n transition: all 0.3s;\n}\n.ant-pagination-prev:focus-visible .ant-pagination-item-link,\n.ant-pagination-next:focus-visible .ant-pagination-item-link,\n.ant-pagination-prev:hover .ant-pagination-item-link,\n.ant-pagination-next:hover .ant-pagination-item-link {\n color: #1890ff;\n border-color: #1890ff;\n}\n.ant-pagination-disabled,\n.ant-pagination-disabled:hover,\n.ant-pagination-disabled:focus-visible {\n cursor: not-allowed;\n}\n.ant-pagination-disabled .ant-pagination-item-link,\n.ant-pagination-disabled:hover .ant-pagination-item-link,\n.ant-pagination-disabled:focus-visible .ant-pagination-item-link {\n color: rgba(0, 0, 0, 0.25);\n border-color: #d9d9d9;\n cursor: not-allowed;\n}\n.ant-pagination-slash {\n margin: 0 10px 0 5px;\n}\n.ant-pagination-options {\n display: inline-block;\n margin-left: 16px;\n vertical-align: middle;\n}\n@media all and (-ms-high-contrast: none) {\n .ant-pagination-options *::-ms-backdrop,\n .ant-pagination-options {\n vertical-align: top;\n }\n}\n.ant-pagination-options-size-changer.ant-select {\n display: 
inline-block;\n width: auto;\n}\n.ant-pagination-options-quick-jumper {\n display: inline-block;\n height: 32px;\n margin-left: 8px;\n line-height: 32px;\n vertical-align: top;\n}\n.ant-pagination-options-quick-jumper input {\n position: relative;\n display: inline-block;\n width: 100%;\n min-width: 0;\n padding: 4px 11px;\n color: rgba(0, 0, 0, 0.85);\n font-size: 14px;\n line-height: 1.5715;\n background-color: #fff;\n background-image: none;\n border: 1px solid #d9d9d9;\n border-radius: 2px;\n transition: all 0.3s;\n width: 50px;\n height: 32px;\n margin: 0 8px;\n}\n.ant-pagination-options-quick-jumper input::-moz-placeholder {\n opacity: 1;\n}\n.ant-pagination-options-quick-jumper input:-ms-input-placeholder {\n color: #bfbfbf;\n}\n.ant-pagination-options-quick-jumper input::placeholder {\n color: #bfbfbf;\n}\n.ant-pagination-options-quick-jumper input:-moz-placeholder-shown {\n text-overflow: ellipsis;\n}\n.ant-pagination-options-quick-jumper input:-ms-input-placeholder {\n text-overflow: ellipsis;\n}\n.ant-pagination-options-quick-jumper input:placeholder-shown {\n text-overflow: ellipsis;\n}\n.ant-pagination-options-quick-jumper input:hover {\n border-color: #40a9ff;\n border-right-width: 1px !important;\n}\n.ant-pagination-options-quick-jumper input:focus,\n.ant-pagination-options-quick-jumper input-focused {\n border-color: #40a9ff;\n border-right-width: 1px !important;\n outline: 0;\n box-shadow: 0 0 0 2px rgba(24, 144, 255, 0.2);\n}\n.ant-pagination-options-quick-jumper input-disabled {\n color: rgba(0, 0, 0, 0.25);\n background-color: #f5f5f5;\n cursor: not-allowed;\n opacity: 1;\n}\n.ant-pagination-options-quick-jumper input-disabled:hover {\n border-color: #d9d9d9;\n border-right-width: 1px !important;\n}\n.ant-pagination-options-quick-jumper input[disabled] {\n color: rgba(0, 0, 0, 0.25);\n background-color: #f5f5f5;\n cursor: not-allowed;\n opacity: 1;\n}\n.ant-pagination-options-quick-jumper input[disabled]:hover {\n border-color: #d9d9d9;\n 
border-right-width: 1px !important;\n}\n.ant-pagination-options-quick-jumper input-borderless,\n.ant-pagination-options-quick-jumper input-borderless:hover,\n.ant-pagination-options-quick-jumper input-borderless:focus,\n.ant-pagination-options-quick-jumper input-borderless-focused,\n.ant-pagination-options-quick-jumper input-borderless-disabled,\n.ant-pagination-options-quick-jumper input-borderless[disabled] {\n background-color: transparent;\n border: none;\n box-shadow: none;\n}\ntextarea.ant-pagination-options-quick-jumper input {\n max-width: 100%;\n height: auto;\n min-height: 32px;\n line-height: 1.5715;\n vertical-align: bottom;\n transition: all 0.3s, height 0s;\n}\n.ant-pagination-options-quick-jumper input-lg {\n padding: 6.5px 11px;\n font-size: 16px;\n}\n.ant-pagination-options-quick-jumper input-sm {\n padding: 0px 7px;\n}\n.ant-pagination-simple .ant-pagination-prev,\n.ant-pagination-simple .ant-pagination-next {\n height: 24px;\n line-height: 24px;\n vertical-align: top;\n}\n.ant-pagination-simple .ant-pagination-prev .ant-pagination-item-link,\n.ant-pagination-simple .ant-pagination-next .ant-pagination-item-link {\n height: 24px;\n background-color: transparent;\n border: 0;\n}\n.ant-pagination-simple .ant-pagination-prev .ant-pagination-item-link::after,\n.ant-pagination-simple .ant-pagination-next .ant-pagination-item-link::after {\n height: 24px;\n line-height: 24px;\n}\n.ant-pagination-simple .ant-pagination-simple-pager {\n display: inline-block;\n height: 24px;\n margin-right: 8px;\n}\n.ant-pagination-simple .ant-pagination-simple-pager input {\n box-sizing: border-box;\n height: 100%;\n margin-right: 8px;\n padding: 0 6px;\n text-align: center;\n background-color: #fff;\n border: 1px solid #d9d9d9;\n border-radius: 2px;\n outline: none;\n transition: border-color 0.3s;\n}\n.ant-pagination-simple .ant-pagination-simple-pager input:hover {\n border-color: #1890ff;\n}\n.ant-pagination-simple .ant-pagination-simple-pager input[disabled] {\n 
color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n border-color: #d9d9d9;\n cursor: not-allowed;\n}\n.ant-pagination.mini .ant-pagination-total-text,\n.ant-pagination.mini .ant-pagination-simple-pager {\n height: 24px;\n line-height: 24px;\n}\n.ant-pagination.mini .ant-pagination-item {\n min-width: 24px;\n height: 24px;\n margin: 0;\n line-height: 22px;\n}\n.ant-pagination.mini .ant-pagination-item:not(.ant-pagination-item-active) {\n background: transparent;\n border-color: transparent;\n}\n.ant-pagination.mini .ant-pagination-prev,\n.ant-pagination.mini .ant-pagination-next {\n min-width: 24px;\n height: 24px;\n margin: 0;\n line-height: 24px;\n}\n.ant-pagination.mini .ant-pagination-prev .ant-pagination-item-link,\n.ant-pagination.mini .ant-pagination-next .ant-pagination-item-link {\n background: transparent;\n border-color: transparent;\n}\n.ant-pagination.mini .ant-pagination-prev .ant-pagination-item-link::after,\n.ant-pagination.mini .ant-pagination-next .ant-pagination-item-link::after {\n height: 24px;\n line-height: 24px;\n}\n.ant-pagination.mini .ant-pagination-jump-prev,\n.ant-pagination.mini .ant-pagination-jump-next {\n height: 24px;\n margin-right: 0;\n line-height: 24px;\n}\n.ant-pagination.mini .ant-pagination-options {\n margin-left: 2px;\n}\n.ant-pagination.mini .ant-pagination-options-size-changer {\n top: 0px;\n}\n.ant-pagination.mini .ant-pagination-options-quick-jumper {\n height: 24px;\n line-height: 24px;\n}\n.ant-pagination.mini .ant-pagination-options-quick-jumper input {\n padding: 0px 7px;\n width: 44px;\n height: 24px;\n}\n.ant-pagination.ant-pagination-disabled {\n cursor: not-allowed;\n}\n.ant-pagination.ant-pagination-disabled .ant-pagination-item {\n background: #f5f5f5;\n border-color: #d9d9d9;\n cursor: not-allowed;\n}\n.ant-pagination.ant-pagination-disabled .ant-pagination-item a {\n color: rgba(0, 0, 0, 0.25);\n background: transparent;\n border: none;\n cursor: not-allowed;\n}\n.ant-pagination.ant-pagination-disabled 
.ant-pagination-item-active {\n background: #dbdbdb;\n border-color: transparent;\n}\n.ant-pagination.ant-pagination-disabled .ant-pagination-item-active a {\n color: #fff;\n}\n.ant-pagination.ant-pagination-disabled .ant-pagination-item-link {\n color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n border-color: #d9d9d9;\n cursor: not-allowed;\n}\n.ant-pagination-simple.ant-pagination.ant-pagination-disabled .ant-pagination-item-link {\n background: transparent;\n}\n.ant-pagination.ant-pagination-disabled .ant-pagination-item-link-icon {\n opacity: 0;\n}\n.ant-pagination.ant-pagination-disabled .ant-pagination-item-ellipsis {\n opacity: 1;\n}\n.ant-pagination.ant-pagination-disabled .ant-pagination-simple-pager {\n color: rgba(0, 0, 0, 0.25);\n}\n@media only screen and (max-width: 992px) {\n .ant-pagination-item-after-jump-prev,\n .ant-pagination-item-before-jump-next {\n display: none;\n }\n}\n@media only screen and (max-width: 576px) {\n .ant-pagination-options {\n display: none;\n }\n}\n.ant-pagination-rtl .ant-pagination-total-text {\n margin-right: 0;\n margin-left: 8px;\n}\n.ant-pagination-rtl .ant-pagination-item,\n.ant-pagination-rtl .ant-pagination-prev,\n.ant-pagination-rtl .ant-pagination-jump-prev,\n.ant-pagination-rtl .ant-pagination-jump-next {\n margin-right: 0;\n margin-left: 8px;\n}\n.ant-pagination-rtl .ant-pagination-slash {\n margin: 0 5px 0 10px;\n}\n.ant-pagination-rtl .ant-pagination-options {\n margin-right: 16px;\n margin-left: 0;\n}\n.ant-pagination-rtl .ant-pagination-options .ant-pagination-options-size-changer.ant-select {\n margin-right: 0;\n margin-left: 8px;\n}\n.ant-pagination-rtl .ant-pagination-options .ant-pagination-options-quick-jumper {\n margin-left: 0;\n}\n.ant-pagination-rtl.ant-pagination-simple .ant-pagination-simple-pager {\n margin-right: 0;\n margin-left: 8px;\n}\n.ant-pagination-rtl.ant-pagination-simple .ant-pagination-simple-pager input {\n margin-right: 0;\n margin-left: 
8px;\n}\n.ant-pagination-rtl.ant-pagination.mini .ant-pagination-options {\n margin-right: 2px;\n margin-left: 0;\n}\n",""]);const a=o},4782:(n,e,t)=>{"use strict";t.d(e,{Z:()=>a});var r=t(3645),o=t.n(r)()((function(n){return n[1]}));o.push([n.id,"/* stylelint-disable at-rule-empty-line-before,at-rule-name-space-after,at-rule-no-unknown */\n/* stylelint-disable no-duplicate-selectors */\n/* stylelint-disable */\n/* stylelint-disable declaration-bang-space-before,no-duplicate-selectors,string-no-newline */\n.ant-radio-group {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n color: rgba(0, 0, 0, 0.85);\n font-size: 14px;\n font-variant: tabular-nums;\n line-height: 1.5715;\n list-style: none;\n font-feature-settings: 'tnum';\n display: inline-block;\n font-size: 0;\n line-height: unset;\n}\n.ant-radio-group .ant-badge-count {\n z-index: 1;\n}\n.ant-radio-group > .ant-badge:not(:first-child) > .ant-radio-button-wrapper {\n border-left: none;\n}\n.ant-radio-wrapper {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n color: rgba(0, 0, 0, 0.85);\n font-size: 14px;\n font-variant: tabular-nums;\n line-height: 1.5715;\n list-style: none;\n font-feature-settings: 'tnum';\n position: relative;\n display: inline-flex;\n align-items: baseline;\n margin-right: 8px;\n cursor: pointer;\n}\n.ant-radio {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n color: rgba(0, 0, 0, 0.85);\n font-size: 14px;\n font-variant: tabular-nums;\n line-height: 1.5715;\n list-style: none;\n font-feature-settings: 'tnum';\n position: relative;\n top: 0.2em;\n display: inline-block;\n outline: none;\n cursor: pointer;\n}\n.ant-radio-wrapper:hover .ant-radio,\n.ant-radio:hover .ant-radio-inner,\n.ant-radio-input:focus + .ant-radio-inner {\n border-color: #1890ff;\n}\n.ant-radio-input:focus + .ant-radio-inner {\n box-shadow: 0 0 0 3px rgba(24, 144, 255, 0.08);\n}\n.ant-radio-checked::after {\n position: absolute;\n top: 0;\n left: 0;\n width: 100%;\n height: 100%;\n border: 1px solid 
#1890ff;\n border-radius: 50%;\n visibility: hidden;\n -webkit-animation: antRadioEffect 0.36s ease-in-out;\n animation: antRadioEffect 0.36s ease-in-out;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n content: '';\n}\n.ant-radio:hover::after,\n.ant-radio-wrapper:hover .ant-radio::after {\n visibility: visible;\n}\n.ant-radio-inner {\n position: relative;\n top: 0;\n left: 0;\n display: block;\n width: 16px;\n height: 16px;\n background-color: #fff;\n border-color: #d9d9d9;\n border-style: solid;\n border-width: 1px;\n border-radius: 50%;\n transition: all 0.3s;\n}\n.ant-radio-inner::after {\n position: absolute;\n top: 3px;\n left: 3px;\n display: table;\n width: 8px;\n height: 8px;\n background-color: #1890ff;\n border-top: 0;\n border-left: 0;\n border-radius: 8px;\n transform: scale(0);\n opacity: 0;\n transition: all 0.3s cubic-bezier(0.78, 0.14, 0.15, 0.86);\n content: ' ';\n}\n.ant-radio-input {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n z-index: 1;\n cursor: pointer;\n opacity: 0;\n}\n.ant-radio-checked .ant-radio-inner {\n border-color: #1890ff;\n}\n.ant-radio-checked .ant-radio-inner::after {\n transform: scale(1);\n opacity: 1;\n transition: all 0.3s cubic-bezier(0.78, 0.14, 0.15, 0.86);\n}\n.ant-radio-disabled {\n cursor: not-allowed;\n}\n.ant-radio-disabled .ant-radio-inner {\n background-color: #f5f5f5;\n border-color: #d9d9d9 !important;\n cursor: not-allowed;\n}\n.ant-radio-disabled .ant-radio-inner::after {\n background-color: rgba(0, 0, 0, 0.2);\n}\n.ant-radio-disabled .ant-radio-input {\n cursor: not-allowed;\n}\n.ant-radio-disabled + span {\n color: rgba(0, 0, 0, 0.25);\n cursor: not-allowed;\n}\nspan.ant-radio + * {\n padding-right: 8px;\n padding-left: 8px;\n}\n.ant-radio-button-wrapper {\n position: relative;\n display: inline-block;\n height: 32px;\n margin: 0;\n padding: 0 15px;\n color: rgba(0, 0, 0, 0.85);\n font-size: 14px;\n line-height: 30px;\n background: #fff;\n border: 1px solid 
#d9d9d9;\n border-top-width: 1.02px;\n border-left-width: 0;\n cursor: pointer;\n transition: color 0.3s, background 0.3s, border-color 0.3s, box-shadow 0.3s;\n}\n.ant-radio-button-wrapper a {\n color: rgba(0, 0, 0, 0.85);\n}\n.ant-radio-button-wrapper > .ant-radio-button {\n position: absolute;\n top: 0;\n left: 0;\n z-index: -1;\n width: 100%;\n height: 100%;\n}\n.ant-radio-group-large .ant-radio-button-wrapper {\n height: 40px;\n font-size: 16px;\n line-height: 38px;\n}\n.ant-radio-group-small .ant-radio-button-wrapper {\n height: 24px;\n padding: 0 7px;\n line-height: 22px;\n}\n.ant-radio-button-wrapper:not(:first-child)::before {\n position: absolute;\n top: -1px;\n left: -1px;\n display: block;\n box-sizing: content-box;\n width: 1px;\n height: 100%;\n padding: 1px 0;\n background-color: #d9d9d9;\n transition: background-color 0.3s;\n content: '';\n}\n.ant-radio-button-wrapper:first-child {\n border-left: 1px solid #d9d9d9;\n border-radius: 2px 0 0 2px;\n}\n.ant-radio-button-wrapper:last-child {\n border-radius: 0 2px 2px 0;\n}\n.ant-radio-button-wrapper:first-child:last-child {\n border-radius: 2px;\n}\n.ant-radio-button-wrapper:hover {\n position: relative;\n color: #1890ff;\n}\n.ant-radio-button-wrapper:focus-within {\n box-shadow: 0 0 0 3px rgba(24, 144, 255, 0.08);\n}\n.ant-radio-button-wrapper .ant-radio-inner,\n.ant-radio-button-wrapper input[type='checkbox'],\n.ant-radio-button-wrapper input[type='radio'] {\n width: 0;\n height: 0;\n opacity: 0;\n pointer-events: none;\n}\n.ant-radio-button-wrapper-checked:not(.ant-radio-button-wrapper-disabled) {\n z-index: 1;\n color: #1890ff;\n background: #fff;\n border-color: #1890ff;\n}\n.ant-radio-button-wrapper-checked:not(.ant-radio-button-wrapper-disabled)::before {\n background-color: #1890ff;\n}\n.ant-radio-button-wrapper-checked:not(.ant-radio-button-wrapper-disabled):first-child {\n border-color: #1890ff;\n}\n.ant-radio-button-wrapper-checked:not(.ant-radio-button-wrapper-disabled):hover {\n color: 
#40a9ff;\n border-color: #40a9ff;\n}\n.ant-radio-button-wrapper-checked:not(.ant-radio-button-wrapper-disabled):hover::before {\n background-color: #40a9ff;\n}\n.ant-radio-button-wrapper-checked:not(.ant-radio-button-wrapper-disabled):active {\n color: #096dd9;\n border-color: #096dd9;\n}\n.ant-radio-button-wrapper-checked:not(.ant-radio-button-wrapper-disabled):active::before {\n background-color: #096dd9;\n}\n.ant-radio-button-wrapper-checked:not(.ant-radio-button-wrapper-disabled):focus-within {\n box-shadow: 0 0 0 3px rgba(24, 144, 255, 0.08);\n}\n.ant-radio-group-solid .ant-radio-button-wrapper-checked:not(.ant-radio-button-wrapper-disabled) {\n color: #fff;\n background: #1890ff;\n border-color: #1890ff;\n}\n.ant-radio-group-solid .ant-radio-button-wrapper-checked:not(.ant-radio-button-wrapper-disabled):hover {\n color: #fff;\n background: #40a9ff;\n border-color: #40a9ff;\n}\n.ant-radio-group-solid .ant-radio-button-wrapper-checked:not(.ant-radio-button-wrapper-disabled):active {\n color: #fff;\n background: #096dd9;\n border-color: #096dd9;\n}\n.ant-radio-group-solid .ant-radio-button-wrapper-checked:not(.ant-radio-button-wrapper-disabled):focus-within {\n box-shadow: 0 0 0 3px rgba(24, 144, 255, 0.08);\n}\n.ant-radio-button-wrapper-disabled {\n color: rgba(0, 0, 0, 0.25);\n background-color: #f5f5f5;\n border-color: #d9d9d9;\n cursor: not-allowed;\n}\n.ant-radio-button-wrapper-disabled:first-child,\n.ant-radio-button-wrapper-disabled:hover {\n color: rgba(0, 0, 0, 0.25);\n background-color: #f5f5f5;\n border-color: #d9d9d9;\n}\n.ant-radio-button-wrapper-disabled:first-child {\n border-left-color: #d9d9d9;\n}\n.ant-radio-button-wrapper-disabled.ant-radio-button-wrapper-checked {\n color: rgba(0, 0, 0, 0.25);\n background-color: #e6e6e6;\n border-color: #d9d9d9;\n box-shadow: none;\n}\n@-webkit-keyframes antRadioEffect {\n 0% {\n transform: scale(1);\n opacity: 0.5;\n }\n 100% {\n transform: scale(1.6);\n opacity: 0;\n }\n}\n@keyframes antRadioEffect {\n 0% 
{\n transform: scale(1);\n opacity: 0.5;\n }\n 100% {\n transform: scale(1.6);\n opacity: 0;\n }\n}\n.ant-radio-group.ant-radio-group-rtl {\n direction: rtl;\n}\n.ant-radio-wrapper.ant-radio-wrapper-rtl {\n margin-right: 0;\n margin-left: 8px;\n direction: rtl;\n}\n.ant-radio-button-wrapper.ant-radio-button-wrapper-rtl {\n border-right-width: 0;\n border-left-width: 1px;\n}\n.ant-radio-button-wrapper.ant-radio-button-wrapper-rtl.ant-radio-button-wrapper:not(:first-child)::before {\n right: -1px;\n left: 0;\n}\n.ant-radio-button-wrapper.ant-radio-button-wrapper-rtl.ant-radio-button-wrapper:first-child {\n border-right: 1px solid #d9d9d9;\n border-radius: 0 2px 2px 0;\n}\n.ant-radio-button-wrapper-checked:not([class*=' ant-radio-button-wrapper-disabled']).ant-radio-button-wrapper:first-child {\n border-right-color: #40a9ff;\n}\n.ant-radio-button-wrapper.ant-radio-button-wrapper-rtl.ant-radio-button-wrapper:last-child {\n border-radius: 2px 0 0 2px;\n}\n.ant-radio-button-wrapper.ant-radio-button-wrapper-rtl.ant-radio-button-wrapper-disabled:first-child {\n border-right-color: #d9d9d9;\n}\n",""]);const a=o},7464:(n,e,t)=>{"use strict";t.d(e,{Z:()=>a});var r=t(3645),o=t.n(r)()((function(n){return n[1]}));o.push([n.id,"/* stylelint-disable at-rule-empty-line-before,at-rule-name-space-after,at-rule-no-unknown */\n/* stylelint-disable no-duplicate-selectors */\n/* stylelint-disable */\n/* stylelint-disable declaration-bang-space-before,no-duplicate-selectors,string-no-newline */\n.ant-select-single .ant-select-selector {\n display: flex;\n}\n.ant-select-single .ant-select-selector .ant-select-selection-search {\n position: absolute;\n top: 0;\n right: 11px;\n bottom: 0;\n left: 11px;\n}\n.ant-select-single .ant-select-selector .ant-select-selection-search-input {\n width: 100%;\n}\n.ant-select-single .ant-select-selector .ant-select-selection-item,\n.ant-select-single .ant-select-selector .ant-select-selection-placeholder {\n padding: 0;\n line-height: 30px;\n transition: 
all 0.3s;\n}\n@supports (-moz-appearance: meterbar) {\n .ant-select-single .ant-select-selector .ant-select-selection-item,\n .ant-select-single .ant-select-selector .ant-select-selection-placeholder {\n line-height: 30px;\n }\n}\n.ant-select-single .ant-select-selector .ant-select-selection-item {\n position: relative;\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n}\n.ant-select-single .ant-select-selector .ant-select-selection-placeholder {\n pointer-events: none;\n}\n.ant-select-single .ant-select-selector::after,\n.ant-select-single .ant-select-selector .ant-select-selection-item::after,\n.ant-select-single .ant-select-selector .ant-select-selection-placeholder::after {\n display: inline-block;\n width: 0;\n visibility: hidden;\n content: '\\a0';\n}\n.ant-select-single.ant-select-show-arrow .ant-select-selection-search {\n right: 25px;\n}\n.ant-select-single.ant-select-show-arrow .ant-select-selection-item,\n.ant-select-single.ant-select-show-arrow .ant-select-selection-placeholder {\n padding-right: 18px;\n}\n.ant-select-single.ant-select-open .ant-select-selection-item {\n color: #bfbfbf;\n}\n.ant-select-single:not(.ant-select-customize-input) .ant-select-selector {\n width: 100%;\n height: 32px;\n padding: 0 11px;\n}\n.ant-select-single:not(.ant-select-customize-input) .ant-select-selector .ant-select-selection-search-input {\n height: 30px;\n}\n.ant-select-single:not(.ant-select-customize-input) .ant-select-selector::after {\n line-height: 30px;\n}\n.ant-select-single.ant-select-customize-input .ant-select-selector::after {\n display: none;\n}\n.ant-select-single.ant-select-customize-input .ant-select-selector .ant-select-selection-search {\n position: static;\n width: 100%;\n}\n.ant-select-single.ant-select-customize-input .ant-select-selector .ant-select-selection-placeholder {\n position: absolute;\n right: 0;\n left: 0;\n padding: 0 11px;\n}\n.ant-select-single.ant-select-customize-input 
.ant-select-selector .ant-select-selection-placeholder::after {\n display: none;\n}\n.ant-select-single.ant-select-lg:not(.ant-select-customize-input) .ant-select-selector {\n height: 40px;\n}\n.ant-select-single.ant-select-lg:not(.ant-select-customize-input) .ant-select-selector::after,\n.ant-select-single.ant-select-lg:not(.ant-select-customize-input) .ant-select-selector .ant-select-selection-item,\n.ant-select-single.ant-select-lg:not(.ant-select-customize-input) .ant-select-selector .ant-select-selection-placeholder {\n line-height: 38px;\n}\n.ant-select-single.ant-select-lg:not(.ant-select-customize-input):not(.ant-select-customize-input) .ant-select-selection-search-input {\n height: 38px;\n}\n.ant-select-single.ant-select-sm:not(.ant-select-customize-input) .ant-select-selector {\n height: 24px;\n}\n.ant-select-single.ant-select-sm:not(.ant-select-customize-input) .ant-select-selector::after,\n.ant-select-single.ant-select-sm:not(.ant-select-customize-input) .ant-select-selector .ant-select-selection-item,\n.ant-select-single.ant-select-sm:not(.ant-select-customize-input) .ant-select-selector .ant-select-selection-placeholder {\n line-height: 22px;\n}\n.ant-select-single.ant-select-sm:not(.ant-select-customize-input):not(.ant-select-customize-input) .ant-select-selection-search-input {\n height: 22px;\n}\n.ant-select-single.ant-select-sm:not(.ant-select-customize-input) .ant-select-selection-search {\n right: 7px;\n left: 7px;\n}\n.ant-select-single.ant-select-sm:not(.ant-select-customize-input) .ant-select-selector {\n padding: 0 7px;\n}\n.ant-select-single.ant-select-sm:not(.ant-select-customize-input).ant-select-show-arrow .ant-select-selection-search {\n right: 28px;\n}\n.ant-select-single.ant-select-sm:not(.ant-select-customize-input).ant-select-show-arrow .ant-select-selection-item,\n.ant-select-single.ant-select-sm:not(.ant-select-customize-input).ant-select-show-arrow .ant-select-selection-placeholder {\n padding-right: 
21px;\n}\n.ant-select-single.ant-select-lg:not(.ant-select-customize-input) .ant-select-selector {\n padding: 0 11px;\n}\n/**\n * Do not merge `height` & `line-height` under style with `selection` & `search`,\n * since chrome may update to redesign with its align logic.\n */\n.ant-select-selection-overflow {\n position: relative;\n display: flex;\n flex: auto;\n flex-wrap: wrap;\n max-width: 100%;\n}\n.ant-select-selection-overflow-item {\n flex: none;\n align-self: center;\n max-width: 100%;\n}\n.ant-select-multiple .ant-select-selector {\n display: flex;\n flex-wrap: wrap;\n align-items: center;\n padding: 1px 4px;\n}\n.ant-select-show-search.ant-select-multiple .ant-select-selector {\n cursor: text;\n}\n.ant-select-disabled.ant-select-multiple .ant-select-selector {\n background: #f5f5f5;\n cursor: not-allowed;\n}\n.ant-select-multiple .ant-select-selector::after {\n display: inline-block;\n width: 0;\n margin: 2px 0;\n line-height: 24px;\n content: '\\a0';\n}\n.ant-select-multiple.ant-select-show-arrow .ant-select-selector,\n.ant-select-multiple.ant-select-allow-clear .ant-select-selector {\n padding-right: 24px;\n}\n.ant-select-multiple .ant-select-selection-item {\n position: relative;\n display: flex;\n flex: none;\n box-sizing: border-box;\n max-width: 100%;\n height: 24px;\n margin-top: 2px;\n margin-bottom: 2px;\n line-height: 22px;\n background: #f5f5f5;\n border: 1px solid #f0f0f0;\n border-radius: 2px;\n cursor: default;\n transition: font-size 0.3s, line-height 0.3s, height 0.3s;\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n -webkit-margin-end: 4px;\n margin-inline-end: 4px;\n -webkit-padding-start: 8px;\n padding-inline-start: 8px;\n -webkit-padding-end: 4px;\n padding-inline-end: 4px;\n}\n.ant-select-disabled.ant-select-multiple .ant-select-selection-item {\n color: #bfbfbf;\n border-color: #d9d9d9;\n cursor: not-allowed;\n}\n.ant-select-multiple .ant-select-selection-item-content {\n display: 
inline-block;\n margin-right: 4px;\n overflow: hidden;\n white-space: pre;\n text-overflow: ellipsis;\n}\n.ant-select-multiple .ant-select-selection-item-remove {\n color: inherit;\n font-style: normal;\n line-height: 0;\n text-align: center;\n text-transform: none;\n vertical-align: -0.125em;\n text-rendering: optimizeLegibility;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n display: inline-block;\n color: rgba(0, 0, 0, 0.45);\n font-weight: bold;\n font-size: 10px;\n line-height: inherit;\n cursor: pointer;\n}\n.ant-select-multiple .ant-select-selection-item-remove > * {\n line-height: 1;\n}\n.ant-select-multiple .ant-select-selection-item-remove svg {\n display: inline-block;\n}\n.ant-select-multiple .ant-select-selection-item-remove::before {\n display: none;\n}\n.ant-select-multiple .ant-select-selection-item-remove .ant-select-multiple .ant-select-selection-item-remove-icon {\n display: block;\n}\n.ant-select-multiple .ant-select-selection-item-remove > .anticon {\n vertical-align: -0.2em;\n}\n.ant-select-multiple .ant-select-selection-item-remove:hover {\n color: rgba(0, 0, 0, 0.75);\n}\n.ant-select-multiple .ant-select-selection-overflow-item + .ant-select-selection-overflow-item .ant-select-selection-search {\n -webkit-margin-start: 0;\n margin-inline-start: 0;\n}\n.ant-select-multiple .ant-select-selection-search {\n position: relative;\n max-width: 100%;\n margin-top: 2px;\n margin-bottom: 2px;\n -webkit-margin-start: 7px;\n margin-inline-start: 7px;\n}\n.ant-select-multiple .ant-select-selection-search-input,\n.ant-select-multiple .ant-select-selection-search-mirror {\n height: 24px;\n font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, 'Noto Sans', sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol', 'Noto Color Emoji';\n line-height: 24px;\n transition: all 0.3s;\n}\n.ant-select-multiple .ant-select-selection-search-input {\n width: 100%;\n min-width: 
4.1px;\n}\n.ant-select-multiple .ant-select-selection-search-mirror {\n position: absolute;\n top: 0;\n left: 0;\n z-index: 999;\n white-space: pre;\n visibility: hidden;\n}\n.ant-select-multiple .ant-select-selection-placeholder {\n position: absolute;\n top: 50%;\n right: 11px;\n left: 11px;\n transform: translateY(-50%);\n transition: all 0.3s;\n}\n.ant-select-multiple.ant-select-lg .ant-select-selector::after {\n line-height: 32px;\n}\n.ant-select-multiple.ant-select-lg .ant-select-selection-item {\n height: 32px;\n line-height: 30px;\n}\n.ant-select-multiple.ant-select-lg .ant-select-selection-search {\n height: 32px;\n line-height: 32px;\n}\n.ant-select-multiple.ant-select-lg .ant-select-selection-search-input,\n.ant-select-multiple.ant-select-lg .ant-select-selection-search-mirror {\n height: 32px;\n line-height: 30px;\n}\n.ant-select-multiple.ant-select-sm .ant-select-selector::after {\n line-height: 16px;\n}\n.ant-select-multiple.ant-select-sm .ant-select-selection-item {\n height: 16px;\n line-height: 14px;\n}\n.ant-select-multiple.ant-select-sm .ant-select-selection-search {\n height: 16px;\n line-height: 16px;\n}\n.ant-select-multiple.ant-select-sm .ant-select-selection-search-input,\n.ant-select-multiple.ant-select-sm .ant-select-selection-search-mirror {\n height: 16px;\n line-height: 14px;\n}\n.ant-select-multiple.ant-select-sm .ant-select-selection-placeholder {\n left: 7px;\n}\n.ant-select-multiple.ant-select-sm .ant-select-selection-search {\n -webkit-margin-start: 3px;\n margin-inline-start: 3px;\n}\n.ant-select-multiple.ant-select-lg .ant-select-selection-item {\n height: 32px;\n line-height: 32px;\n}\n.ant-select-disabled .ant-select-selection-item-remove {\n display: none;\n}\n/* Reset search input style */\n.ant-select {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n color: rgba(0, 0, 0, 0.85);\n font-size: 14px;\n font-variant: tabular-nums;\n line-height: 1.5715;\n list-style: none;\n font-feature-settings: 'tnum';\n position: 
relative;\n display: inline-block;\n cursor: pointer;\n}\n.ant-select:not(.ant-select-customize-input) .ant-select-selector {\n position: relative;\n background-color: #fff;\n border: 1px solid #d9d9d9;\n border-radius: 2px;\n transition: all 0.3s cubic-bezier(0.645, 0.045, 0.355, 1);\n}\n.ant-select:not(.ant-select-customize-input) .ant-select-selector input {\n cursor: pointer;\n}\n.ant-select-show-search.ant-select:not(.ant-select-customize-input) .ant-select-selector {\n cursor: text;\n}\n.ant-select-show-search.ant-select:not(.ant-select-customize-input) .ant-select-selector input {\n cursor: auto;\n}\n.ant-select-focused:not(.ant-select-disabled).ant-select:not(.ant-select-customize-input) .ant-select-selector {\n border-color: #40a9ff;\n border-right-width: 1px !important;\n outline: 0;\n box-shadow: 0 0 0 2px rgba(24, 144, 255, 0.2);\n}\n.ant-select-disabled.ant-select:not(.ant-select-customize-input) .ant-select-selector {\n color: rgba(0, 0, 0, 0.25);\n background: #f5f5f5;\n cursor: not-allowed;\n}\n.ant-select-multiple.ant-select-disabled.ant-select:not(.ant-select-customize-input) .ant-select-selector {\n background: #f5f5f5;\n}\n.ant-select-disabled.ant-select:not(.ant-select-customize-input) .ant-select-selector input {\n cursor: not-allowed;\n}\n.ant-select:not(.ant-select-customize-input) .ant-select-selector .ant-select-selection-search-input {\n margin: 0;\n padding: 0;\n background: transparent;\n border: none;\n outline: none;\n -webkit-appearance: none;\n -moz-appearance: none;\n appearance: none;\n}\n.ant-select:not(.ant-select-customize-input) .ant-select-selector .ant-select-selection-search-input::-webkit-search-cancel-button {\n display: none;\n -webkit-appearance: none;\n}\n.ant-select:not(.ant-select-disabled):hover .ant-select-selector {\n border-color: #40a9ff;\n border-right-width: 1px !important;\n}\n.ant-select-selection-item {\n flex: 1;\n overflow: hidden;\n white-space: nowrap;\n text-overflow: ellipsis;\n}\n@media all and 
(-ms-high-contrast: none) {\n .ant-select-selection-item *::-ms-backdrop,\n .ant-select-selection-item {\n flex: auto;\n }\n}\n.ant-select-selection-placeholder {\n flex: 1;\n overflow: hidden;\n color: #bfbfbf;\n white-space: nowrap;\n text-overflow: ellipsis;\n pointer-events: none;\n}\n@media all and (-ms-high-contrast: none) {\n .ant-select-selection-placeholder *::-ms-backdrop,\n .ant-select-selection-placeholder {\n flex: auto;\n }\n}\n.ant-select-arrow {\n display: inline-block;\n color: inherit;\n font-style: normal;\n line-height: 0;\n text-transform: none;\n vertical-align: -0.125em;\n text-rendering: optimizeLegibility;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n position: absolute;\n top: 53%;\n right: 11px;\n width: 12px;\n height: 12px;\n margin-top: -6px;\n color: rgba(0, 0, 0, 0.25);\n font-size: 12px;\n line-height: 1;\n text-align: center;\n pointer-events: none;\n}\n.ant-select-arrow > * {\n line-height: 1;\n}\n.ant-select-arrow svg {\n display: inline-block;\n}\n.ant-select-arrow::before {\n display: none;\n}\n.ant-select-arrow .ant-select-arrow-icon {\n display: block;\n}\n.ant-select-arrow .anticon {\n vertical-align: top;\n transition: transform 0.3s;\n}\n.ant-select-arrow .anticon > svg {\n vertical-align: top;\n}\n.ant-select-arrow .anticon:not(.ant-select-suffix) {\n pointer-events: auto;\n}\n.ant-select-disabled .ant-select-arrow {\n cursor: not-allowed;\n}\n.ant-select-clear {\n position: absolute;\n top: 50%;\n right: 11px;\n z-index: 1;\n display: inline-block;\n width: 12px;\n height: 12px;\n margin-top: -6px;\n color: rgba(0, 0, 0, 0.25);\n font-size: 12px;\n font-style: normal;\n line-height: 1;\n text-align: center;\n text-transform: none;\n background: #fff;\n cursor: pointer;\n opacity: 0;\n transition: color 0.3s ease, opacity 0.15s ease;\n text-rendering: auto;\n}\n.ant-select-clear::before {\n display: block;\n}\n.ant-select-clear:hover {\n color: rgba(0, 0, 0, 0.45);\n}\n.ant-select:hover 
.ant-select-clear {\n opacity: 1;\n}\n.ant-select-dropdown {\n margin: 0;\n padding: 0;\n color: rgba(0, 0, 0, 0.85);\n font-variant: tabular-nums;\n line-height: 1.5715;\n list-style: none;\n font-feature-settings: 'tnum';\n position: absolute;\n top: -9999px;\n left: -9999px;\n z-index: 1050;\n box-sizing: border-box;\n padding: 4px 0;\n overflow: hidden;\n font-size: 14px;\n font-variant: initial;\n background-color: #fff;\n border-radius: 2px;\n outline: none;\n box-shadow: 0 3px 6px -4px rgba(0, 0, 0, 0.12), 0 6px 16px 0 rgba(0, 0, 0, 0.08), 0 9px 28px 8px rgba(0, 0, 0, 0.05);\n}\n.ant-select-dropdown.slide-up-enter.slide-up-enter-active.ant-select-dropdown-placement-bottomLeft,\n.ant-select-dropdown.slide-up-appear.slide-up-appear-active.ant-select-dropdown-placement-bottomLeft {\n -webkit-animation-name: antSlideUpIn;\n animation-name: antSlideUpIn;\n}\n.ant-select-dropdown.slide-up-enter.slide-up-enter-active.ant-select-dropdown-placement-topLeft,\n.ant-select-dropdown.slide-up-appear.slide-up-appear-active.ant-select-dropdown-placement-topLeft {\n -webkit-animation-name: antSlideDownIn;\n animation-name: antSlideDownIn;\n}\n.ant-select-dropdown.slide-up-leave.slide-up-leave-active.ant-select-dropdown-placement-bottomLeft {\n -webkit-animation-name: antSlideUpOut;\n animation-name: antSlideUpOut;\n}\n.ant-select-dropdown.slide-up-leave.slide-up-leave-active.ant-select-dropdown-placement-topLeft {\n -webkit-animation-name: antSlideDownOut;\n animation-name: antSlideDownOut;\n}\n.ant-select-dropdown-hidden {\n display: none;\n}\n.ant-select-dropdown-empty {\n color: rgba(0, 0, 0, 0.25);\n}\n.ant-select-item-empty {\n position: relative;\n display: block;\n min-height: 32px;\n padding: 5px 12px;\n color: rgba(0, 0, 0, 0.85);\n font-weight: normal;\n font-size: 14px;\n line-height: 22px;\n color: rgba(0, 0, 0, 0.25);\n}\n.ant-select-item {\n position: relative;\n display: block;\n min-height: 32px;\n padding: 5px 12px;\n color: rgba(0, 0, 0, 0.85);\n 
font-weight: normal;\n font-size: 14px;\n line-height: 22px;\n cursor: pointer;\n transition: background 0.3s ease;\n}\n.ant-select-item-group {\n color: rgba(0, 0, 0, 0.45);\n font-size: 12px;\n cursor: default;\n}\n.ant-select-item-option {\n display: flex;\n}\n.ant-select-item-option-content {\n flex: auto;\n overflow: hidden;\n white-space: nowrap;\n text-overflow: ellipsis;\n}\n.ant-select-item-option-state {\n flex: none;\n}\n.ant-select-item-option-active:not(.ant-select-item-option-disabled) {\n background-color: #f5f5f5;\n}\n.ant-select-item-option-selected:not(.ant-select-item-option-disabled) {\n color: rgba(0, 0, 0, 0.85);\n font-weight: 600;\n background-color: #e6f7ff;\n}\n.ant-select-item-option-selected:not(.ant-select-item-option-disabled) .ant-select-item-option-state {\n color: #1890ff;\n}\n.ant-select-item-option-disabled {\n color: rgba(0, 0, 0, 0.25);\n cursor: not-allowed;\n}\n.ant-select-item-option-grouped {\n padding-left: 24px;\n}\n.ant-select-lg {\n font-size: 16px;\n}\n.ant-select-borderless .ant-select-selector {\n background-color: transparent !important;\n border-color: transparent !important;\n box-shadow: none !important;\n}\n.ant-select-rtl {\n direction: rtl;\n}\n.ant-select-rtl .ant-select-arrow {\n right: initial;\n left: 11px;\n}\n.ant-select-rtl .ant-select-clear {\n right: initial;\n left: 11px;\n}\n.ant-select-dropdown-rtl {\n direction: rtl;\n}\n.ant-select-dropdown-rtl .ant-select-item-option-grouped {\n padding-right: 24px;\n padding-left: 12px;\n}\n.ant-select-rtl.ant-select-multiple.ant-select-show-arrow .ant-select-selector,\n.ant-select-rtl.ant-select-multiple.ant-select-allow-clear .ant-select-selector {\n padding-right: 4px;\n padding-left: 24px;\n}\n.ant-select-rtl.ant-select-multiple .ant-select-selection-item {\n text-align: right;\n}\n.ant-select-rtl.ant-select-multiple .ant-select-selection-item-content {\n margin-right: 0;\n margin-left: 4px;\n text-align: right;\n}\n.ant-select-rtl.ant-select-multiple 
.ant-select-selection-search-mirror {\n right: 0;\n left: auto;\n}\n.ant-select-rtl.ant-select-multiple .ant-select-selection-placeholder {\n right: 11px;\n left: auto;\n}\n.ant-select-rtl.ant-select-multiple.ant-select-sm .ant-select-selection-placeholder {\n right: 7px;\n}\n.ant-select-rtl.ant-select-single .ant-select-selector .ant-select-selection-item,\n.ant-select-rtl.ant-select-single .ant-select-selector .ant-select-selection-placeholder {\n right: 0;\n left: 9px;\n text-align: right;\n}\n.ant-select-rtl.ant-select-single.ant-select-show-arrow .ant-select-selection-search {\n right: 11px;\n left: 25px;\n}\n.ant-select-rtl.ant-select-single.ant-select-show-arrow .ant-select-selection-item,\n.ant-select-rtl.ant-select-single.ant-select-show-arrow .ant-select-selection-placeholder {\n padding-right: 0;\n padding-left: 18px;\n}\n.ant-select-rtl.ant-select-single.ant-select-sm:not(.ant-select-customize-input).ant-select-show-arrow .ant-select-selection-search {\n right: 6px;\n}\n.ant-select-rtl.ant-select-single.ant-select-sm:not(.ant-select-customize-input).ant-select-show-arrow .ant-select-selection-item,\n.ant-select-rtl.ant-select-single.ant-select-sm:not(.ant-select-customize-input).ant-select-show-arrow .ant-select-selection-placeholder {\n padding-right: 0;\n padding-left: 21px;\n}\n",""]);const a=o},6536:(n,e,t)=>{"use strict";t.d(e,{Z:()=>a});var r=t(3645),o=t.n(r)()((function(n){return n[1]}));o.push([n.id,"/* stylelint-disable at-rule-empty-line-before,at-rule-name-space-after,at-rule-no-unknown */\n/* stylelint-disable no-duplicate-selectors */\n/* stylelint-disable */\n/* stylelint-disable declaration-bang-space-before,no-duplicate-selectors,string-no-newline */\n.ant-spin {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n color: rgba(0, 0, 0, 0.85);\n font-size: 14px;\n font-variant: tabular-nums;\n line-height: 1.5715;\n list-style: none;\n font-feature-settings: 'tnum';\n position: absolute;\n display: none;\n color: #1890ff;\n text-align: 
center;\n vertical-align: middle;\n opacity: 0;\n transition: transform 0.3s cubic-bezier(0.78, 0.14, 0.15, 0.86);\n}\n.ant-spin-spinning {\n position: static;\n display: inline-block;\n opacity: 1;\n}\n.ant-spin-nested-loading {\n position: relative;\n}\n.ant-spin-nested-loading > div > .ant-spin {\n position: absolute;\n top: 0;\n left: 0;\n z-index: 4;\n display: block;\n width: 100%;\n height: 100%;\n max-height: 400px;\n}\n.ant-spin-nested-loading > div > .ant-spin .ant-spin-dot {\n position: absolute;\n top: 50%;\n left: 50%;\n margin: -10px;\n}\n.ant-spin-nested-loading > div > .ant-spin .ant-spin-text {\n position: absolute;\n top: 50%;\n width: 100%;\n padding-top: 5px;\n text-shadow: 0 1px 2px #fff;\n}\n.ant-spin-nested-loading > div > .ant-spin.ant-spin-show-text .ant-spin-dot {\n margin-top: -20px;\n}\n.ant-spin-nested-loading > div > .ant-spin-sm .ant-spin-dot {\n margin: -7px;\n}\n.ant-spin-nested-loading > div > .ant-spin-sm .ant-spin-text {\n padding-top: 2px;\n}\n.ant-spin-nested-loading > div > .ant-spin-sm.ant-spin-show-text .ant-spin-dot {\n margin-top: -17px;\n}\n.ant-spin-nested-loading > div > .ant-spin-lg .ant-spin-dot {\n margin: -16px;\n}\n.ant-spin-nested-loading > div > .ant-spin-lg .ant-spin-text {\n padding-top: 11px;\n}\n.ant-spin-nested-loading > div > .ant-spin-lg.ant-spin-show-text .ant-spin-dot {\n margin-top: -26px;\n}\n.ant-spin-container {\n position: relative;\n transition: opacity 0.3s;\n}\n.ant-spin-container::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n z-index: 10;\n display: none \\9;\n width: 100%;\n height: 100%;\n background: #fff;\n opacity: 0;\n transition: all 0.3s;\n content: '';\n pointer-events: none;\n}\n.ant-spin-blur {\n clear: both;\n overflow: hidden;\n opacity: 0.5;\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n pointer-events: none;\n}\n.ant-spin-blur::after {\n opacity: 0.4;\n pointer-events: auto;\n}\n.ant-spin-tip 
{\n color: rgba(0, 0, 0, 0.45);\n}\n.ant-spin-dot {\n position: relative;\n display: inline-block;\n font-size: 20px;\n width: 1em;\n height: 1em;\n}\n.ant-spin-dot-item {\n position: absolute;\n display: block;\n width: 9px;\n height: 9px;\n background-color: #1890ff;\n border-radius: 100%;\n transform: scale(0.75);\n transform-origin: 50% 50%;\n opacity: 0.3;\n -webkit-animation: antSpinMove 1s infinite linear alternate;\n animation: antSpinMove 1s infinite linear alternate;\n}\n.ant-spin-dot-item:nth-child(1) {\n top: 0;\n left: 0;\n}\n.ant-spin-dot-item:nth-child(2) {\n top: 0;\n right: 0;\n -webkit-animation-delay: 0.4s;\n animation-delay: 0.4s;\n}\n.ant-spin-dot-item:nth-child(3) {\n right: 0;\n bottom: 0;\n -webkit-animation-delay: 0.8s;\n animation-delay: 0.8s;\n}\n.ant-spin-dot-item:nth-child(4) {\n bottom: 0;\n left: 0;\n -webkit-animation-delay: 1.2s;\n animation-delay: 1.2s;\n}\n.ant-spin-dot-spin {\n transform: rotate(45deg);\n -webkit-animation: antRotate 1.2s infinite linear;\n animation: antRotate 1.2s infinite linear;\n}\n.ant-spin-sm .ant-spin-dot {\n font-size: 14px;\n}\n.ant-spin-sm .ant-spin-dot i {\n width: 6px;\n height: 6px;\n}\n.ant-spin-lg .ant-spin-dot {\n font-size: 32px;\n}\n.ant-spin-lg .ant-spin-dot i {\n width: 14px;\n height: 14px;\n}\n.ant-spin.ant-spin-show-text .ant-spin-text {\n display: block;\n}\n@media all and (-ms-high-contrast: none), (-ms-high-contrast: active) {\n /* IE10+ */\n .ant-spin-blur {\n background: #fff;\n opacity: 0.5;\n }\n}\n@-webkit-keyframes antSpinMove {\n to {\n opacity: 1;\n }\n}\n@keyframes antSpinMove {\n to {\n opacity: 1;\n }\n}\n@-webkit-keyframes antRotate {\n to {\n transform: rotate(405deg);\n }\n}\n@keyframes antRotate {\n to {\n transform: rotate(405deg);\n }\n}\n.ant-spin-rtl {\n direction: rtl;\n}\n.ant-spin-rtl .ant-spin-dot-spin {\n transform: rotate(-45deg);\n -webkit-animation-name: antRotateRtl;\n animation-name: antRotateRtl;\n}\n@-webkit-keyframes antRotateRtl {\n to {\n transform: 
rotate(-405deg);\n }\n}\n@keyframes antRotateRtl {\n to {\n transform: rotate(-405deg);\n }\n}\n",""]);const a=o},7828:(n,e,t)=>{"use strict";t.d(e,{Z:()=>a});var r=t(3645),o=t.n(r)()((function(n){return n[1]}));o.push([n.id,"/* stylelint-disable at-rule-empty-line-before,at-rule-name-space-after,at-rule-no-unknown */\n/* stylelint-disable no-duplicate-selectors */\n/* stylelint-disable */\n/* stylelint-disable declaration-bang-space-before,no-duplicate-selectors,string-no-newline */\n[class^=ant-]::-ms-clear,\n[class*= ant-]::-ms-clear,\n[class^=ant-] input::-ms-clear,\n[class*= ant-] input::-ms-clear,\n[class^=ant-] input::-ms-reveal,\n[class*= ant-] input::-ms-reveal {\n display: none;\n}\n[class^=ant-],\n[class*= ant-],\n[class^=ant-] *,\n[class*= ant-] *,\n[class^=ant-] *::before,\n[class*= ant-] *::before,\n[class^=ant-] *::after,\n[class*= ant-] *::after {\n box-sizing: border-box;\n}\n/* stylelint-disable at-rule-no-unknown */\nhtml,\nbody {\n width: 100%;\n height: 100%;\n}\ninput::-ms-clear,\ninput::-ms-reveal {\n display: none;\n}\n*,\n*::before,\n*::after {\n box-sizing: border-box;\n}\nhtml {\n font-family: sans-serif;\n line-height: 1.15;\n -webkit-text-size-adjust: 100%;\n -ms-text-size-adjust: 100%;\n -ms-overflow-style: scrollbar;\n -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\n}\n@-ms-viewport {\n width: device-width;\n}\nbody {\n margin: 0;\n color: rgba(0, 0, 0, 0.85);\n font-size: 14px;\n font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, 'Noto Sans', sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol', 'Noto Color Emoji';\n font-variant: tabular-nums;\n line-height: 1.5715;\n background-color: #fff;\n font-feature-settings: 'tnum';\n}\n[tabindex='-1']:focus {\n outline: none !important;\n}\nhr {\n box-sizing: content-box;\n height: 0;\n overflow: visible;\n}\nh1,\nh2,\nh3,\nh4,\nh5,\nh6 {\n margin-top: 0;\n margin-bottom: 0.5em;\n color: rgba(0, 0, 0, 0.85);\n font-weight: 500;\n}\np 
{\n margin-top: 0;\n margin-bottom: 1em;\n}\nabbr[title],\nabbr[data-original-title] {\n text-decoration: underline;\n -webkit-text-decoration: underline dotted;\n text-decoration: underline dotted;\n border-bottom: 0;\n cursor: help;\n}\naddress {\n margin-bottom: 1em;\n font-style: normal;\n line-height: inherit;\n}\ninput[type='text'],\ninput[type='password'],\ninput[type='number'],\ntextarea {\n -webkit-appearance: none;\n}\nol,\nul,\ndl {\n margin-top: 0;\n margin-bottom: 1em;\n}\nol ol,\nul ul,\nol ul,\nul ol {\n margin-bottom: 0;\n}\ndt {\n font-weight: 500;\n}\ndd {\n margin-bottom: 0.5em;\n margin-left: 0;\n}\nblockquote {\n margin: 0 0 1em;\n}\ndfn {\n font-style: italic;\n}\nb,\nstrong {\n font-weight: bolder;\n}\nsmall {\n font-size: 80%;\n}\nsub,\nsup {\n position: relative;\n font-size: 75%;\n line-height: 0;\n vertical-align: baseline;\n}\nsub {\n bottom: -0.25em;\n}\nsup {\n top: -0.5em;\n}\na {\n color: #1890ff;\n text-decoration: none;\n background-color: transparent;\n outline: none;\n cursor: pointer;\n transition: color 0.3s;\n -webkit-text-decoration-skip: objects;\n}\na:hover {\n color: #40a9ff;\n}\na:active {\n color: #096dd9;\n}\na:active,\na:hover {\n text-decoration: none;\n outline: 0;\n}\na:focus {\n text-decoration: none;\n outline: 0;\n}\na[disabled] {\n color: rgba(0, 0, 0, 0.25);\n cursor: not-allowed;\n pointer-events: none;\n}\npre,\ncode,\nkbd,\nsamp {\n font-size: 1em;\n font-family: 'SFMono-Regular', Consolas, 'Liberation Mono', Menlo, Courier, monospace;\n}\npre {\n margin-top: 0;\n margin-bottom: 1em;\n overflow: auto;\n}\nfigure {\n margin: 0 0 1em;\n}\nimg {\n vertical-align: middle;\n border-style: none;\n}\nsvg:not(:root) {\n overflow: hidden;\n}\na,\narea,\nbutton,\n[role='button'],\ninput:not([type='range']),\nlabel,\nselect,\nsummary,\ntextarea {\n touch-action: manipulation;\n}\ntable {\n border-collapse: collapse;\n}\ncaption {\n padding-top: 0.75em;\n padding-bottom: 0.3em;\n color: rgba(0, 0, 0, 0.45);\n 
text-align: left;\n caption-side: bottom;\n}\nth {\n text-align: inherit;\n}\ninput,\nbutton,\nselect,\noptgroup,\ntextarea {\n margin: 0;\n color: inherit;\n font-size: inherit;\n font-family: inherit;\n line-height: inherit;\n}\nbutton,\ninput {\n overflow: visible;\n}\nbutton,\nselect {\n text-transform: none;\n}\nbutton,\nhtml [type=\"button\"],\n[type=\"reset\"],\n[type=\"submit\"] {\n -webkit-appearance: button;\n}\nbutton::-moz-focus-inner,\n[type='button']::-moz-focus-inner,\n[type='reset']::-moz-focus-inner,\n[type='submit']::-moz-focus-inner {\n padding: 0;\n border-style: none;\n}\ninput[type='radio'],\ninput[type='checkbox'] {\n box-sizing: border-box;\n padding: 0;\n}\ninput[type='date'],\ninput[type='time'],\ninput[type='datetime-local'],\ninput[type='month'] {\n -webkit-appearance: listbox;\n}\ntextarea {\n overflow: auto;\n resize: vertical;\n}\nfieldset {\n min-width: 0;\n margin: 0;\n padding: 0;\n border: 0;\n}\nlegend {\n display: block;\n width: 100%;\n max-width: 100%;\n margin-bottom: 0.5em;\n padding: 0;\n color: inherit;\n font-size: 1.5em;\n line-height: inherit;\n white-space: normal;\n}\nprogress {\n vertical-align: baseline;\n}\n[type='number']::-webkit-inner-spin-button,\n[type='number']::-webkit-outer-spin-button {\n height: auto;\n}\n[type='search'] {\n outline-offset: -2px;\n -webkit-appearance: none;\n}\n[type='search']::-webkit-search-cancel-button,\n[type='search']::-webkit-search-decoration {\n -webkit-appearance: none;\n}\n::-webkit-file-upload-button {\n font: inherit;\n -webkit-appearance: button;\n}\noutput {\n display: inline-block;\n}\nsummary {\n display: list-item;\n}\ntemplate {\n display: none;\n}\n[hidden] {\n display: none !important;\n}\nmark {\n padding: 0.2em;\n background-color: #feffe6;\n}\n::-moz-selection {\n color: #fff;\n background: #1890ff;\n}\n::selection {\n color: #fff;\n background: #1890ff;\n}\n.clearfix::before {\n display: table;\n content: '';\n}\n.clearfix::after {\n display: table;\n clear: 
both;\n content: '';\n}\n.anticon {\n display: inline-block;\n color: inherit;\n font-style: normal;\n line-height: 0;\n text-align: center;\n text-transform: none;\n vertical-align: -0.125em;\n text-rendering: optimizeLegibility;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n}\n.anticon > * {\n line-height: 1;\n}\n.anticon svg {\n display: inline-block;\n}\n.anticon::before {\n display: none;\n}\n.anticon .anticon-icon {\n display: block;\n}\n.anticon[tabindex] {\n cursor: pointer;\n}\n.anticon-spin::before {\n display: inline-block;\n -webkit-animation: loadingCircle 1s infinite linear;\n animation: loadingCircle 1s infinite linear;\n}\n.anticon-spin {\n display: inline-block;\n -webkit-animation: loadingCircle 1s infinite linear;\n animation: loadingCircle 1s infinite linear;\n}\n.ant-fade-enter,\n.ant-fade-appear {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-fade-leave {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-fade-enter.ant-fade-enter-active,\n.ant-fade-appear.ant-fade-appear-active {\n -webkit-animation-name: antFadeIn;\n animation-name: antFadeIn;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n}\n.ant-fade-leave.ant-fade-leave-active {\n -webkit-animation-name: antFadeOut;\n animation-name: antFadeOut;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n pointer-events: none;\n}\n.ant-fade-enter,\n.ant-fade-appear {\n opacity: 0;\n -webkit-animation-timing-function: linear;\n animation-timing-function: linear;\n}\n.ant-fade-leave {\n -webkit-animation-timing-function: linear;\n animation-timing-function: linear;\n}\n@-webkit-keyframes antFadeIn 
{\n 0% {\n opacity: 0;\n }\n 100% {\n opacity: 1;\n }\n}\n@keyframes antFadeIn {\n 0% {\n opacity: 0;\n }\n 100% {\n opacity: 1;\n }\n}\n@-webkit-keyframes antFadeOut {\n 0% {\n opacity: 1;\n }\n 100% {\n opacity: 0;\n }\n}\n@keyframes antFadeOut {\n 0% {\n opacity: 1;\n }\n 100% {\n opacity: 0;\n }\n}\n.ant-move-up-enter,\n.ant-move-up-appear {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-move-up-leave {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-move-up-enter.ant-move-up-enter-active,\n.ant-move-up-appear.ant-move-up-appear-active {\n -webkit-animation-name: antMoveUpIn;\n animation-name: antMoveUpIn;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n}\n.ant-move-up-leave.ant-move-up-leave-active {\n -webkit-animation-name: antMoveUpOut;\n animation-name: antMoveUpOut;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n pointer-events: none;\n}\n.ant-move-up-enter,\n.ant-move-up-appear {\n opacity: 0;\n -webkit-animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n}\n.ant-move-up-leave {\n -webkit-animation-timing-function: cubic-bezier(0.6, 0.04, 0.98, 0.34);\n animation-timing-function: cubic-bezier(0.6, 0.04, 0.98, 0.34);\n}\n.ant-move-down-enter,\n.ant-move-down-appear {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-move-down-leave {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n 
animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-move-down-enter.ant-move-down-enter-active,\n.ant-move-down-appear.ant-move-down-appear-active {\n -webkit-animation-name: antMoveDownIn;\n animation-name: antMoveDownIn;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n}\n.ant-move-down-leave.ant-move-down-leave-active {\n -webkit-animation-name: antMoveDownOut;\n animation-name: antMoveDownOut;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n pointer-events: none;\n}\n.ant-move-down-enter,\n.ant-move-down-appear {\n opacity: 0;\n -webkit-animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n}\n.ant-move-down-leave {\n -webkit-animation-timing-function: cubic-bezier(0.6, 0.04, 0.98, 0.34);\n animation-timing-function: cubic-bezier(0.6, 0.04, 0.98, 0.34);\n}\n.ant-move-left-enter,\n.ant-move-left-appear {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-move-left-leave {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-move-left-enter.ant-move-left-enter-active,\n.ant-move-left-appear.ant-move-left-appear-active {\n -webkit-animation-name: antMoveLeftIn;\n animation-name: antMoveLeftIn;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n}\n.ant-move-left-leave.ant-move-left-leave-active {\n -webkit-animation-name: antMoveLeftOut;\n animation-name: antMoveLeftOut;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n pointer-events: none;\n}\n.ant-move-left-enter,\n.ant-move-left-appear {\n opacity: 0;\n 
-webkit-animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n}\n.ant-move-left-leave {\n -webkit-animation-timing-function: cubic-bezier(0.6, 0.04, 0.98, 0.34);\n animation-timing-function: cubic-bezier(0.6, 0.04, 0.98, 0.34);\n}\n.ant-move-right-enter,\n.ant-move-right-appear {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-move-right-leave {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-move-right-enter.ant-move-right-enter-active,\n.ant-move-right-appear.ant-move-right-appear-active {\n -webkit-animation-name: antMoveRightIn;\n animation-name: antMoveRightIn;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n}\n.ant-move-right-leave.ant-move-right-leave-active {\n -webkit-animation-name: antMoveRightOut;\n animation-name: antMoveRightOut;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n pointer-events: none;\n}\n.ant-move-right-enter,\n.ant-move-right-appear {\n opacity: 0;\n -webkit-animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n}\n.ant-move-right-leave {\n -webkit-animation-timing-function: cubic-bezier(0.6, 0.04, 0.98, 0.34);\n animation-timing-function: cubic-bezier(0.6, 0.04, 0.98, 0.34);\n}\n@-webkit-keyframes antMoveDownIn {\n 0% {\n transform: translateY(100%);\n transform-origin: 0 0;\n opacity: 0;\n }\n 100% {\n transform: translateY(0%);\n transform-origin: 0 0;\n opacity: 1;\n }\n}\n@keyframes antMoveDownIn {\n 0% {\n transform: translateY(100%);\n transform-origin: 0 0;\n opacity: 0;\n }\n 100% {\n transform: 
translateY(0%);\n transform-origin: 0 0;\n opacity: 1;\n }\n}\n@-webkit-keyframes antMoveDownOut {\n 0% {\n transform: translateY(0%);\n transform-origin: 0 0;\n opacity: 1;\n }\n 100% {\n transform: translateY(100%);\n transform-origin: 0 0;\n opacity: 0;\n }\n}\n@keyframes antMoveDownOut {\n 0% {\n transform: translateY(0%);\n transform-origin: 0 0;\n opacity: 1;\n }\n 100% {\n transform: translateY(100%);\n transform-origin: 0 0;\n opacity: 0;\n }\n}\n@-webkit-keyframes antMoveLeftIn {\n 0% {\n transform: translateX(-100%);\n transform-origin: 0 0;\n opacity: 0;\n }\n 100% {\n transform: translateX(0%);\n transform-origin: 0 0;\n opacity: 1;\n }\n}\n@keyframes antMoveLeftIn {\n 0% {\n transform: translateX(-100%);\n transform-origin: 0 0;\n opacity: 0;\n }\n 100% {\n transform: translateX(0%);\n transform-origin: 0 0;\n opacity: 1;\n }\n}\n@-webkit-keyframes antMoveLeftOut {\n 0% {\n transform: translateX(0%);\n transform-origin: 0 0;\n opacity: 1;\n }\n 100% {\n transform: translateX(-100%);\n transform-origin: 0 0;\n opacity: 0;\n }\n}\n@keyframes antMoveLeftOut {\n 0% {\n transform: translateX(0%);\n transform-origin: 0 0;\n opacity: 1;\n }\n 100% {\n transform: translateX(-100%);\n transform-origin: 0 0;\n opacity: 0;\n }\n}\n@-webkit-keyframes antMoveRightIn {\n 0% {\n transform: translateX(100%);\n transform-origin: 0 0;\n opacity: 0;\n }\n 100% {\n transform: translateX(0%);\n transform-origin: 0 0;\n opacity: 1;\n }\n}\n@keyframes antMoveRightIn {\n 0% {\n transform: translateX(100%);\n transform-origin: 0 0;\n opacity: 0;\n }\n 100% {\n transform: translateX(0%);\n transform-origin: 0 0;\n opacity: 1;\n }\n}\n@-webkit-keyframes antMoveRightOut {\n 0% {\n transform: translateX(0%);\n transform-origin: 0 0;\n opacity: 1;\n }\n 100% {\n transform: translateX(100%);\n transform-origin: 0 0;\n opacity: 0;\n }\n}\n@keyframes antMoveRightOut {\n 0% {\n transform: translateX(0%);\n transform-origin: 0 0;\n opacity: 1;\n }\n 100% {\n transform: 
translateX(100%);\n transform-origin: 0 0;\n opacity: 0;\n }\n}\n@-webkit-keyframes antMoveUpIn {\n 0% {\n transform: translateY(-100%);\n transform-origin: 0 0;\n opacity: 0;\n }\n 100% {\n transform: translateY(0%);\n transform-origin: 0 0;\n opacity: 1;\n }\n}\n@keyframes antMoveUpIn {\n 0% {\n transform: translateY(-100%);\n transform-origin: 0 0;\n opacity: 0;\n }\n 100% {\n transform: translateY(0%);\n transform-origin: 0 0;\n opacity: 1;\n }\n}\n@-webkit-keyframes antMoveUpOut {\n 0% {\n transform: translateY(0%);\n transform-origin: 0 0;\n opacity: 1;\n }\n 100% {\n transform: translateY(-100%);\n transform-origin: 0 0;\n opacity: 0;\n }\n}\n@keyframes antMoveUpOut {\n 0% {\n transform: translateY(0%);\n transform-origin: 0 0;\n opacity: 1;\n }\n 100% {\n transform: translateY(-100%);\n transform-origin: 0 0;\n opacity: 0;\n }\n}\n@-webkit-keyframes loadingCircle {\n 100% {\n transform: rotate(360deg);\n }\n}\n@keyframes loadingCircle {\n 100% {\n transform: rotate(360deg);\n }\n}\n[ant-click-animating='true'],\n[ant-click-animating-without-extra-node='true'] {\n position: relative;\n}\nhtml {\n --antd-wave-shadow-color: #1890ff;\n --scroll-bar: 0;\n}\n[ant-click-animating-without-extra-node='true']::after,\n.ant-click-animating-node {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n display: block;\n border-radius: inherit;\n box-shadow: 0 0 0 0 #1890ff;\n box-shadow: 0 0 0 0 var(--antd-wave-shadow-color);\n opacity: 0.2;\n -webkit-animation: fadeEffect 2s cubic-bezier(0.08, 0.82, 0.17, 1), waveEffect 0.4s cubic-bezier(0.08, 0.82, 0.17, 1);\n animation: fadeEffect 2s cubic-bezier(0.08, 0.82, 0.17, 1), waveEffect 0.4s cubic-bezier(0.08, 0.82, 0.17, 1);\n -webkit-animation-fill-mode: forwards;\n animation-fill-mode: forwards;\n content: '';\n pointer-events: none;\n}\n@-webkit-keyframes waveEffect {\n 100% {\n box-shadow: 0 0 0 #1890ff;\n box-shadow: 0 0 0 6px var(--antd-wave-shadow-color);\n }\n}\n@keyframes waveEffect {\n 100% {\n 
box-shadow: 0 0 0 #1890ff;\n box-shadow: 0 0 0 6px var(--antd-wave-shadow-color);\n }\n}\n@-webkit-keyframes fadeEffect {\n 100% {\n opacity: 0;\n }\n}\n@keyframes fadeEffect {\n 100% {\n opacity: 0;\n }\n}\n.ant-slide-up-enter,\n.ant-slide-up-appear {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-slide-up-leave {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-slide-up-enter.ant-slide-up-enter-active,\n.ant-slide-up-appear.ant-slide-up-appear-active {\n -webkit-animation-name: antSlideUpIn;\n animation-name: antSlideUpIn;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n}\n.ant-slide-up-leave.ant-slide-up-leave-active {\n -webkit-animation-name: antSlideUpOut;\n animation-name: antSlideUpOut;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n pointer-events: none;\n}\n.ant-slide-up-enter,\n.ant-slide-up-appear {\n opacity: 0;\n -webkit-animation-timing-function: cubic-bezier(0.23, 1, 0.32, 1);\n animation-timing-function: cubic-bezier(0.23, 1, 0.32, 1);\n}\n.ant-slide-up-leave {\n -webkit-animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06);\n animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06);\n}\n.ant-slide-down-enter,\n.ant-slide-down-appear {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-slide-down-leave {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: 
paused;\n}\n.ant-slide-down-enter.ant-slide-down-enter-active,\n.ant-slide-down-appear.ant-slide-down-appear-active {\n -webkit-animation-name: antSlideDownIn;\n animation-name: antSlideDownIn;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n}\n.ant-slide-down-leave.ant-slide-down-leave-active {\n -webkit-animation-name: antSlideDownOut;\n animation-name: antSlideDownOut;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n pointer-events: none;\n}\n.ant-slide-down-enter,\n.ant-slide-down-appear {\n opacity: 0;\n -webkit-animation-timing-function: cubic-bezier(0.23, 1, 0.32, 1);\n animation-timing-function: cubic-bezier(0.23, 1, 0.32, 1);\n}\n.ant-slide-down-leave {\n -webkit-animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06);\n animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06);\n}\n.ant-slide-left-enter,\n.ant-slide-left-appear {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-slide-left-leave {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-slide-left-enter.ant-slide-left-enter-active,\n.ant-slide-left-appear.ant-slide-left-appear-active {\n -webkit-animation-name: antSlideLeftIn;\n animation-name: antSlideLeftIn;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n}\n.ant-slide-left-leave.ant-slide-left-leave-active {\n -webkit-animation-name: antSlideLeftOut;\n animation-name: antSlideLeftOut;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n pointer-events: none;\n}\n.ant-slide-left-enter,\n.ant-slide-left-appear {\n opacity: 0;\n -webkit-animation-timing-function: cubic-bezier(0.23, 1, 0.32, 1);\n 
animation-timing-function: cubic-bezier(0.23, 1, 0.32, 1);\n}\n.ant-slide-left-leave {\n -webkit-animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06);\n animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06);\n}\n.ant-slide-right-enter,\n.ant-slide-right-appear {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-slide-right-leave {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-slide-right-enter.ant-slide-right-enter-active,\n.ant-slide-right-appear.ant-slide-right-appear-active {\n -webkit-animation-name: antSlideRightIn;\n animation-name: antSlideRightIn;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n}\n.ant-slide-right-leave.ant-slide-right-leave-active {\n -webkit-animation-name: antSlideRightOut;\n animation-name: antSlideRightOut;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n pointer-events: none;\n}\n.ant-slide-right-enter,\n.ant-slide-right-appear {\n opacity: 0;\n -webkit-animation-timing-function: cubic-bezier(0.23, 1, 0.32, 1);\n animation-timing-function: cubic-bezier(0.23, 1, 0.32, 1);\n}\n.ant-slide-right-leave {\n -webkit-animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06);\n animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06);\n}\n@-webkit-keyframes antSlideUpIn {\n 0% {\n transform: scaleY(0.8);\n transform-origin: 0% 0%;\n opacity: 0;\n }\n 100% {\n transform: scaleY(1);\n transform-origin: 0% 0%;\n opacity: 1;\n }\n}\n@keyframes antSlideUpIn {\n 0% {\n transform: scaleY(0.8);\n transform-origin: 0% 0%;\n opacity: 0;\n }\n 100% {\n transform: scaleY(1);\n transform-origin: 0% 0%;\n opacity: 1;\n }\n}\n@-webkit-keyframes 
antSlideUpOut {\n 0% {\n transform: scaleY(1);\n transform-origin: 0% 0%;\n opacity: 1;\n }\n 100% {\n transform: scaleY(0.8);\n transform-origin: 0% 0%;\n opacity: 0;\n }\n}\n@keyframes antSlideUpOut {\n 0% {\n transform: scaleY(1);\n transform-origin: 0% 0%;\n opacity: 1;\n }\n 100% {\n transform: scaleY(0.8);\n transform-origin: 0% 0%;\n opacity: 0;\n }\n}\n@-webkit-keyframes antSlideDownIn {\n 0% {\n transform: scaleY(0.8);\n transform-origin: 100% 100%;\n opacity: 0;\n }\n 100% {\n transform: scaleY(1);\n transform-origin: 100% 100%;\n opacity: 1;\n }\n}\n@keyframes antSlideDownIn {\n 0% {\n transform: scaleY(0.8);\n transform-origin: 100% 100%;\n opacity: 0;\n }\n 100% {\n transform: scaleY(1);\n transform-origin: 100% 100%;\n opacity: 1;\n }\n}\n@-webkit-keyframes antSlideDownOut {\n 0% {\n transform: scaleY(1);\n transform-origin: 100% 100%;\n opacity: 1;\n }\n 100% {\n transform: scaleY(0.8);\n transform-origin: 100% 100%;\n opacity: 0;\n }\n}\n@keyframes antSlideDownOut {\n 0% {\n transform: scaleY(1);\n transform-origin: 100% 100%;\n opacity: 1;\n }\n 100% {\n transform: scaleY(0.8);\n transform-origin: 100% 100%;\n opacity: 0;\n }\n}\n@-webkit-keyframes antSlideLeftIn {\n 0% {\n transform: scaleX(0.8);\n transform-origin: 0% 0%;\n opacity: 0;\n }\n 100% {\n transform: scaleX(1);\n transform-origin: 0% 0%;\n opacity: 1;\n }\n}\n@keyframes antSlideLeftIn {\n 0% {\n transform: scaleX(0.8);\n transform-origin: 0% 0%;\n opacity: 0;\n }\n 100% {\n transform: scaleX(1);\n transform-origin: 0% 0%;\n opacity: 1;\n }\n}\n@-webkit-keyframes antSlideLeftOut {\n 0% {\n transform: scaleX(1);\n transform-origin: 0% 0%;\n opacity: 1;\n }\n 100% {\n transform: scaleX(0.8);\n transform-origin: 0% 0%;\n opacity: 0;\n }\n}\n@keyframes antSlideLeftOut {\n 0% {\n transform: scaleX(1);\n transform-origin: 0% 0%;\n opacity: 1;\n }\n 100% {\n transform: scaleX(0.8);\n transform-origin: 0% 0%;\n opacity: 0;\n }\n}\n@-webkit-keyframes antSlideRightIn {\n 0% {\n transform: 
scaleX(0.8);\n transform-origin: 100% 0%;\n opacity: 0;\n }\n 100% {\n transform: scaleX(1);\n transform-origin: 100% 0%;\n opacity: 1;\n }\n}\n@keyframes antSlideRightIn {\n 0% {\n transform: scaleX(0.8);\n transform-origin: 100% 0%;\n opacity: 0;\n }\n 100% {\n transform: scaleX(1);\n transform-origin: 100% 0%;\n opacity: 1;\n }\n}\n@-webkit-keyframes antSlideRightOut {\n 0% {\n transform: scaleX(1);\n transform-origin: 100% 0%;\n opacity: 1;\n }\n 100% {\n transform: scaleX(0.8);\n transform-origin: 100% 0%;\n opacity: 0;\n }\n}\n@keyframes antSlideRightOut {\n 0% {\n transform: scaleX(1);\n transform-origin: 100% 0%;\n opacity: 1;\n }\n 100% {\n transform: scaleX(0.8);\n transform-origin: 100% 0%;\n opacity: 0;\n }\n}\n.ant-zoom-enter,\n.ant-zoom-appear {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-zoom-leave {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-zoom-enter.ant-zoom-enter-active,\n.ant-zoom-appear.ant-zoom-appear-active {\n -webkit-animation-name: antZoomIn;\n animation-name: antZoomIn;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n}\n.ant-zoom-leave.ant-zoom-leave-active {\n -webkit-animation-name: antZoomOut;\n animation-name: antZoomOut;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n pointer-events: none;\n}\n.ant-zoom-enter,\n.ant-zoom-appear {\n transform: scale(0);\n opacity: 0;\n -webkit-animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n}\n.ant-zoom-enter-prepare,\n.ant-zoom-appear-prepare {\n transform: none;\n}\n.ant-zoom-leave {\n -webkit-animation-timing-function: cubic-bezier(0.78, 
0.14, 0.15, 0.86);\n animation-timing-function: cubic-bezier(0.78, 0.14, 0.15, 0.86);\n}\n.ant-zoom-big-enter,\n.ant-zoom-big-appear {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-zoom-big-leave {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-zoom-big-enter.ant-zoom-big-enter-active,\n.ant-zoom-big-appear.ant-zoom-big-appear-active {\n -webkit-animation-name: antZoomBigIn;\n animation-name: antZoomBigIn;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n}\n.ant-zoom-big-leave.ant-zoom-big-leave-active {\n -webkit-animation-name: antZoomBigOut;\n animation-name: antZoomBigOut;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n pointer-events: none;\n}\n.ant-zoom-big-enter,\n.ant-zoom-big-appear {\n transform: scale(0);\n opacity: 0;\n -webkit-animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n}\n.ant-zoom-big-enter-prepare,\n.ant-zoom-big-appear-prepare {\n transform: none;\n}\n.ant-zoom-big-leave {\n -webkit-animation-timing-function: cubic-bezier(0.78, 0.14, 0.15, 0.86);\n animation-timing-function: cubic-bezier(0.78, 0.14, 0.15, 0.86);\n}\n.ant-zoom-big-fast-enter,\n.ant-zoom-big-fast-appear {\n -webkit-animation-duration: 0.1s;\n animation-duration: 0.1s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-zoom-big-fast-leave {\n -webkit-animation-duration: 0.1s;\n animation-duration: 0.1s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: 
paused;\n}\n.ant-zoom-big-fast-enter.ant-zoom-big-fast-enter-active,\n.ant-zoom-big-fast-appear.ant-zoom-big-fast-appear-active {\n -webkit-animation-name: antZoomBigIn;\n animation-name: antZoomBigIn;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n}\n.ant-zoom-big-fast-leave.ant-zoom-big-fast-leave-active {\n -webkit-animation-name: antZoomBigOut;\n animation-name: antZoomBigOut;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n pointer-events: none;\n}\n.ant-zoom-big-fast-enter,\n.ant-zoom-big-fast-appear {\n transform: scale(0);\n opacity: 0;\n -webkit-animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n}\n.ant-zoom-big-fast-enter-prepare,\n.ant-zoom-big-fast-appear-prepare {\n transform: none;\n}\n.ant-zoom-big-fast-leave {\n -webkit-animation-timing-function: cubic-bezier(0.78, 0.14, 0.15, 0.86);\n animation-timing-function: cubic-bezier(0.78, 0.14, 0.15, 0.86);\n}\n.ant-zoom-up-enter,\n.ant-zoom-up-appear {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-zoom-up-leave {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-zoom-up-enter.ant-zoom-up-enter-active,\n.ant-zoom-up-appear.ant-zoom-up-appear-active {\n -webkit-animation-name: antZoomUpIn;\n animation-name: antZoomUpIn;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n}\n.ant-zoom-up-leave.ant-zoom-up-leave-active {\n -webkit-animation-name: antZoomUpOut;\n animation-name: antZoomUpOut;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n pointer-events: none;\n}\n.ant-zoom-up-enter,\n.ant-zoom-up-appear {\n 
transform: scale(0);\n opacity: 0;\n -webkit-animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n}\n.ant-zoom-up-enter-prepare,\n.ant-zoom-up-appear-prepare {\n transform: none;\n}\n.ant-zoom-up-leave {\n -webkit-animation-timing-function: cubic-bezier(0.78, 0.14, 0.15, 0.86);\n animation-timing-function: cubic-bezier(0.78, 0.14, 0.15, 0.86);\n}\n.ant-zoom-down-enter,\n.ant-zoom-down-appear {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-zoom-down-leave {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-zoom-down-enter.ant-zoom-down-enter-active,\n.ant-zoom-down-appear.ant-zoom-down-appear-active {\n -webkit-animation-name: antZoomDownIn;\n animation-name: antZoomDownIn;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n}\n.ant-zoom-down-leave.ant-zoom-down-leave-active {\n -webkit-animation-name: antZoomDownOut;\n animation-name: antZoomDownOut;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n pointer-events: none;\n}\n.ant-zoom-down-enter,\n.ant-zoom-down-appear {\n transform: scale(0);\n opacity: 0;\n -webkit-animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n}\n.ant-zoom-down-enter-prepare,\n.ant-zoom-down-appear-prepare {\n transform: none;\n}\n.ant-zoom-down-leave {\n -webkit-animation-timing-function: cubic-bezier(0.78, 0.14, 0.15, 0.86);\n animation-timing-function: cubic-bezier(0.78, 0.14, 0.15, 0.86);\n}\n.ant-zoom-left-enter,\n.ant-zoom-left-appear {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n 
-webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-zoom-left-leave {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-zoom-left-enter.ant-zoom-left-enter-active,\n.ant-zoom-left-appear.ant-zoom-left-appear-active {\n -webkit-animation-name: antZoomLeftIn;\n animation-name: antZoomLeftIn;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n}\n.ant-zoom-left-leave.ant-zoom-left-leave-active {\n -webkit-animation-name: antZoomLeftOut;\n animation-name: antZoomLeftOut;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n pointer-events: none;\n}\n.ant-zoom-left-enter,\n.ant-zoom-left-appear {\n transform: scale(0);\n opacity: 0;\n -webkit-animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n}\n.ant-zoom-left-enter-prepare,\n.ant-zoom-left-appear-prepare {\n transform: none;\n}\n.ant-zoom-left-leave {\n -webkit-animation-timing-function: cubic-bezier(0.78, 0.14, 0.15, 0.86);\n animation-timing-function: cubic-bezier(0.78, 0.14, 0.15, 0.86);\n}\n.ant-zoom-right-enter,\n.ant-zoom-right-appear {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-zoom-right-leave {\n -webkit-animation-duration: 0.2s;\n animation-duration: 0.2s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n -webkit-animation-play-state: paused;\n animation-play-state: paused;\n}\n.ant-zoom-right-enter.ant-zoom-right-enter-active,\n.ant-zoom-right-appear.ant-zoom-right-appear-active {\n -webkit-animation-name: antZoomRightIn;\n animation-name: antZoomRightIn;\n 
-webkit-animation-play-state: running;\n animation-play-state: running;\n}\n.ant-zoom-right-leave.ant-zoom-right-leave-active {\n -webkit-animation-name: antZoomRightOut;\n animation-name: antZoomRightOut;\n -webkit-animation-play-state: running;\n animation-play-state: running;\n pointer-events: none;\n}\n.ant-zoom-right-enter,\n.ant-zoom-right-appear {\n transform: scale(0);\n opacity: 0;\n -webkit-animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n animation-timing-function: cubic-bezier(0.08, 0.82, 0.17, 1);\n}\n.ant-zoom-right-enter-prepare,\n.ant-zoom-right-appear-prepare {\n transform: none;\n}\n.ant-zoom-right-leave {\n -webkit-animation-timing-function: cubic-bezier(0.78, 0.14, 0.15, 0.86);\n animation-timing-function: cubic-bezier(0.78, 0.14, 0.15, 0.86);\n}\n@-webkit-keyframes antZoomIn {\n 0% {\n transform: scale(0.2);\n opacity: 0;\n }\n 100% {\n transform: scale(1);\n opacity: 1;\n }\n}\n@keyframes antZoomIn {\n 0% {\n transform: scale(0.2);\n opacity: 0;\n }\n 100% {\n transform: scale(1);\n opacity: 1;\n }\n}\n@-webkit-keyframes antZoomOut {\n 0% {\n transform: scale(1);\n }\n 100% {\n transform: scale(0.2);\n opacity: 0;\n }\n}\n@keyframes antZoomOut {\n 0% {\n transform: scale(1);\n }\n 100% {\n transform: scale(0.2);\n opacity: 0;\n }\n}\n@-webkit-keyframes antZoomBigIn {\n 0% {\n transform: scale(0.8);\n opacity: 0;\n }\n 100% {\n transform: scale(1);\n opacity: 1;\n }\n}\n@keyframes antZoomBigIn {\n 0% {\n transform: scale(0.8);\n opacity: 0;\n }\n 100% {\n transform: scale(1);\n opacity: 1;\n }\n}\n@-webkit-keyframes antZoomBigOut {\n 0% {\n transform: scale(1);\n }\n 100% {\n transform: scale(0.8);\n opacity: 0;\n }\n}\n@keyframes antZoomBigOut {\n 0% {\n transform: scale(1);\n }\n 100% {\n transform: scale(0.8);\n opacity: 0;\n }\n}\n@-webkit-keyframes antZoomUpIn {\n 0% {\n transform: scale(0.8);\n transform-origin: 50% 0%;\n opacity: 0;\n }\n 100% {\n transform: scale(1);\n transform-origin: 50% 0%;\n }\n}\n@keyframes 
antZoomUpIn {\n 0% {\n transform: scale(0.8);\n transform-origin: 50% 0%;\n opacity: 0;\n }\n 100% {\n transform: scale(1);\n transform-origin: 50% 0%;\n }\n}\n@-webkit-keyframes antZoomUpOut {\n 0% {\n transform: scale(1);\n transform-origin: 50% 0%;\n }\n 100% {\n transform: scale(0.8);\n transform-origin: 50% 0%;\n opacity: 0;\n }\n}\n@keyframes antZoomUpOut {\n 0% {\n transform: scale(1);\n transform-origin: 50% 0%;\n }\n 100% {\n transform: scale(0.8);\n transform-origin: 50% 0%;\n opacity: 0;\n }\n}\n@-webkit-keyframes antZoomLeftIn {\n 0% {\n transform: scale(0.8);\n transform-origin: 0% 50%;\n opacity: 0;\n }\n 100% {\n transform: scale(1);\n transform-origin: 0% 50%;\n }\n}\n@keyframes antZoomLeftIn {\n 0% {\n transform: scale(0.8);\n transform-origin: 0% 50%;\n opacity: 0;\n }\n 100% {\n transform: scale(1);\n transform-origin: 0% 50%;\n }\n}\n@-webkit-keyframes antZoomLeftOut {\n 0% {\n transform: scale(1);\n transform-origin: 0% 50%;\n }\n 100% {\n transform: scale(0.8);\n transform-origin: 0% 50%;\n opacity: 0;\n }\n}\n@keyframes antZoomLeftOut {\n 0% {\n transform: scale(1);\n transform-origin: 0% 50%;\n }\n 100% {\n transform: scale(0.8);\n transform-origin: 0% 50%;\n opacity: 0;\n }\n}\n@-webkit-keyframes antZoomRightIn {\n 0% {\n transform: scale(0.8);\n transform-origin: 100% 50%;\n opacity: 0;\n }\n 100% {\n transform: scale(1);\n transform-origin: 100% 50%;\n }\n}\n@keyframes antZoomRightIn {\n 0% {\n transform: scale(0.8);\n transform-origin: 100% 50%;\n opacity: 0;\n }\n 100% {\n transform: scale(1);\n transform-origin: 100% 50%;\n }\n}\n@-webkit-keyframes antZoomRightOut {\n 0% {\n transform: scale(1);\n transform-origin: 100% 50%;\n }\n 100% {\n transform: scale(0.8);\n transform-origin: 100% 50%;\n opacity: 0;\n }\n}\n@keyframes antZoomRightOut {\n 0% {\n transform: scale(1);\n transform-origin: 100% 50%;\n }\n 100% {\n transform: scale(0.8);\n transform-origin: 100% 50%;\n opacity: 0;\n }\n}\n@-webkit-keyframes antZoomDownIn {\n 0% {\n 
transform: scale(0.8);\n transform-origin: 50% 100%;\n opacity: 0;\n }\n 100% {\n transform: scale(1);\n transform-origin: 50% 100%;\n }\n}\n@keyframes antZoomDownIn {\n 0% {\n transform: scale(0.8);\n transform-origin: 50% 100%;\n opacity: 0;\n }\n 100% {\n transform: scale(1);\n transform-origin: 50% 100%;\n }\n}\n@-webkit-keyframes antZoomDownOut {\n 0% {\n transform: scale(1);\n transform-origin: 50% 100%;\n }\n 100% {\n transform: scale(0.8);\n transform-origin: 50% 100%;\n opacity: 0;\n }\n}\n@keyframes antZoomDownOut {\n 0% {\n transform: scale(1);\n transform-origin: 50% 100%;\n }\n 100% {\n transform: scale(0.8);\n transform-origin: 50% 100%;\n opacity: 0;\n }\n}\n.ant-motion-collapse-legacy {\n overflow: hidden;\n}\n.ant-motion-collapse-legacy-active {\n transition: height 0.2s cubic-bezier(0.645, 0.045, 0.355, 1), opacity 0.2s cubic-bezier(0.645, 0.045, 0.355, 1) !important;\n}\n.ant-motion-collapse {\n overflow: hidden;\n transition: height 0.2s cubic-bezier(0.645, 0.045, 0.355, 1), opacity 0.2s cubic-bezier(0.645, 0.045, 0.355, 1) !important;\n}\n",""]);const a=o},8401:(n,e,t)=>{"use strict";t.d(e,{Z:()=>a});var r=t(3645),o=t.n(r)()((function(n){return n[1]}));o.push([n.id,"/* stylelint-disable at-rule-empty-line-before,at-rule-name-space-after,at-rule-no-unknown */\n/* stylelint-disable no-duplicate-selectors */\n/* stylelint-disable */\n/* stylelint-disable declaration-bang-space-before,no-duplicate-selectors,string-no-newline */\n.ant-table.ant-table-middle {\n font-size: 14px;\n}\n.ant-table.ant-table-middle .ant-table-title,\n.ant-table.ant-table-middle .ant-table-footer,\n.ant-table.ant-table-middle .ant-table-thead > tr > th,\n.ant-table.ant-table-middle .ant-table-tbody > tr > td,\n.ant-table.ant-table-middle tfoot > tr > th,\n.ant-table.ant-table-middle tfoot > tr > td {\n padding: 12px 8px;\n}\n.ant-table.ant-table-middle .ant-table-thead th.ant-table-column-has-sorters {\n padding: 0;\n}\n.ant-table.ant-table-middle .ant-table-thead 
.ant-table-filter-column {\n margin: -12px -8px;\n}\n.ant-table.ant-table-middle .ant-table-thead .ant-table-filter-column-title {\n padding: 12px 2.3em 12px 8px;\n}\n.ant-table.ant-table-middle .ant-table-thead .ant-table-column-sorters {\n padding: 12px 8px;\n}\n.ant-table.ant-table-middle .ant-table-expanded-row-fixed {\n margin: -12px -8px;\n}\n.ant-table.ant-table-middle .ant-table-tbody .ant-table-wrapper:only-child .ant-table {\n margin: -12px -8px -12px 25px;\n}\n.ant-table.ant-table-small {\n font-size: 14px;\n}\n.ant-table.ant-table-small .ant-table-title,\n.ant-table.ant-table-small .ant-table-footer,\n.ant-table.ant-table-small .ant-table-thead > tr > th,\n.ant-table.ant-table-small .ant-table-tbody > tr > td,\n.ant-table.ant-table-small tfoot > tr > th,\n.ant-table.ant-table-small tfoot > tr > td {\n padding: 8px 8px;\n}\n.ant-table.ant-table-small .ant-table-thead th.ant-table-column-has-sorters {\n padding: 0;\n}\n.ant-table.ant-table-small .ant-table-thead .ant-table-filter-column {\n margin: -8px -8px;\n}\n.ant-table.ant-table-small .ant-table-thead .ant-table-filter-column-title {\n padding: 8px 2.3em 8px 8px;\n}\n.ant-table.ant-table-small .ant-table-thead .ant-table-column-sorters {\n padding: 8px 8px;\n}\n.ant-table.ant-table-small .ant-table-expanded-row-fixed {\n margin: -8px -8px;\n}\n.ant-table.ant-table-small .ant-table-tbody .ant-table-wrapper:only-child .ant-table {\n margin: -8px -8px -8px 25px;\n}\n.ant-table-small .ant-table-thead > tr > th {\n background-color: #fafafa;\n}\n.ant-table-small .ant-table-selection-column {\n width: 46px;\n min-width: 46px;\n}\n.ant-table.ant-table-bordered > .ant-table-title {\n border: 1px solid #f0f0f0;\n border-bottom: 0;\n}\n.ant-table.ant-table-bordered > .ant-table-container {\n border: 1px solid #f0f0f0;\n border-right: 0;\n border-bottom: 0;\n}\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-content > table > thead > tr > th,\n.ant-table.ant-table-bordered > 
.ant-table-container > .ant-table-header > table > thead > tr > th,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-body > table > thead > tr > th,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-content > table > tbody > tr > td,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-header > table > tbody > tr > td,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-body > table > tbody > tr > td,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-content > table > tfoot > tr > th,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-header > table > tfoot > tr > th,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-body > table > tfoot > tr > th,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-content > table > tfoot > tr > td,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-header > table > tfoot > tr > td,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-body > table > tfoot > tr > td {\n border-right: 1px solid #f0f0f0;\n}\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-content > table > thead > tr:not(:last-child) > th,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-header > table > thead > tr:not(:last-child) > th,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-body > table > thead > tr:not(:last-child) > th {\n border-bottom: 1px solid #f0f0f0;\n}\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-content > table > thead > tr > .ant-table-cell-fix-right-first::after,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-header > table > thead > tr > .ant-table-cell-fix-right-first::after,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-body > table > thead > tr > .ant-table-cell-fix-right-first::after,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-content > table > 
tbody > tr > .ant-table-cell-fix-right-first::after,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-header > table > tbody > tr > .ant-table-cell-fix-right-first::after,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-body > table > tbody > tr > .ant-table-cell-fix-right-first::after,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-content > table > tfoot > tr > .ant-table-cell-fix-right-first::after,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-header > table > tfoot > tr > .ant-table-cell-fix-right-first::after,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-body > table > tfoot > tr > .ant-table-cell-fix-right-first::after {\n border-right: 1px solid #f0f0f0;\n}\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-content > table > tbody > tr > td > .ant-table-expanded-row-fixed,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-header > table > tbody > tr > td > .ant-table-expanded-row-fixed,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-body > table > tbody > tr > td > .ant-table-expanded-row-fixed {\n margin: -16px -17px;\n}\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-content > table > tbody > tr > td > .ant-table-expanded-row-fixed::after,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-header > table > tbody > tr > td > .ant-table-expanded-row-fixed::after,\n.ant-table.ant-table-bordered > .ant-table-container > .ant-table-body > table > tbody > tr > td > .ant-table-expanded-row-fixed::after {\n position: absolute;\n top: 0;\n right: 1px;\n bottom: 0;\n border-right: 1px solid #f0f0f0;\n content: '';\n}\n.ant-table.ant-table-bordered.ant-table-scroll-horizontal > .ant-table-container > .ant-table-body > table > tbody > tr.ant-table-expanded-row > td,\n.ant-table.ant-table-bordered.ant-table-scroll-horizontal > .ant-table-container > .ant-table-body > table > tbody > 
tr.ant-table-placeholder > td {\n border-right: 0;\n}\n.ant-table.ant-table-bordered.ant-table-middle > .ant-table-container > .ant-table-content > table > tbody > tr > td > .ant-table-expanded-row-fixed,\n.ant-table.ant-table-bordered.ant-table-middle > .ant-table-container > .ant-table-body > table > tbody > tr > td > .ant-table-expanded-row-fixed {\n margin: -12px -9px;\n}\n.ant-table.ant-table-bordered.ant-table-small > .ant-table-container > .ant-table-content > table > tbody > tr > td > .ant-table-expanded-row-fixed,\n.ant-table.ant-table-bordered.ant-table-small > .ant-table-container > .ant-table-body > table > tbody > tr > td > .ant-table-expanded-row-fixed {\n margin: -8px -9px;\n}\n.ant-table.ant-table-bordered > .ant-table-footer {\n border: 1px solid #f0f0f0;\n border-top: 0;\n}\n.ant-table-cell .ant-table-container:first-child {\n border-top: 0;\n}\n.ant-table-cell-scrollbar {\n box-shadow: 0 1px 0 1px #fafafa;\n}\n.ant-table-wrapper {\n clear: both;\n max-width: 100%;\n}\n.ant-table-wrapper::before {\n display: table;\n content: '';\n}\n.ant-table-wrapper::after {\n display: table;\n clear: both;\n content: '';\n}\n.ant-table {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n color: rgba(0, 0, 0, 0.85);\n font-variant: tabular-nums;\n line-height: 1.5715;\n list-style: none;\n font-feature-settings: 'tnum';\n position: relative;\n font-size: 14px;\n background: #fff;\n border-radius: 2px;\n}\n.ant-table table {\n width: 100%;\n text-align: left;\n border-radius: 2px 2px 0 0;\n border-collapse: separate;\n border-spacing: 0;\n}\n.ant-table-thead > tr > th,\n.ant-table-tbody > tr > td,\n.ant-table tfoot > tr > th,\n.ant-table tfoot > tr > td {\n position: relative;\n padding: 16px 16px;\n overflow-wrap: break-word;\n}\n.ant-table-cell-ellipsis {\n overflow: hidden;\n white-space: nowrap;\n text-overflow: ellipsis;\n word-break: 
keep-all;\n}\n.ant-table-cell-ellipsis.ant-table-cell-fix-left-last,\n.ant-table-cell-ellipsis.ant-table-cell-fix-right-first {\n overflow: visible;\n}\n.ant-table-cell-ellipsis.ant-table-cell-fix-left-last .ant-table-cell-content,\n.ant-table-cell-ellipsis.ant-table-cell-fix-right-first .ant-table-cell-content {\n display: block;\n overflow: hidden;\n text-overflow: ellipsis;\n}\n.ant-table-title {\n padding: 16px 16px;\n}\n.ant-table-footer {\n padding: 16px 16px;\n color: rgba(0, 0, 0, 0.85);\n background: #fafafa;\n}\n.ant-table-thead > tr > th {\n color: rgba(0, 0, 0, 0.85);\n font-weight: 500;\n text-align: left;\n background: #fafafa;\n border-bottom: 1px solid #f0f0f0;\n transition: background 0.3s ease;\n}\n.ant-table-thead > tr > th[colspan]:not([colspan='1']) {\n text-align: center;\n}\n.ant-table-thead > tr:not(:last-child) > th[colspan] {\n border-bottom: 0;\n}\n.ant-table-tbody > tr > td {\n border-bottom: 1px solid #f0f0f0;\n transition: background 0.3s;\n}\n.ant-table-tbody > tr > td > .ant-table-wrapper:only-child .ant-table {\n margin: -16px -16px -16px 33px;\n}\n.ant-table-tbody > tr > td > .ant-table-wrapper:only-child .ant-table-tbody > tr:last-child > td {\n border-bottom: 0;\n}\n.ant-table-tbody > tr > td > .ant-table-wrapper:only-child .ant-table-tbody > tr:last-child > td:first-child,\n.ant-table-tbody > tr > td > .ant-table-wrapper:only-child .ant-table-tbody > tr:last-child > td:last-child {\n border-radius: 0;\n}\n.ant-table-tbody > tr.ant-table-row:hover > td {\n background: #fafafa;\n}\n.ant-table-tbody > tr.ant-table-row-selected > td {\n background: #e6f7ff;\n border-color: rgba(0, 0, 0, 0.03);\n}\n.ant-table-tbody > tr.ant-table-row-selected:hover > td {\n background: #dcf4ff;\n}\n.ant-table tfoot > tr > th,\n.ant-table tfoot > tr > td {\n border-bottom: 1px solid #f0f0f0;\n}\n.ant-table-pagination.ant-pagination {\n margin: 16px 0;\n}\n.ant-table-pagination {\n display: flex;\n flex-wrap: wrap;\n row-gap: 
8px;\n}\n.ant-table-pagination > * {\n flex: none;\n}\n.ant-table-pagination-left {\n justify-content: flex-start;\n}\n.ant-table-pagination-center {\n justify-content: center;\n}\n.ant-table-pagination-right {\n justify-content: flex-end;\n}\n.ant-table-thead th.ant-table-column-has-sorters {\n padding: 0;\n cursor: pointer;\n transition: all 0.3s;\n}\n.ant-table-thead th.ant-table-column-has-sorters:hover {\n background: #f2f2f2;\n}\n.ant-table-thead th.ant-table-column-has-sorters:hover .ant-table-filter-trigger-container {\n background: #f7f7f7;\n}\n.ant-table-thead th.ant-table-column-sort {\n background: #f5f5f5;\n}\ntd.ant-table-column-sort {\n background: #fafafa;\n}\n.ant-table-column-sorters-with-tooltip {\n display: inline-block;\n width: 100%;\n}\n.ant-table-column-sorters {\n display: inline-flex;\n align-items: center;\n padding: 16px 16px;\n}\n.ant-table-column-sorter {\n margin-top: 0.15em;\n margin-bottom: -0.15em;\n margin-left: 8px;\n color: #bfbfbf;\n}\n.ant-table-column-sorter-full {\n margin-top: -0.2em;\n margin-bottom: 0;\n}\n.ant-table-column-sorter-inner {\n display: inline-flex;\n flex-direction: column;\n align-items: center;\n}\n.ant-table-column-sorter-up,\n.ant-table-column-sorter-down {\n font-size: 11px;\n}\n.ant-table-column-sorter-up.active,\n.ant-table-column-sorter-down.active {\n color: #1890ff;\n}\n.ant-table-column-sorter-up + .ant-table-column-sorter-down {\n margin-top: -0.3em;\n}\n.ant-table-filter-column {\n display: flex;\n align-items: center;\n margin: -16px -16px;\n}\n.ant-table-filter-column-title {\n flex: auto;\n padding: 16px 2.3em 16px 16px;\n}\n.ant-table-thead tr th.ant-table-column-has-sorters .ant-table-filter-column {\n margin: 0;\n}\n.ant-table-thead tr th.ant-table-column-has-sorters .ant-table-filter-column-title {\n padding: 0 2.3em 0 0;\n}\n.ant-table-filter-trigger-container {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n display: flex;\n flex: none;\n align-items: stretch;\n align-self: 
stretch;\n cursor: pointer;\n transition: background-color 0.3s;\n}\n.ant-table-filter-trigger-container-open,\n.ant-table-filter-trigger-container:hover,\n.ant-table-thead th.ant-table-column-has-sorters:hover .ant-table-filter-trigger-container:hover {\n background: #e5e5e5;\n}\n.ant-table-filter-trigger {\n display: block;\n width: 2.3em;\n color: #bfbfbf;\n font-size: 12px;\n transition: color 0.3s;\n}\n.ant-table-filter-trigger .anticon {\n position: absolute;\n top: 50%;\n left: 50%;\n transform: translate(-50%, -50%);\n}\n.ant-table-filter-trigger-container-open .ant-table-filter-trigger,\n.ant-table-filter-trigger:hover {\n color: rgba(0, 0, 0, 0.45);\n}\n.ant-table-filter-trigger.active {\n color: #1890ff;\n}\n.ant-table-filter-dropdown {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n color: rgba(0, 0, 0, 0.85);\n font-size: 14px;\n font-variant: tabular-nums;\n line-height: 1.5715;\n list-style: none;\n font-feature-settings: 'tnum';\n min-width: 120px;\n background-color: #fff;\n border-radius: 2px;\n box-shadow: 0 3px 6px -4px rgba(0, 0, 0, 0.12), 0 6px 16px 0 rgba(0, 0, 0, 0.08), 0 9px 28px 8px rgba(0, 0, 0, 0.05);\n}\n.ant-table-filter-dropdown .ant-dropdown-menu {\n max-height: 264px;\n overflow-x: hidden;\n border: 0;\n box-shadow: none;\n}\n.ant-table-filter-dropdown-submenu > ul {\n max-height: calc(100vh - 130px);\n overflow-x: hidden;\n overflow-y: auto;\n}\n.ant-table-filter-dropdown .ant-checkbox-wrapper + span,\n.ant-table-filter-dropdown-submenu .ant-checkbox-wrapper + span {\n padding-left: 8px;\n}\n.ant-table-filter-dropdown-btns {\n display: flex;\n justify-content: space-between;\n padding: 7px 8px 7px 3px;\n overflow: hidden;\n background-color: inherit;\n border-top: 1px solid #f0f0f0;\n}\n.ant-table .ant-table-selection-col {\n width: 32px;\n}\ntable tr th.ant-table-selection-column,\ntable tr td.ant-table-selection-column {\n padding-right: 8px;\n padding-left: 8px;\n text-align: center;\n}\ntable tr 
th.ant-table-selection-column .ant-radio-wrapper,\ntable tr td.ant-table-selection-column .ant-radio-wrapper {\n margin-right: 0;\n}\n.ant-table-selection {\n position: relative;\n display: inline-flex;\n flex-direction: column;\n}\n.ant-table-selection-extra {\n position: absolute;\n top: 0;\n z-index: 1;\n cursor: pointer;\n transition: all 0.3s;\n -webkit-margin-start: 100%;\n margin-inline-start: 100%;\n -webkit-padding-start: 4px;\n padding-inline-start: 4px;\n}\n.ant-table-selection-extra .anticon {\n color: #bfbfbf;\n font-size: 10px;\n}\n.ant-table-selection-extra .anticon:hover {\n color: #a6a6a6;\n}\n.ant-table-expand-icon-col {\n width: 48px;\n}\n.ant-table-row-expand-icon-cell {\n text-align: center;\n}\n.ant-table-row-indent {\n float: left;\n height: 1px;\n}\n.ant-table-row-expand-icon {\n color: #1890ff;\n text-decoration: none;\n cursor: pointer;\n transition: color 0.3s;\n position: relative;\n display: inline-flex;\n float: left;\n box-sizing: border-box;\n width: 17px;\n height: 17px;\n padding: 0;\n color: inherit;\n line-height: 17px;\n background: #fff;\n border: 1px solid #f0f0f0;\n border-radius: 2px;\n outline: none;\n transform: scale(0.94117647);\n transform-origin: bottom;\n transition: all 0.3s;\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n}\n.ant-table-row-expand-icon:focus,\n.ant-table-row-expand-icon:hover {\n color: #40a9ff;\n}\n.ant-table-row-expand-icon:active {\n color: #096dd9;\n}\n.ant-table-row-expand-icon:focus,\n.ant-table-row-expand-icon:hover,\n.ant-table-row-expand-icon:active {\n border-color: currentColor;\n}\n.ant-table-row-expand-icon::before,\n.ant-table-row-expand-icon::after {\n position: absolute;\n background: currentColor;\n transition: transform 0.3s ease-out;\n content: '';\n}\n.ant-table-row-expand-icon::before {\n top: 7px;\n right: 3px;\n left: 3px;\n height: 1px;\n}\n.ant-table-row-expand-icon::after {\n top: 3px;\n bottom: 3px;\n left: 7px;\n width: 
1px;\n transform: rotate(90deg);\n}\n.ant-table-row-expand-icon-collapsed::before {\n transform: rotate(-180deg);\n}\n.ant-table-row-expand-icon-collapsed::after {\n transform: rotate(0deg);\n}\n.ant-table-row-expand-icon-spaced {\n background: transparent;\n border: 0;\n visibility: hidden;\n}\n.ant-table-row-expand-icon-spaced::before,\n.ant-table-row-expand-icon-spaced::after {\n display: none;\n content: none;\n}\n.ant-table-row-indent + .ant-table-row-expand-icon {\n margin-top: 2.5005px;\n margin-right: 8px;\n}\ntr.ant-table-expanded-row > td,\ntr.ant-table-expanded-row:hover > td {\n background: #fbfbfb;\n}\ntr.ant-table-expanded-row .ant-descriptions-view {\n display: flex;\n}\ntr.ant-table-expanded-row .ant-descriptions-view table {\n flex: auto;\n width: auto;\n}\n.ant-table .ant-table-expanded-row-fixed {\n position: relative;\n margin: -16px -16px;\n padding: 16px 16px;\n}\n.ant-table-tbody > tr.ant-table-placeholder {\n text-align: center;\n}\n.ant-table-empty .ant-table-tbody > tr.ant-table-placeholder {\n color: rgba(0, 0, 0, 0.25);\n}\n.ant-table-tbody > tr.ant-table-placeholder:hover > td {\n background: #fff;\n}\n.ant-table-cell-fix-left,\n.ant-table-cell-fix-right {\n position: sticky !important;\n z-index: 2;\n background: #fff;\n}\n.ant-table-cell-fix-left-first::after,\n.ant-table-cell-fix-left-last::after {\n position: absolute;\n top: 0;\n right: 0;\n bottom: -1px;\n width: 30px;\n transform: translateX(100%);\n transition: box-shadow 0.3s;\n content: '';\n pointer-events: none;\n}\n.ant-table-cell-fix-right-first::after,\n.ant-table-cell-fix-right-last::after {\n position: absolute;\n top: 0;\n bottom: -1px;\n left: 0;\n width: 30px;\n transform: translateX(-100%);\n transition: box-shadow 0.3s;\n content: '';\n pointer-events: none;\n}\n.ant-table .ant-table-container::before,\n.ant-table .ant-table-container::after {\n position: absolute;\n top: 0;\n bottom: 0;\n z-index: 1;\n width: 30px;\n transition: box-shadow 0.3s;\n content: '';\n 
pointer-events: none;\n}\n.ant-table .ant-table-container::before {\n left: 0;\n}\n.ant-table .ant-table-container::after {\n right: 0;\n}\n.ant-table-ping-left:not(.ant-table-has-fix-left) .ant-table-container {\n position: relative;\n}\n.ant-table-ping-left:not(.ant-table-has-fix-left) .ant-table-container::before {\n box-shadow: inset 10px 0 8px -8px rgba(0, 0, 0, 0.15);\n}\n.ant-table-ping-left .ant-table-cell-fix-left-first::after,\n.ant-table-ping-left .ant-table-cell-fix-left-last::after {\n box-shadow: inset 10px 0 8px -8px rgba(0, 0, 0, 0.15);\n}\n.ant-table-ping-right:not(.ant-table-has-fix-right) .ant-table-container {\n position: relative;\n}\n.ant-table-ping-right:not(.ant-table-has-fix-right) .ant-table-container::after {\n box-shadow: inset -10px 0 8px -8px rgba(0, 0, 0, 0.15);\n}\n.ant-table-ping-right .ant-table-cell-fix-right-first::after,\n.ant-table-ping-right .ant-table-cell-fix-right-last::after {\n box-shadow: inset -10px 0 8px -8px rgba(0, 0, 0, 0.15);\n}\n.ant-table-sticky-header {\n position: sticky;\n z-index: calc(2 + 1);\n}\n.ant-table-sticky-scroll {\n position: sticky;\n bottom: 0;\n z-index: calc(2 + 1);\n display: flex;\n align-items: center;\n background: #ffffff;\n border-top: 1px solid #f0f0f0;\n opacity: 0.6;\n}\n.ant-table-sticky-scroll:hover {\n transform-origin: center bottom;\n}\n.ant-table-sticky-scroll-bar {\n height: 8px;\n background-color: rgba(0, 0, 0, 0.35);\n border-radius: 4px;\n}\n.ant-table-sticky-scroll-bar:hover {\n background-color: rgba(0, 0, 0, 0.8);\n}\n.ant-table-sticky-scroll-bar-active {\n background-color: rgba(0, 0, 0, 0.8);\n}\n@media all and (-ms-high-contrast: none) {\n .ant-table-ping-left .ant-table-cell-fix-left-last::after {\n box-shadow: none !important;\n }\n .ant-table-ping-right .ant-table-cell-fix-right-first::after {\n box-shadow: none !important;\n }\n}\n.ant-table {\n /* title + table */\n /* table */\n /* table + footer */\n}\n.ant-table-title {\n border-radius: 2px 2px 0 
0;\n}\n.ant-table-title + .ant-table-container {\n border-top-left-radius: 0;\n border-top-right-radius: 0;\n}\n.ant-table-title + .ant-table-container table > thead > tr:first-child th:first-child {\n border-radius: 0;\n}\n.ant-table-title + .ant-table-container table > thead > tr:first-child th:last-child {\n border-radius: 0;\n}\n.ant-table-container {\n border-top-left-radius: 2px;\n border-top-right-radius: 2px;\n}\n.ant-table-container table > thead > tr:first-child th:first-child {\n border-top-left-radius: 2px;\n}\n.ant-table-container table > thead > tr:first-child th:last-child {\n border-top-right-radius: 2px;\n}\n.ant-table-footer {\n border-radius: 0 0 2px 2px;\n}\n.ant-table-wrapper-rtl {\n direction: rtl;\n}\n.ant-table-rtl {\n direction: rtl;\n}\n.ant-table-wrapper-rtl .ant-table table {\n text-align: right;\n}\n.ant-table-wrapper-rtl .ant-table-thead > tr > th[colspan]:not([colspan='1']) {\n text-align: center;\n}\n.ant-table-wrapper-rtl .ant-table-thead > tr > th {\n text-align: right;\n}\n.ant-table-tbody > tr .ant-table-wrapper:only-child .ant-table.ant-table-rtl {\n margin: -16px 33px -16px -16px;\n}\n.ant-table-wrapper.ant-table-wrapper-rtl .ant-table-pagination-left {\n justify-content: flex-end;\n}\n.ant-table-wrapper.ant-table-wrapper-rtl .ant-table-pagination-right {\n justify-content: flex-start;\n}\n.ant-table-wrapper-rtl .ant-table-column-sorter {\n margin-right: 8px;\n margin-left: 0;\n}\n.ant-table-wrapper-rtl .ant-table-filter-column-title {\n padding: 16px 16px 16px 2.3em;\n}\n.ant-table-rtl .ant-table-thead tr th.ant-table-column-has-sorters .ant-table-filter-column-title {\n padding: 0 0 0 2.3em;\n}\n.ant-table-wrapper-rtl .ant-table-filter-trigger-container {\n right: auto;\n left: 0;\n}\n.ant-dropdown-rtl .ant-table-filter-dropdown .ant-checkbox-wrapper + span,\n.ant-dropdown-rtl .ant-table-filter-dropdown-submenu .ant-checkbox-wrapper + span,\n.ant-dropdown-menu-submenu-rtl.ant-table-filter-dropdown .ant-checkbox-wrapper + 
span,\n.ant-dropdown-menu-submenu-rtl.ant-table-filter-dropdown-submenu .ant-checkbox-wrapper + span {\n padding-right: 8px;\n padding-left: 0;\n}\n.ant-table-wrapper-rtl .ant-table-selection {\n text-align: center;\n}\n.ant-table-wrapper-rtl .ant-table-row-indent {\n float: right;\n}\n.ant-table-wrapper-rtl .ant-table-row-expand-icon {\n float: right;\n}\n.ant-table-wrapper-rtl .ant-table-row-indent + .ant-table-row-expand-icon {\n margin-right: 0;\n margin-left: 8px;\n}\n.ant-table-wrapper-rtl .ant-table-row-expand-icon::after {\n transform: rotate(-90deg);\n}\n.ant-table-wrapper-rtl .ant-table-row-expand-icon-collapsed::before {\n transform: rotate(180deg);\n}\n.ant-table-wrapper-rtl .ant-table-row-expand-icon-collapsed::after {\n transform: rotate(0deg);\n}\n",""]);const a=o},3951:(n,e,t)=>{"use strict";t.d(e,{Z:()=>a});var r=t(3645),o=t.n(r)()((function(n){return n[1]}));o.push([n.id,"/* stylelint-disable at-rule-empty-line-before,at-rule-name-space-after,at-rule-no-unknown */\n/* stylelint-disable no-duplicate-selectors */\n/* stylelint-disable */\n/* stylelint-disable declaration-bang-space-before,no-duplicate-selectors,string-no-newline */\n.ant-tooltip {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n color: rgba(0, 0, 0, 0.85);\n font-size: 14px;\n font-variant: tabular-nums;\n line-height: 1.5715;\n list-style: none;\n font-feature-settings: 'tnum';\n position: absolute;\n z-index: 1070;\n display: block;\n max-width: 250px;\n visibility: visible;\n}\n.ant-tooltip-hidden {\n display: none;\n}\n.ant-tooltip-placement-top,\n.ant-tooltip-placement-topLeft,\n.ant-tooltip-placement-topRight {\n padding-bottom: 8px;\n}\n.ant-tooltip-placement-right,\n.ant-tooltip-placement-rightTop,\n.ant-tooltip-placement-rightBottom {\n padding-left: 8px;\n}\n.ant-tooltip-placement-bottom,\n.ant-tooltip-placement-bottomLeft,\n.ant-tooltip-placement-bottomRight {\n padding-top: 
8px;\n}\n.ant-tooltip-placement-left,\n.ant-tooltip-placement-leftTop,\n.ant-tooltip-placement-leftBottom {\n padding-right: 8px;\n}\n.ant-tooltip-inner {\n min-width: 30px;\n min-height: 32px;\n padding: 6px 8px;\n color: #fff;\n text-align: left;\n text-decoration: none;\n word-wrap: break-word;\n background-color: rgba(0, 0, 0, 0.75);\n border-radius: 2px;\n box-shadow: 0 3px 6px -4px rgba(0, 0, 0, 0.12), 0 6px 16px 0 rgba(0, 0, 0, 0.08), 0 9px 28px 8px rgba(0, 0, 0, 0.05);\n}\n.ant-tooltip-arrow {\n position: absolute;\n display: block;\n width: 13.07106781px;\n height: 13.07106781px;\n overflow: hidden;\n background: transparent;\n pointer-events: none;\n}\n.ant-tooltip-arrow-content {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n display: block;\n width: 5px;\n height: 5px;\n margin: auto;\n background-color: rgba(0, 0, 0, 0.75);\n content: '';\n pointer-events: auto;\n}\n.ant-tooltip-placement-top .ant-tooltip-arrow,\n.ant-tooltip-placement-topLeft .ant-tooltip-arrow,\n.ant-tooltip-placement-topRight .ant-tooltip-arrow {\n bottom: -5.07106781px;\n}\n.ant-tooltip-placement-top .ant-tooltip-arrow-content,\n.ant-tooltip-placement-topLeft .ant-tooltip-arrow-content,\n.ant-tooltip-placement-topRight .ant-tooltip-arrow-content {\n box-shadow: 3px 3px 7px rgba(0, 0, 0, 0.07);\n transform: translateY(-6.53553391px) rotate(45deg);\n}\n.ant-tooltip-placement-top .ant-tooltip-arrow {\n left: 50%;\n transform: translateX(-50%);\n}\n.ant-tooltip-placement-topLeft .ant-tooltip-arrow {\n left: 13px;\n}\n.ant-tooltip-placement-topRight .ant-tooltip-arrow {\n right: 13px;\n}\n.ant-tooltip-placement-right .ant-tooltip-arrow,\n.ant-tooltip-placement-rightTop .ant-tooltip-arrow,\n.ant-tooltip-placement-rightBottom .ant-tooltip-arrow {\n left: -5.07106781px;\n}\n.ant-tooltip-placement-right .ant-tooltip-arrow-content,\n.ant-tooltip-placement-rightTop .ant-tooltip-arrow-content,\n.ant-tooltip-placement-rightBottom .ant-tooltip-arrow-content {\n box-shadow: 
-3px 3px 7px rgba(0, 0, 0, 0.07);\n transform: translateX(6.53553391px) rotate(45deg);\n}\n.ant-tooltip-placement-right .ant-tooltip-arrow {\n top: 50%;\n transform: translateY(-50%);\n}\n.ant-tooltip-placement-rightTop .ant-tooltip-arrow {\n top: 5px;\n}\n.ant-tooltip-placement-rightBottom .ant-tooltip-arrow {\n bottom: 5px;\n}\n.ant-tooltip-placement-left .ant-tooltip-arrow,\n.ant-tooltip-placement-leftTop .ant-tooltip-arrow,\n.ant-tooltip-placement-leftBottom .ant-tooltip-arrow {\n right: -5.07106781px;\n}\n.ant-tooltip-placement-left .ant-tooltip-arrow-content,\n.ant-tooltip-placement-leftTop .ant-tooltip-arrow-content,\n.ant-tooltip-placement-leftBottom .ant-tooltip-arrow-content {\n box-shadow: 3px -3px 7px rgba(0, 0, 0, 0.07);\n transform: translateX(-6.53553391px) rotate(45deg);\n}\n.ant-tooltip-placement-left .ant-tooltip-arrow {\n top: 50%;\n transform: translateY(-50%);\n}\n.ant-tooltip-placement-leftTop .ant-tooltip-arrow {\n top: 5px;\n}\n.ant-tooltip-placement-leftBottom .ant-tooltip-arrow {\n bottom: 5px;\n}\n.ant-tooltip-placement-bottom .ant-tooltip-arrow,\n.ant-tooltip-placement-bottomLeft .ant-tooltip-arrow,\n.ant-tooltip-placement-bottomRight .ant-tooltip-arrow {\n top: -5.07106781px;\n}\n.ant-tooltip-placement-bottom .ant-tooltip-arrow-content,\n.ant-tooltip-placement-bottomLeft .ant-tooltip-arrow-content,\n.ant-tooltip-placement-bottomRight .ant-tooltip-arrow-content {\n box-shadow: -3px -3px 7px rgba(0, 0, 0, 0.07);\n transform: translateY(6.53553391px) rotate(45deg);\n}\n.ant-tooltip-placement-bottom .ant-tooltip-arrow {\n left: 50%;\n transform: translateX(-50%);\n}\n.ant-tooltip-placement-bottomLeft .ant-tooltip-arrow {\n left: 13px;\n}\n.ant-tooltip-placement-bottomRight .ant-tooltip-arrow {\n right: 13px;\n}\n.ant-tooltip-pink .ant-tooltip-inner {\n background-color: #eb2f96;\n}\n.ant-tooltip-pink .ant-tooltip-arrow-content {\n background-color: #eb2f96;\n}\n.ant-tooltip-magenta .ant-tooltip-inner {\n background-color: 
#eb2f96;\n}\n.ant-tooltip-magenta .ant-tooltip-arrow-content {\n background-color: #eb2f96;\n}\n.ant-tooltip-red .ant-tooltip-inner {\n background-color: #f5222d;\n}\n.ant-tooltip-red .ant-tooltip-arrow-content {\n background-color: #f5222d;\n}\n.ant-tooltip-volcano .ant-tooltip-inner {\n background-color: #fa541c;\n}\n.ant-tooltip-volcano .ant-tooltip-arrow-content {\n background-color: #fa541c;\n}\n.ant-tooltip-orange .ant-tooltip-inner {\n background-color: #fa8c16;\n}\n.ant-tooltip-orange .ant-tooltip-arrow-content {\n background-color: #fa8c16;\n}\n.ant-tooltip-yellow .ant-tooltip-inner {\n background-color: #fadb14;\n}\n.ant-tooltip-yellow .ant-tooltip-arrow-content {\n background-color: #fadb14;\n}\n.ant-tooltip-gold .ant-tooltip-inner {\n background-color: #faad14;\n}\n.ant-tooltip-gold .ant-tooltip-arrow-content {\n background-color: #faad14;\n}\n.ant-tooltip-cyan .ant-tooltip-inner {\n background-color: #13c2c2;\n}\n.ant-tooltip-cyan .ant-tooltip-arrow-content {\n background-color: #13c2c2;\n}\n.ant-tooltip-lime .ant-tooltip-inner {\n background-color: #a0d911;\n}\n.ant-tooltip-lime .ant-tooltip-arrow-content {\n background-color: #a0d911;\n}\n.ant-tooltip-green .ant-tooltip-inner {\n background-color: #52c41a;\n}\n.ant-tooltip-green .ant-tooltip-arrow-content {\n background-color: #52c41a;\n}\n.ant-tooltip-blue .ant-tooltip-inner {\n background-color: #1890ff;\n}\n.ant-tooltip-blue .ant-tooltip-arrow-content {\n background-color: #1890ff;\n}\n.ant-tooltip-geekblue .ant-tooltip-inner {\n background-color: #2f54eb;\n}\n.ant-tooltip-geekblue .ant-tooltip-arrow-content {\n background-color: #2f54eb;\n}\n.ant-tooltip-purple .ant-tooltip-inner {\n background-color: #722ed1;\n}\n.ant-tooltip-purple .ant-tooltip-arrow-content {\n background-color: #722ed1;\n}\n.ant-tooltip-rtl {\n direction: rtl;\n}\n.ant-tooltip-rtl .ant-tooltip-inner {\n text-align: right;\n}\n",""]);const a=o},28:(n,e,t)=>{"use strict";t.d(e,{Z:()=>a});var 
r=t(3645),o=t.n(r)()((function(n){return n[1]}));o.push([n.id,".goog-tooltip {\r\n display: none !important;\r\n}\r\n\r\n.visualization-tooltip {\r\n padding: 4px 10px;\r\n white-space: nowrap;\r\n}\r\n\r\ndiv.google-visualization-tooltip { \r\n pointer-events: none;\r\n max-width: 90%;\r\n}\r\n",""]);const a=o},3645:n=>{"use strict";n.exports=function(n){var e=[];return e.toString=function(){return this.map((function(e){var t=n(e);return e[2]?"@media ".concat(e[2]," {").concat(t,"}"):t})).join("")},e.i=function(n,t,r){"string"==typeof n&&(n=[[null,n,""]]);var o={};if(r)for(var a=0;a{"use strict";var r=t(9864),o={childContextTypes:!0,contextType:!0,contextTypes:!0,defaultProps:!0,displayName:!0,getDefaultProps:!0,getDerivedStateFromError:!0,getDerivedStateFromProps:!0,mixins:!0,propTypes:!0,type:!0},a={name:!0,length:!0,prototype:!0,caller:!0,callee:!0,arguments:!0,arity:!0},i={$$typeof:!0,compare:!0,defaultProps:!0,displayName:!0,propTypes:!0,type:!0},l={};function s(n){return r.isMemo(n)?i:l[n.$$typeof]||o}l[r.ForwardRef]={$$typeof:!0,render:!0,defaultProps:!0,displayName:!0,propTypes:!0},l[r.Memo]=i;var c=Object.defineProperty,u=Object.getOwnPropertyNames,d=Object.getOwnPropertySymbols,f=Object.getOwnPropertyDescriptor,p=Object.getPrototypeOf,m=Object.prototype;n.exports=function n(e,t,r){if("string"!=typeof t){if(m){var o=p(t);o&&o!==m&&n(e,o,r)}var i=u(t);d&&(i=i.concat(d(t)));for(var l=s(e),h=s(t),g=0;g{var r=t(852)(t(5639),"DataView");n.exports=r},1989:(n,e,t)=>{var r=t(1789),o=t(401),a=t(7667),i=t(1327),l=t(1866);function s(n){var e=-1,t=null==n?0:n.length;for(this.clear();++e{var r=t(7040),o=t(4125),a=t(2117),i=t(7518),l=t(4705);function s(n){var e=-1,t=null==n?0:n.length;for(this.clear();++e{var r=t(852)(t(5639),"Map");n.exports=r},3369:(n,e,t)=>{var r=t(4785),o=t(1285),a=t(6e3),i=t(9916),l=t(5265);function s(n){var e=-1,t=null==n?0:n.length;for(this.clear();++e{var r=t(852)(t(5639),"Promise");n.exports=r},8525:(n,e,t)=>{var 
r=t(852)(t(5639),"Set");n.exports=r},8668:(n,e,t)=>{var r=t(3369),o=t(619),a=t(2385);function i(n){var e=-1,t=null==n?0:n.length;for(this.__data__=new r;++e{var r=t(8407),o=t(7465),a=t(3779),i=t(7599),l=t(4758),s=t(4309);function c(n){var e=this.__data__=new r(n);this.size=e.size}c.prototype.clear=o,c.prototype.delete=a,c.prototype.get=i,c.prototype.has=l,c.prototype.set=s,n.exports=c},2705:(n,e,t)=>{var r=t(5639).Symbol;n.exports=r},1149:(n,e,t)=>{var r=t(5639).Uint8Array;n.exports=r},577:(n,e,t)=>{var r=t(852)(t(5639),"WeakMap");n.exports=r},4963:n=>{n.exports=function(n,e){for(var t=-1,r=null==n?0:n.length,o=0,a=[];++t{var r=t(2545),o=t(5694),a=t(1469),i=t(4144),l=t(5776),s=t(6719),c=Object.prototype.hasOwnProperty;n.exports=function(n,e){var t=a(n),u=!t&&o(n),d=!t&&!u&&i(n),f=!t&&!u&&!d&&s(n),p=t||u||d||f,m=p?r(n.length,String):[],h=m.length;for(var g in n)!e&&!c.call(n,g)||p&&("length"==g||d&&("offset"==g||"parent"==g)||f&&("buffer"==g||"byteLength"==g||"byteOffset"==g)||l(g,h))||m.push(g);return m}},2488:n=>{n.exports=function(n,e){for(var t=-1,r=e.length,o=n.length;++t{n.exports=function(n,e){for(var t=-1,r=null==n?0:n.length;++t{var r=t(7813);n.exports=function(n,e){for(var t=n.length;t--;)if(r(n[t][0],e))return t;return-1}},8866:(n,e,t)=>{var r=t(2488),o=t(1469);n.exports=function(n,e,t){var a=e(n);return o(n)?a:r(a,t(n))}},4239:(n,e,t)=>{var r=t(2705),o=t(9607),a=t(2333),i=r?r.toStringTag:void 0;n.exports=function(n){return null==n?void 0===n?"[object Undefined]":"[object Null]":i&&i in Object(n)?o(n):a(n)}},9454:(n,e,t)=>{var r=t(4239),o=t(7005);n.exports=function(n){return o(n)&&"[object Arguments]"==r(n)}},939:(n,e,t)=>{var r=t(2492),o=t(7005);n.exports=function n(e,t,a,i,l){return e===t||(null==e||null==t||!o(e)&&!o(t)?e!=e&&t!=t:r(e,t,a,i,n,l))}},2492:(n,e,t)=>{var r=t(6384),o=t(7114),a=t(8351),i=t(933),l=t(4160),s=t(1469),c=t(4144),u=t(6719),d="[object Arguments]",f="[object Array]",p="[object 
Object]",m=Object.prototype.hasOwnProperty;n.exports=function(n,e,t,h,g,b){var v=s(n),y=s(e),x=v?f:l(n),w=y?f:l(e),k=(x=x==d?p:x)==p,E=(w=w==d?p:w)==p,C=x==w;if(C&&c(n)){if(!c(e))return!1;v=!0,k=!1}if(C&&!k)return b||(b=new r),v||u(n)?o(n,e,t,h,g,b):a(n,e,x,t,h,g,b);if(!(1&t)){var S=k&&m.call(n,"__wrapped__"),O=E&&m.call(e,"__wrapped__");if(S||O){var T=S?n.value():n,N=O?e.value():e;return b||(b=new r),g(T,N,t,h,b)}}return!!C&&(b||(b=new r),i(n,e,t,h,g,b))}},8458:(n,e,t)=>{var r=t(3560),o=t(7724),a=t(3218),i=t(346),l=/^\[object .+?Constructor\]$/,s=Function.prototype,c=Object.prototype,u=s.toString,d=c.hasOwnProperty,f=RegExp("^"+u.call(d).replace(/[\\^$.*+?()[\]{}|]/g,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$");n.exports=function(n){return!(!a(n)||o(n))&&(r(n)?f:l).test(i(n))}},8749:(n,e,t)=>{var r=t(4239),o=t(1780),a=t(7005),i={};i["[object Float32Array]"]=i["[object Float64Array]"]=i["[object Int8Array]"]=i["[object Int16Array]"]=i["[object Int32Array]"]=i["[object Uint8Array]"]=i["[object Uint8ClampedArray]"]=i["[object Uint16Array]"]=i["[object Uint32Array]"]=!0,i["[object Arguments]"]=i["[object Array]"]=i["[object ArrayBuffer]"]=i["[object Boolean]"]=i["[object DataView]"]=i["[object Date]"]=i["[object Error]"]=i["[object Function]"]=i["[object Map]"]=i["[object Number]"]=i["[object Object]"]=i["[object RegExp]"]=i["[object Set]"]=i["[object String]"]=i["[object WeakMap]"]=!1,n.exports=function(n){return a(n)&&o(n.length)&&!!i[r(n)]}},280:(n,e,t)=>{var r=t(5726),o=t(6916),a=Object.prototype.hasOwnProperty;n.exports=function(n){if(!r(n))return o(n);var e=[];for(var t in Object(n))a.call(n,t)&&"constructor"!=t&&e.push(t);return e}},2545:n=>{n.exports=function(n,e){for(var t=-1,r=Array(n);++t{var r=t(7990),o=/^\s+/;n.exports=function(n){return n?n.slice(0,r(n)+1).replace(o,""):n}},1717:n=>{n.exports=function(n){return function(e){return n(e)}}},4757:n=>{n.exports=function(n,e){return n.has(e)}},4429:(n,e,t)=>{var 
r=t(5639)["__core-js_shared__"];n.exports=r},7114:(n,e,t)=>{var r=t(8668),o=t(2908),a=t(4757);n.exports=function(n,e,t,i,l,s){var c=1&t,u=n.length,d=e.length;if(u!=d&&!(c&&d>u))return!1;var f=s.get(n),p=s.get(e);if(f&&p)return f==e&&p==n;var m=-1,h=!0,g=2&t?new r:void 0;for(s.set(n,e),s.set(e,n);++m{var r=t(2705),o=t(1149),a=t(7813),i=t(7114),l=t(8776),s=t(1814),c=r?r.prototype:void 0,u=c?c.valueOf:void 0;n.exports=function(n,e,t,r,c,d,f){switch(t){case"[object DataView]":if(n.byteLength!=e.byteLength||n.byteOffset!=e.byteOffset)return!1;n=n.buffer,e=e.buffer;case"[object ArrayBuffer]":return!(n.byteLength!=e.byteLength||!d(new o(n),new o(e)));case"[object Boolean]":case"[object Date]":case"[object Number]":return a(+n,+e);case"[object Error]":return n.name==e.name&&n.message==e.message;case"[object RegExp]":case"[object String]":return n==e+"";case"[object Map]":var p=l;case"[object Set]":var m=1&r;if(p||(p=s),n.size!=e.size&&!m)return!1;var h=f.get(n);if(h)return h==e;r|=2,f.set(n,e);var g=i(p(n),p(e),r,c,d,f);return f.delete(n),g;case"[object Symbol]":if(u)return u.call(n)==u.call(e)}return!1}},933:(n,e,t)=>{var r=t(8234),o=Object.prototype.hasOwnProperty;n.exports=function(n,e,t,a,i,l){var s=1&t,c=r(n),u=c.length;if(u!=r(e).length&&!s)return!1;for(var d=u;d--;){var f=c[d];if(!(s?f in e:o.call(e,f)))return!1}var p=l.get(n),m=l.get(e);if(p&&m)return p==e&&m==n;var h=!0;l.set(n,e),l.set(e,n);for(var g=s;++d{var r="object"==typeof t.g&&t.g&&t.g.Object===Object&&t.g;n.exports=r},8234:(n,e,t)=>{var r=t(8866),o=t(9551),a=t(3674);n.exports=function(n){return r(n,a,o)}},5050:(n,e,t)=>{var r=t(7019);n.exports=function(n,e){var t=n.__data__;return r(e)?t["string"==typeof e?"string":"hash"]:t.map}},852:(n,e,t)=>{var r=t(8458),o=t(7801);n.exports=function(n,e){var t=o(n,e);return r(t)?t:void 0}},9607:(n,e,t)=>{var r=t(2705),o=Object.prototype,a=o.hasOwnProperty,i=o.toString,l=r?r.toStringTag:void 0;n.exports=function(n){var e=a.call(n,l),t=n[l];try{n[l]=void 0;var 
r=!0}catch(n){}var o=i.call(n);return r&&(e?n[l]=t:delete n[l]),o}},9551:(n,e,t)=>{var r=t(4963),o=t(479),a=Object.prototype.propertyIsEnumerable,i=Object.getOwnPropertySymbols,l=i?function(n){return null==n?[]:(n=Object(n),r(i(n),(function(e){return a.call(n,e)})))}:o;n.exports=l},4160:(n,e,t)=>{var r=t(8552),o=t(7071),a=t(3818),i=t(8525),l=t(577),s=t(4239),c=t(346),u="[object Map]",d="[object Promise]",f="[object Set]",p="[object WeakMap]",m="[object DataView]",h=c(r),g=c(o),b=c(a),v=c(i),y=c(l),x=s;(r&&x(new r(new ArrayBuffer(1)))!=m||o&&x(new o)!=u||a&&x(a.resolve())!=d||i&&x(new i)!=f||l&&x(new l)!=p)&&(x=function(n){var e=s(n),t="[object Object]"==e?n.constructor:void 0,r=t?c(t):"";if(r)switch(r){case h:return m;case g:return u;case b:return d;case v:return f;case y:return p}return e}),n.exports=x},7801:n=>{n.exports=function(n,e){return null==n?void 0:n[e]}},1789:(n,e,t)=>{var r=t(4536);n.exports=function(){this.__data__=r?r(null):{},this.size=0}},401:n=>{n.exports=function(n){var e=this.has(n)&&delete this.__data__[n];return this.size-=e?1:0,e}},7667:(n,e,t)=>{var r=t(4536),o=Object.prototype.hasOwnProperty;n.exports=function(n){var e=this.__data__;if(r){var t=e[n];return"__lodash_hash_undefined__"===t?void 0:t}return o.call(e,n)?e[n]:void 0}},1327:(n,e,t)=>{var r=t(4536),o=Object.prototype.hasOwnProperty;n.exports=function(n){var e=this.__data__;return r?void 0!==e[n]:o.call(e,n)}},1866:(n,e,t)=>{var r=t(4536);n.exports=function(n,e){var t=this.__data__;return this.size+=this.has(n)?0:1,t[n]=r&&void 0===e?"__lodash_hash_undefined__":e,this}},5776:n=>{var e=/^(?:0|[1-9]\d*)$/;n.exports=function(n,t){var r=typeof n;return!!(t=null==t?9007199254740991:t)&&("number"==r||"symbol"!=r&&e.test(n))&&n>-1&&n%1==0&&n{n.exports=function(n){var e=typeof n;return"string"==e||"number"==e||"symbol"==e||"boolean"==e?"__proto__"!==n:null===n}},7724:(n,e,t)=>{var 
r,o=t(4429),a=(r=/[^.]+$/.exec(o&&o.keys&&o.keys.IE_PROTO||""))?"Symbol(src)_1."+r:"";n.exports=function(n){return!!a&&a in n}},5726:n=>{var e=Object.prototype;n.exports=function(n){var t=n&&n.constructor;return n===("function"==typeof t&&t.prototype||e)}},7040:n=>{n.exports=function(){this.__data__=[],this.size=0}},4125:(n,e,t)=>{var r=t(8470),o=Array.prototype.splice;n.exports=function(n){var e=this.__data__,t=r(e,n);return!(t<0||(t==e.length-1?e.pop():o.call(e,t,1),--this.size,0))}},2117:(n,e,t)=>{var r=t(8470);n.exports=function(n){var e=this.__data__,t=r(e,n);return t<0?void 0:e[t][1]}},7518:(n,e,t)=>{var r=t(8470);n.exports=function(n){return r(this.__data__,n)>-1}},4705:(n,e,t)=>{var r=t(8470);n.exports=function(n,e){var t=this.__data__,o=r(t,n);return o<0?(++this.size,t.push([n,e])):t[o][1]=e,this}},4785:(n,e,t)=>{var r=t(1989),o=t(8407),a=t(7071);n.exports=function(){this.size=0,this.__data__={hash:new r,map:new(a||o),string:new r}}},1285:(n,e,t)=>{var r=t(5050);n.exports=function(n){var e=r(this,n).delete(n);return this.size-=e?1:0,e}},6e3:(n,e,t)=>{var r=t(5050);n.exports=function(n){return r(this,n).get(n)}},9916:(n,e,t)=>{var r=t(5050);n.exports=function(n){return r(this,n).has(n)}},5265:(n,e,t)=>{var r=t(5050);n.exports=function(n,e){var t=r(this,n),o=t.size;return t.set(n,e),this.size+=t.size==o?0:1,this}},8776:n=>{n.exports=function(n){var e=-1,t=Array(n.size);return n.forEach((function(n,r){t[++e]=[r,n]})),t}},4536:(n,e,t)=>{var r=t(852)(Object,"create");n.exports=r},6916:(n,e,t)=>{var r=t(5569)(Object.keys,Object);n.exports=r},1167:(n,e,t)=>{n=t.nmd(n);var r=t(1957),o=e&&!e.nodeType&&e,a=o&&n&&!n.nodeType&&n,i=a&&a.exports===o&&r.process,l=function(){try{return a&&a.require&&a.require("util").types||i&&i.binding&&i.binding("util")}catch(n){}}();n.exports=l},2333:n=>{var e=Object.prototype.toString;n.exports=function(n){return e.call(n)}},5569:n=>{n.exports=function(n,e){return function(t){return n(e(t))}}},5639:(n,e,t)=>{var 
r=t(1957),o="object"==typeof self&&self&&self.Object===Object&&self,a=r||o||Function("return this")();n.exports=a},619:n=>{n.exports=function(n){return this.__data__.set(n,"__lodash_hash_undefined__"),this}},2385:n=>{n.exports=function(n){return this.__data__.has(n)}},1814:n=>{n.exports=function(n){var e=-1,t=Array(n.size);return n.forEach((function(n){t[++e]=n})),t}},7465:(n,e,t)=>{var r=t(8407);n.exports=function(){this.__data__=new r,this.size=0}},3779:n=>{n.exports=function(n){var e=this.__data__,t=e.delete(n);return this.size=e.size,t}},7599:n=>{n.exports=function(n){return this.__data__.get(n)}},4758:n=>{n.exports=function(n){return this.__data__.has(n)}},4309:(n,e,t)=>{var r=t(8407),o=t(7071),a=t(3369);n.exports=function(n,e){var t=this.__data__;if(t instanceof r){var i=t.__data__;if(!o||i.length<199)return i.push([n,e]),this.size=++t.size,this;t=this.__data__=new a(i)}return t.set(n,e),this.size=t.size,this}},346:n=>{var e=Function.prototype.toString;n.exports=function(n){if(null!=n){try{return e.call(n)}catch(n){}try{return n+""}catch(n){}}return""}},7990:n=>{var e=/\s/;n.exports=function(n){for(var t=n.length;t--&&e.test(n.charAt(t)););return t}},3279:(n,e,t)=>{var r=t(3218),o=t(7771),a=t(4841),i=Math.max,l=Math.min;n.exports=function(n,e,t){var s,c,u,d,f,p,m=0,h=!1,g=!1,b=!0;if("function"!=typeof n)throw new TypeError("Expected a function");function v(e){var t=s,r=c;return s=c=void 0,m=e,d=n.apply(r,t)}function y(n){return m=n,f=setTimeout(w,e),h?v(n):d}function x(n){var t=n-p;return void 0===p||t>=e||t<0||g&&n-m>=u}function w(){var n=o();if(x(n))return k(n);f=setTimeout(w,function(n){var t=e-(n-p);return g?l(t,u-(n-m)):t}(n))}function k(n){return f=void 0,b&&s?v(n):(s=c=void 0,d)}function E(){var n=o(),t=x(n);if(s=arguments,c=this,p=n,t){if(void 0===f)return y(p);if(g)return clearTimeout(f),f=setTimeout(w,e),v(p)}return void 0===f&&(f=setTimeout(w,e)),d}return e=a(e)||0,r(t)&&(h=!!t.leading,u=(g="maxWait"in t)?i(a(t.maxWait)||0,e):u,b="trailing"in 
t?!!t.trailing:b),E.cancel=function(){void 0!==f&&clearTimeout(f),m=0,s=p=c=f=void 0},E.flush=function(){return void 0===f?d:k(o())},E}},7813:n=>{n.exports=function(n,e){return n===e||n!=n&&e!=e}},5694:(n,e,t)=>{var r=t(9454),o=t(7005),a=Object.prototype,i=a.hasOwnProperty,l=a.propertyIsEnumerable,s=r(function(){return arguments}())?r:function(n){return o(n)&&i.call(n,"callee")&&!l.call(n,"callee")};n.exports=s},1469:n=>{var e=Array.isArray;n.exports=e},8612:(n,e,t)=>{var r=t(3560),o=t(1780);n.exports=function(n){return null!=n&&o(n.length)&&!r(n)}},4144:(n,e,t)=>{n=t.nmd(n);var r=t(5639),o=t(5062),a=e&&!e.nodeType&&e,i=a&&n&&!n.nodeType&&n,l=i&&i.exports===a?r.Buffer:void 0,s=(l?l.isBuffer:void 0)||o;n.exports=s},8446:(n,e,t)=>{var r=t(939);n.exports=function(n,e){return r(n,e)}},3560:(n,e,t)=>{var r=t(4239),o=t(3218);n.exports=function(n){if(!o(n))return!1;var e=r(n);return"[object Function]"==e||"[object GeneratorFunction]"==e||"[object AsyncFunction]"==e||"[object Proxy]"==e}},1780:n=>{n.exports=function(n){return"number"==typeof n&&n>-1&&n%1==0&&n<=9007199254740991}},3218:n=>{n.exports=function(n){var e=typeof n;return null!=n&&("object"==e||"function"==e)}},7005:n=>{n.exports=function(n){return null!=n&&"object"==typeof n}},3448:(n,e,t)=>{var r=t(4239),o=t(7005);n.exports=function(n){return"symbol"==typeof n||o(n)&&"[object Symbol]"==r(n)}},6719:(n,e,t)=>{var r=t(8749),o=t(1717),a=t(1167),i=a&&a.isTypedArray,l=i?o(i):r;n.exports=l},3674:(n,e,t)=>{var r=t(4636),o=t(280),a=t(8612);n.exports=function(n){return a(n)?r(n):o(n)}},7771:(n,e,t)=>{var r=t(5639);n.exports=function(){return r.Date.now()}},479:n=>{n.exports=function(){return[]}},5062:n=>{n.exports=function(){return!1}},4841:(n,e,t)=>{var r=t(7561),o=t(3218),a=t(3448),i=/^[-+]0x[0-9a-f]+$/i,l=/^0b[01]+$/i,s=/^0o[0-7]+$/i,c=parseInt;n.exports=function(n){if("number"==typeof n)return n;if(a(n))return NaN;if(o(n)){var e="function"==typeof n.valueOf?n.valueOf():n;n=o(e)?e+"":e}if("string"!=typeof n)return 
0===n?n:+n;n=r(n);var t=l.test(n);return t||s.test(n)?c(n.slice(2),t?2:8):i.test(n)?NaN:+n}},7418:n=>{"use strict";var e=Object.getOwnPropertySymbols,t=Object.prototype.hasOwnProperty,r=Object.prototype.propertyIsEnumerable;function o(n){if(null==n)throw new TypeError("Object.assign cannot be called with null or undefined");return Object(n)}n.exports=function(){try{if(!Object.assign)return!1;var n=new String("abc");if(n[5]="de","5"===Object.getOwnPropertyNames(n)[0])return!1;for(var e={},t=0;t<10;t++)e["_"+String.fromCharCode(t)]=t;if("0123456789"!==Object.getOwnPropertyNames(e).map((function(n){return e[n]})).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach((function(n){r[n]=n})),"abcdefghijklmnopqrst"===Object.keys(Object.assign({},r)).join("")}catch(n){return!1}}()?Object.assign:function(n,a){for(var i,l,s=o(n),c=1;c{t(7147),n.exports=self.fetch.bind(self)},2703:(n,e,t)=>{"use strict";var r=t(414);function o(){}function a(){}a.resetWarningCache=o,n.exports=function(){function n(n,e,t,o,a,i){if(i!==r){var l=new Error("Calling PropTypes validators directly is not supported by the `prop-types` package. Use PropTypes.checkPropTypes() to call them. 
Read more at http://fb.me/use-check-prop-types");throw l.name="Invariant Violation",l}}function e(){return n}n.isRequired=n;var t={array:n,bool:n,func:n,number:n,object:n,string:n,symbol:n,any:n,arrayOf:e,element:n,elementType:n,instanceOf:e,node:n,objectOf:e,oneOf:e,oneOfType:e,shape:e,exact:e,checkPropTypes:a,resetWarningCache:o};return t.PropTypes=t,t}},5697:(n,e,t)=>{n.exports=t(2703)()},414:n=>{"use strict";n.exports="SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED"},4971:function(n,e,t){var r;n=t.nmd(n),function(o){e&&e.nodeType,n&&n.nodeType;var a="object"==typeof t.g&&t.g;a.global!==a&&a.window!==a&&a.self;var i,l=2147483647,s=36,c=/^xn--/,u=/[^\x20-\x7E]/,d=/[\x2E\u3002\uFF0E\uFF61]/g,f={overflow:"Overflow: input needs wider integers to process","not-basic":"Illegal input >= 0x80 (not a basic code point)","invalid-input":"Invalid input"},p=Math.floor,m=String.fromCharCode;function h(n){throw RangeError(f[n])}function g(n,e){for(var t=n.length,r=[];t--;)r[t]=e(n[t]);return r}function b(n,e){var t=n.split("@"),r="";return t.length>1&&(r=t[0]+"@",n=t[1]),r+g((n=n.replace(d,".")).split("."),e).join(".")}function v(n){for(var e,t,r=[],o=0,a=n.length;o=55296&&e<=56319&&o65535&&(e+=m((n-=65536)>>>10&1023|55296),n=56320|1023&n),e+m(n)})).join("")}function x(n,e){return n+22+75*(n<26)-((0!=e)<<5)}function w(n,e,t){var r=0;for(n=t?p(n/700):n>>1,n+=p(n/e);n>455;r+=s)n=p(n/35);return p(r+36*n/(n+38))}function k(n){var e,t,r,o,a,i,c,u,d,f,m,g=[],b=n.length,v=0,x=128,k=72;for((t=n.lastIndexOf("-"))<0&&(t=0),r=0;r=128&&h("not-basic"),g.push(n.charCodeAt(r));for(o=t>0?t+1:0;o=b&&h("invalid-input"),((u=(m=n.charCodeAt(o++))-48<10?m-22:m-65<26?m-65:m-97<26?m-97:s)>=s||u>p((l-v)/i))&&h("overflow"),v+=u*i,!(u<(d=c<=k?1:c>=k+26?26:c-k));c+=s)i>p(l/(f=s-d))&&h("overflow"),i*=f;k=w(v-a,e=g.length+1,0==a),p(v/e)>l-x&&h("overflow"),x+=p(v/e),v%=e,g.splice(v++,0,x)}return y(g)}function E(n){var 
e,t,r,o,a,i,c,u,d,f,g,b,y,k,E,C=[];for(b=(n=v(n)).length,e=128,t=0,a=72,i=0;i=e&&gp((l-t)/(y=r+1))&&h("overflow"),t+=(c-e)*y,e=c,i=0;il&&h("overflow"),g==e){for(u=t,d=s;!(u<(f=d<=a?1:d>=a+26?26:d-a));d+=s)E=u-f,k=s-f,C.push(m(x(f+E%k,0))),u=p(E/k);C.push(m(x(u,0))),a=w(t,y,r==o),t=0,++r}++t,++e}return C.join("")}i={version:"1.3.2",ucs2:{decode:v,encode:y},decode:k,encode:E,toASCII:function(n){return b(n,(function(n){return u.test(n)?"xn--"+E(n):n}))},toUnicode:function(n){return b(n,(function(n){return c.test(n)?k(n.slice(4).toLowerCase()):n}))}},void 0===(r=function(){return i}.call(e,t,e,n))||(n.exports=r)}()},2587:n=>{"use strict";function e(n,e){return Object.prototype.hasOwnProperty.call(n,e)}n.exports=function(n,t,r,o){t=t||"&",r=r||"=";var a={};if("string"!=typeof n||0===n.length)return a;var i=/\+/g;n=n.split(t);var l=1e3;o&&"number"==typeof o.maxKeys&&(l=o.maxKeys);var s=n.length;l>0&&s>l&&(s=l);for(var c=0;c=0?(u=m.substr(0,h),d=m.substr(h+1)):(u=m,d=""),f=decodeURIComponent(u),p=decodeURIComponent(d),e(a,f)?Array.isArray(a[f])?a[f].push(p):a[f]=[a[f],p]:a[f]=p}return a}},2361:n=>{"use strict";var e=function(n){switch(typeof n){case"string":return n;case"boolean":return n?"true":"false";case"number":return isFinite(n)?n:"";default:return""}};n.exports=function(n,t,r,o){return t=t||"&",r=r||"=",null===n&&(n=void 0),"object"==typeof n?Object.keys(n).map((function(o){var a=encodeURIComponent(e(o))+r;return Array.isArray(n[o])?n[o].map((function(n){return a+encodeURIComponent(e(n))})).join(t):a+encodeURIComponent(e(n[o]))})).join(t):o?encodeURIComponent(e(o))+r+encodeURIComponent(e(n)):""}},7673:(n,e,t)=>{"use strict";e.decode=e.parse=t(2587),e.encode=e.stringify=t(2361)},4448:(n,e,t)=>{"use strict";var r=t(7294),o=t(7418),a=t(3840);function i(n){for(var e="https://reactjs.org/docs/error-decoder.html?invariant="+n,t=1;t