From e00a2d0fc1a9f39f0f18a8082a32db8f9a2baf7c Mon Sep 17 00:00:00 2001
From: luca <681992+lukka@users.noreply.github.com>
Date: Tue, 18 Jul 2023 18:56:41 -0700
Subject: [PATCH] cmake v3.27.0

---
 .latest_cmake_version   |     2 +-
 .latestrc_cmake_version |     2 +-
 dist/index.js           | 52664 +++++++++++++++++++-------------------
 src/releases-catalog.ts |     2 +-
 4 files changed, 26381 insertions(+), 26289 deletions(-)

diff --git a/.latest_cmake_version b/.latest_cmake_version
index b40dc19..d558367 100644
--- a/.latest_cmake_version
+++ b/.latest_cmake_version
@@ -1 +1 @@
-3.26.4
\ No newline at end of file
+3.27.0
\ No newline at end of file
diff --git a/.latestrc_cmake_version b/.latestrc_cmake_version
index 0e56262..226737d 100644
--- a/.latestrc_cmake_version
+++ b/.latestrc_cmake_version
@@ -1 +1 @@
-3.27.0-rc2
\ No newline at end of file
+3.27.0-rc5
\ No newline at end of file
diff --git a/dist/index.js b/dist/index.js
index 790cdb6..f17ecc0 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -315,7 +315,7 @@ exports.main = main;
 Object.defineProperty(exports, "__esModule", ({ value: true }));
 exports.ninjaCatalog = exports.cmakeCatalog = void 0;
-exports.cmakeCatalog = { "3.27.0-rc2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc2/cmake-3.27.0-rc2-linux-aarch64.tar.gz", "fileName": "cmake-3.27.0-rc2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc2/cmake-3.27.0-rc2-linux-x86_64.tar.gz", "fileName": "cmake-3.27.0-rc2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc2/cmake-3.27.0-rc2-macos-universal.tar.gz", "fileName": "cmake-3.27.0-rc2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc2/cmake-3.27.0-rc2-windows-x86_64.zip", "fileName": "cmake-3.27.0-rc2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "latestrc": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc2/cmake-3.27.0-rc2-linux-aarch64.tar.gz", "fileName": "cmake-3.27.0-rc2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc2/cmake-3.27.0-rc2-linux-x86_64.tar.gz", "fileName": "cmake-3.27.0-rc2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc2/cmake-3.27.0-rc2-macos-universal.tar.gz", "fileName": "cmake-3.27.0-rc2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc2/cmake-3.27.0-rc2-windows-x86_64.zip", "fileName": "cmake-3.27.0-rc2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.27.0-rc1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc1/cmake-3.27.0-rc1-linux-aarch64.tar.gz", "fileName": "cmake-3.27.0-rc1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc1/cmake-3.27.0-rc1-linux-x86_64.tar.gz", "fileName": "cmake-3.27.0-rc1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url":
"https://github.com/Kitware/CMake/releases/download/v3.27.0-rc1/cmake-3.27.0-rc1-macos-universal.tar.gz", "fileName": "cmake-3.27.0-rc1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc1/cmake-3.27.0-rc1-windows-x86_64.zip", "fileName": "cmake-3.27.0-rc1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.4/cmake-3.26.4-linux-aarch64.tar.gz", "fileName": "cmake-3.26.4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.4/cmake-3.26.4-linux-x86_64.tar.gz", "fileName": "cmake-3.26.4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.4/cmake-3.26.4-macos-universal.tar.gz", "fileName": "cmake-3.26.4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.4/cmake-3.26.4-windows-x86_64.zip", "fileName": "cmake-3.26.4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "latest": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.4/cmake-3.26.4-linux-aarch64.tar.gz", "fileName": "cmake-3.26.4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.4/cmake-3.26.4-linux-x86_64.tar.gz", "fileName": "cmake-3.26.4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.4/cmake-3.26.4-macos-universal.tar.gz", "fileName": "cmake-3.26.4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.4/cmake-3.26.4-windows-x86_64.zip", "fileName": "cmake-3.26.4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.3/cmake-3.26.3-linux-aarch64.tar.gz", "fileName": "cmake-3.26.3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.3/cmake-3.26.3-linux-x86_64.tar.gz", "fileName": "cmake-3.26.3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.3/cmake-3.26.3-macos-universal.tar.gz", "fileName": "cmake-3.26.3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.3/cmake-3.26.3-windows-x86_64.zip", "fileName": "cmake-3.26.3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.2/cmake-3.26.2-linux-aarch64.tar.gz", "fileName": "cmake-3.26.2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.2/cmake-3.26.2-linux-x86_64.tar.gz", "fileName": "cmake-3.26.2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.26.2/cmake-3.26.2-macos-universal.tar.gz", "fileName": "cmake-3.26.2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.2/cmake-3.26.2-windows-x86_64.zip", "fileName": "cmake-3.26.2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.1/cmake-3.26.1-linux-aarch64.tar.gz", "fileName": "cmake-3.26.1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.1/cmake-3.26.1-linux-x86_64.tar.gz", "fileName": "cmake-3.26.1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.1/cmake-3.26.1-macos-universal.tar.gz", "fileName": "cmake-3.26.1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.1/cmake-3.26.1-windows-x86_64.zip", "fileName": "cmake-3.26.1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.0": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0/cmake-3.26.0-linux-aarch64.tar.gz", "fileName": "cmake-3.26.0-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0/cmake-3.26.0-linux-x86_64.tar.gz", "fileName": "cmake-3.26.0-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0/cmake-3.26.0-macos-universal.tar.gz", "fileName": "cmake-3.26.0-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0/cmake-3.26.0-windows-x86_64.zip", "fileName": "cmake-3.26.0-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.0-rc6": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc6/cmake-3.26.0-rc6-linux-aarch64.tar.gz", "fileName": "cmake-3.26.0-rc6-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc6/cmake-3.26.0-rc6-linux-x86_64.tar.gz", "fileName": "cmake-3.26.0-rc6-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc6/cmake-3.26.0-rc6-macos-universal.tar.gz", "fileName": "cmake-3.26.0-rc6-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc6/cmake-3.26.0-rc6-windows-x86_64.zip", "fileName": "cmake-3.26.0-rc6-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.25.3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.3/cmake-3.25.3-linux-aarch64.tar.gz", "fileName": "cmake-3.25.3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.3/cmake-3.25.3-linux-x86_64.tar.gz", "fileName": "cmake-3.25.3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.25.3/cmake-3.25.3-macos-universal.tar.gz", "fileName": "cmake-3.25.3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.3/cmake-3.25.3-windows-x86_64.zip", "fileName": "cmake-3.25.3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.4/cmake-3.24.4-linux-aarch64.tar.gz", "fileName": "cmake-3.24.4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.4/cmake-3.24.4-linux-x86_64.tar.gz", "fileName": "cmake-3.24.4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.4/cmake-3.24.4-macos-universal.tar.gz", "fileName": "cmake-3.24.4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.4/cmake-3.24.4-windows-x86_64.zip", "fileName": "cmake-3.24.4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.0-rc5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc5/cmake-3.26.0-rc5-linux-aarch64.tar.gz", "fileName": "cmake-3.26.0-rc5-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc5/cmake-3.26.0-rc5-linux-x86_64.tar.gz", "fileName": "cmake-3.26.0-rc5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc5/cmake-3.26.0-rc5-macos-universal.tar.gz", "fileName": "cmake-3.26.0-rc5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc5/cmake-3.26.0-rc5-windows-x86_64.zip", "fileName": "cmake-3.26.0-rc5-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.0-rc4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc4/cmake-3.26.0-rc4-linux-aarch64.tar.gz", "fileName": "cmake-3.26.0-rc4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc4/cmake-3.26.0-rc4-linux-x86_64.tar.gz", "fileName": "cmake-3.26.0-rc4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc4/cmake-3.26.0-rc4-macos-universal.tar.gz", "fileName": "cmake-3.26.0-rc4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc4/cmake-3.26.0-rc4-windows-x86_64.zip", "fileName": "cmake-3.26.0-rc4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.0-rc3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc3/cmake-3.26.0-rc3-linux-aarch64.tar.gz", "fileName": "cmake-3.26.0-rc3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc3/cmake-3.26.0-rc3-linux-x86_64.tar.gz", "fileName": "cmake-3.26.0-rc3-linux-x86_64.tar.gz", 
"binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc3/cmake-3.26.0-rc3-macos-universal.tar.gz", "fileName": "cmake-3.26.0-rc3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc3/cmake-3.26.0-rc3-windows-x86_64.zip", "fileName": "cmake-3.26.0-rc3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.0-rc2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc2/cmake-3.26.0-rc2-linux-aarch64.tar.gz", "fileName": "cmake-3.26.0-rc2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc2/cmake-3.26.0-rc2-linux-x86_64.tar.gz", "fileName": "cmake-3.26.0-rc2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc2/cmake-3.26.0-rc2-macos-universal.tar.gz", "fileName": "cmake-3.26.0-rc2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc2/cmake-3.26.0-rc2-windows-x86_64.zip", "fileName": "cmake-3.26.0-rc2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.0-rc1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc1/cmake-3.26.0-rc1-linux-aarch64.tar.gz", "fileName": "cmake-3.26.0-rc1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc1/cmake-3.26.0-rc1-linux-x86_64.tar.gz", "fileName": "cmake-3.26.0-rc1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc1/cmake-3.26.0-rc1-macos-universal.tar.gz", "fileName": "cmake-3.26.0-rc1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc1/cmake-3.26.0-rc1-windows-x86_64.zip", "fileName": "cmake-3.26.0-rc1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.25.2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.2/cmake-3.25.2-linux-aarch64.tar.gz", "fileName": "cmake-3.25.2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.2/cmake-3.25.2-linux-x86_64.tar.gz", "fileName": "cmake-3.25.2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.2/cmake-3.25.2-macos-universal.tar.gz", "fileName": "cmake-3.25.2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.2/cmake-3.25.2-windows-x86_64.zip", "fileName": "cmake-3.25.2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.25.1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.1/cmake-3.25.1-linux-aarch64.tar.gz", "fileName": "cmake-3.25.1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.25.1/cmake-3.25.1-linux-x86_64.tar.gz", "fileName": "cmake-3.25.1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.1/cmake-3.25.1-macos-universal.tar.gz", "fileName": "cmake-3.25.1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.1/cmake-3.25.1-windows-x86_64.zip", "fileName": "cmake-3.25.1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.25.0": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0/cmake-3.25.0-linux-aarch64.tar.gz", "fileName": "cmake-3.25.0-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0/cmake-3.25.0-linux-x86_64.tar.gz", "fileName": "cmake-3.25.0-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0/cmake-3.25.0-macos-universal.tar.gz", "fileName": "cmake-3.25.0-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0/cmake-3.25.0-windows-x86_64.zip", "fileName": "cmake-3.25.0-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.25.0-rc4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc4/cmake-3.25.0-rc4-linux-aarch64.tar.gz", "fileName": "cmake-3.25.0-rc4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc4/cmake-3.25.0-rc4-linux-x86_64.tar.gz", "fileName": "cmake-3.25.0-rc4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc4/cmake-3.25.0-rc4-macos-universal.tar.gz", "fileName": "cmake-3.25.0-rc4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc4/cmake-3.25.0-rc4-windows-x86_64.zip", "fileName": "cmake-3.25.0-rc4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.25.0-rc3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc3/cmake-3.25.0-rc3-linux-aarch64.tar.gz", "fileName": "cmake-3.25.0-rc3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc3/cmake-3.25.0-rc3-linux-x86_64.tar.gz", "fileName": "cmake-3.25.0-rc3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc3/cmake-3.25.0-rc3-macos-universal.tar.gz", "fileName": "cmake-3.25.0-rc3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc3/cmake-3.25.0-rc3-windows-x86_64.zip", "fileName": "cmake-3.25.0-rc3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.3/cmake-3.24.3-linux-aarch64.tar.gz", "fileName": "cmake-3.24.3-linux-aarch64.tar.gz", "binPath": "bin/", 
"dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.3/cmake-3.24.3-linux-x86_64.tar.gz", "fileName": "cmake-3.24.3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.3/cmake-3.24.3-macos-universal.tar.gz", "fileName": "cmake-3.24.3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.3/cmake-3.24.3-windows-x86_64.zip", "fileName": "cmake-3.24.3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.5/cmake-3.23.5-linux-aarch64.tar.gz", "fileName": "cmake-3.23.5-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.5/cmake-3.23.5-linux-x86_64.tar.gz", "fileName": "cmake-3.23.5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.5/cmake-3.23.5-macos-universal.tar.gz", "fileName": "cmake-3.23.5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.5/cmake-3.23.5-windows-x86_64.zip", "fileName": "cmake-3.23.5-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.25.0-rc2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc2/cmake-3.25.0-rc2-linux-aarch64.tar.gz", "fileName": "cmake-3.25.0-rc2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc2/cmake-3.25.0-rc2-linux-x86_64.tar.gz", "fileName": "cmake-3.25.0-rc2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc2/cmake-3.25.0-rc2-macos-universal.tar.gz", "fileName": "cmake-3.25.0-rc2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc2/cmake-3.25.0-rc2-windows-x86_64.zip", "fileName": "cmake-3.25.0-rc2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.25.0-rc1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc1/cmake-3.25.0-rc1-linux-aarch64.tar.gz", "fileName": "cmake-3.25.0-rc1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc1/cmake-3.25.0-rc1-linux-x86_64.tar.gz", "fileName": "cmake-3.25.0-rc1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc1/cmake-3.25.0-rc1-macos-universal.tar.gz", "fileName": "cmake-3.25.0-rc1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc1/cmake-3.25.0-rc1-windows-x86_64.zip", "fileName": "cmake-3.25.0-rc1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.4/cmake-3.23.4-linux-aarch64.tar.gz", "fileName": 
"cmake-3.23.4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.4/cmake-3.23.4-linux-x86_64.tar.gz", "fileName": "cmake-3.23.4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.4/cmake-3.23.4-macos-universal.tar.gz", "fileName": "cmake-3.23.4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.4/cmake-3.23.4-windows-x86_64.zip", "fileName": "cmake-3.23.4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-aarch64.tar.gz", "fileName": "cmake-3.24.2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.tar.gz", "fileName": "cmake-3.24.2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-macos-universal.tar.gz", "fileName": "cmake-3.24.2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-windows-x86_64.zip", "fileName": "cmake-3.24.2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.1/cmake-3.24.1-linux-aarch64.tar.gz", "fileName": "cmake-3.24.1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.1/cmake-3.24.1-linux-x86_64.tar.gz", "fileName": "cmake-3.24.1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.1/cmake-3.24.1-macos-universal.tar.gz", "fileName": "cmake-3.24.1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.1/cmake-3.24.1-windows-x86_64.zip", "fileName": "cmake-3.24.1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.0": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0/cmake-3.24.0-linux-aarch64.tar.gz", "fileName": "cmake-3.24.0-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0/cmake-3.24.0-linux-x86_64.tar.gz", "fileName": "cmake-3.24.0-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0/cmake-3.24.0-macos-universal.tar.gz", "fileName": "cmake-3.24.0-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0/cmake-3.24.0-windows-x86_64.zip", "fileName": "cmake-3.24.0-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.0-rc5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc5/cmake-3.24.0-rc5-linux-aarch64.tar.gz", "fileName": "cmake-3.24.0-rc5-linux-aarch64.tar.gz", "binPath": 
"bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc5/cmake-3.24.0-rc5-linux-x86_64.tar.gz", "fileName": "cmake-3.24.0-rc5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc5/cmake-3.24.0-rc5-macos-universal.tar.gz", "fileName": "cmake-3.24.0-rc5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc5/cmake-3.24.0-rc5-windows-x86_64.zip", "fileName": "cmake-3.24.0-rc5-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.3/cmake-3.23.3-linux-aarch64.tar.gz", "fileName": "cmake-3.23.3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.3/cmake-3.23.3-linux-x86_64.tar.gz", "fileName": "cmake-3.23.3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.3/cmake-3.23.3-macos-universal.tar.gz", "fileName": "cmake-3.23.3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.3/cmake-3.23.3-windows-x86_64.zip", "fileName": "cmake-3.23.3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.6": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.6/cmake-3.22.6-linux-aarch64.tar.gz", "fileName": "cmake-3.22.6-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.6/cmake-3.22.6-linux-x86_64.tar.gz", "fileName": "cmake-3.22.6-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.6/cmake-3.22.6-macos-universal.tar.gz", "fileName": "cmake-3.22.6-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.6/cmake-3.22.6-windows-x86_64.zip", "fileName": "cmake-3.22.6-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.0-rc4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc4/cmake-3.24.0-rc4-linux-aarch64.tar.gz", "fileName": "cmake-3.24.0-rc4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc4/cmake-3.24.0-rc4-linux-x86_64.tar.gz", "fileName": "cmake-3.24.0-rc4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc4/cmake-3.24.0-rc4-macos-universal.tar.gz", "fileName": "cmake-3.24.0-rc4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc4/cmake-3.24.0-rc4-windows-x86_64.zip", "fileName": "cmake-3.24.0-rc4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.0-rc3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc3/cmake-3.24.0-rc3-linux-aarch64.tar.gz", "fileName": 
"cmake-3.24.0-rc3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc3/cmake-3.24.0-rc3-linux-x86_64.tar.gz", "fileName": "cmake-3.24.0-rc3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc3/cmake-3.24.0-rc3-macos-universal.tar.gz", "fileName": "cmake-3.24.0-rc3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc3/cmake-3.24.0-rc3-windows-x86_64.zip", "fileName": "cmake-3.24.0-rc3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.0-rc2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc2/cmake-3.24.0-rc2-linux-aarch64.tar.gz", "fileName": "cmake-3.24.0-rc2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc2/cmake-3.24.0-rc2-linux-x86_64.tar.gz", "fileName": "cmake-3.24.0-rc2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc2/cmake-3.24.0-rc2-macos-universal.tar.gz", "fileName": "cmake-3.24.0-rc2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc2/cmake-3.24.0-rc2-windows-x86_64.zip", "fileName": "cmake-3.24.0-rc2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.0-rc1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc1/cmake-3.24.0-rc1-linux-aarch64.tar.gz", "fileName": "cmake-3.24.0-rc1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc1/cmake-3.24.0-rc1-linux-x86_64.tar.gz", "fileName": "cmake-3.24.0-rc1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc1/cmake-3.24.0-rc1-macos-universal.tar.gz", "fileName": "cmake-3.24.0-rc1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc1/cmake-3.24.0-rc1-windows-x86_64.zip", "fileName": "cmake-3.24.0-rc1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.5/cmake-3.22.5-linux-aarch64.tar.gz", "fileName": "cmake-3.22.5-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.5/cmake-3.22.5-linux-x86_64.tar.gz", "fileName": "cmake-3.22.5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.5/cmake-3.22.5-macos-universal.tar.gz", "fileName": "cmake-3.22.5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.5/cmake-3.22.5-windows-x86_64.zip", "fileName": "cmake-3.22.5-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.7": { "linux-arm64": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.21.7/cmake-3.21.7-linux-aarch64.tar.gz", "fileName": "cmake-3.21.7-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.7/cmake-3.21.7-linux-x86_64.tar.gz", "fileName": "cmake-3.21.7-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.7/cmake-3.21.7-macos-universal.tar.gz", "fileName": "cmake-3.21.7-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.7/cmake-3.21.7-windows-x86_64.zip", "fileName": "cmake-3.21.7-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.2/cmake-3.23.2-linux-aarch64.tar.gz", "fileName": "cmake-3.23.2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.2/cmake-3.23.2-linux-x86_64.tar.gz", "fileName": "cmake-3.23.2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.2/cmake-3.23.2-macos-universal.tar.gz", "fileName": "cmake-3.23.2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.2/cmake-3.23.2-windows-x86_64.zip", "fileName": "cmake-3.23.2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.1/cmake-3.23.1-linux-aarch64.tar.gz", "fileName": "cmake-3.23.1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.1/cmake-3.23.1-linux-x86_64.tar.gz", "fileName": "cmake-3.23.1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.1/cmake-3.23.1-macos-universal.tar.gz", "fileName": "cmake-3.23.1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.1/cmake-3.23.1-windows-x86_64.zip", "fileName": "cmake-3.23.1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.4/cmake-3.22.4-linux-aarch64.tar.gz", "fileName": "cmake-3.22.4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.4/cmake-3.22.4-linux-x86_64.tar.gz", "fileName": "cmake-3.22.4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.4/cmake-3.22.4-macos-universal.tar.gz", "fileName": "cmake-3.22.4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.4/cmake-3.22.4-windows-x86_64.zip", "fileName": "cmake-3.22.4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.0": { "linux-arm64": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.23.0/cmake-3.23.0-linux-aarch64.tar.gz", "fileName": "cmake-3.23.0-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0/cmake-3.23.0-linux-x86_64.tar.gz", "fileName": "cmake-3.23.0-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0/cmake-3.23.0-macos-universal.tar.gz", "fileName": "cmake-3.23.0-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0/cmake-3.23.0-windows-x86_64.zip", "fileName": "cmake-3.23.0-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.0-rc5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc5/cmake-3.23.0-rc5-linux-aarch64.tar.gz", "fileName": "cmake-3.23.0-rc5-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc5/cmake-3.23.0-rc5-linux-x86_64.tar.gz", "fileName": "cmake-3.23.0-rc5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc5/cmake-3.23.0-rc5-macos-universal.tar.gz", "fileName": "cmake-3.23.0-rc5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc5/cmake-3.23.0-rc5-windows-x86_64.zip", "fileName": "cmake-3.23.0-rc5-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.0-rc4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc4/cmake-3.23.0-rc4-linux-aarch64.tar.gz", "fileName": "cmake-3.23.0-rc4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc4/cmake-3.23.0-rc4-linux-x86_64.tar.gz", "fileName": "cmake-3.23.0-rc4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc4/cmake-3.23.0-rc4-macos-universal.tar.gz", "fileName": "cmake-3.23.0-rc4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc4/cmake-3.23.0-rc4-windows-x86_64.zip", "fileName": "cmake-3.23.0-rc4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.0-rc3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc3/cmake-3.23.0-rc3-linux-aarch64.tar.gz", "fileName": "cmake-3.23.0-rc3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc3/cmake-3.23.0-rc3-linux-x86_64.tar.gz", "fileName": "cmake-3.23.0-rc3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc3/cmake-3.23.0-rc3-macos-universal.tar.gz", "fileName": "cmake-3.23.0-rc3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc3/cmake-3.23.0-rc3-windows-x86_64.zip", "fileName": 
"cmake-3.23.0-rc3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.3/cmake-3.22.3-linux-aarch64.tar.gz", "fileName": "cmake-3.22.3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.3/cmake-3.22.3-linux-x86_64.tar.gz", "fileName": "cmake-3.22.3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.3/cmake-3.22.3-macos-universal.tar.gz", "fileName": "cmake-3.22.3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.3/cmake-3.22.3-windows-x86_64.zip", "fileName": "cmake-3.22.3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.6": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.6/cmake-3.21.6-linux-aarch64.tar.gz", "fileName": "cmake-3.21.6-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.6/cmake-3.21.6-linux-x86_64.tar.gz", "fileName": "cmake-3.21.6-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.6/cmake-3.21.6-macos-universal.tar.gz", "fileName": "cmake-3.21.6-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.6/cmake-3.21.6-windows-x86_64.zip", "fileName": "cmake-3.21.6-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.0-rc2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc2/cmake-3.23.0-rc2-linux-aarch64.tar.gz", "fileName": "cmake-3.23.0-rc2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc2/cmake-3.23.0-rc2-linux-x86_64.tar.gz", "fileName": "cmake-3.23.0-rc2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc2/cmake-3.23.0-rc2-macos-universal.tar.gz", "fileName": "cmake-3.23.0-rc2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc2/cmake-3.23.0-rc2-windows-x86_64.zip", "fileName": "cmake-3.23.0-rc2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.0-rc1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc1/cmake-3.23.0-rc1-linux-aarch64.tar.gz", "fileName": "cmake-3.23.0-rc1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc1/cmake-3.23.0-rc1-linux-x86_64.tar.gz", "fileName": "cmake-3.23.0-rc1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc1/cmake-3.23.0-rc1-macos-universal.tar.gz", "fileName": "cmake-3.23.0-rc1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.23.0-rc1/cmake-3.23.0-rc1-windows-x86_64.zip", "fileName": "cmake-3.23.0-rc1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.5/cmake-3.21.5-linux-aarch64.tar.gz", "fileName": "cmake-3.21.5-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.5/cmake-3.21.5-linux-x86_64.tar.gz", "fileName": "cmake-3.21.5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.5/cmake-3.21.5-macos-universal.tar.gz", "fileName": "cmake-3.21.5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.5/cmake-3.21.5-windows-x86_64.zip", "fileName": "cmake-3.21.5-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.2/cmake-3.22.2-linux-aarch64.tar.gz", "fileName": "cmake-3.22.2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.2/cmake-3.22.2-linux-x86_64.tar.gz", "fileName": "cmake-3.22.2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.2/cmake-3.22.2-macos-universal.tar.gz", "fileName": "cmake-3.22.2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.2/cmake-3.22.2-windows-x86_64.zip", "fileName": "cmake-3.22.2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.1/cmake-3.22.1-linux-aarch64.tar.gz", "fileName": "cmake-3.22.1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.1/cmake-3.22.1-linux-x86_64.tar.gz", "fileName": "cmake-3.22.1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.1/cmake-3.22.1-macos-universal.tar.gz", "fileName": "cmake-3.22.1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.1/cmake-3.22.1-windows-x86_64.zip", "fileName": "cmake-3.22.1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.0": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0/cmake-3.22.0-linux-aarch64.tar.gz", "fileName": "cmake-3.22.0-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0/cmake-3.22.0-linux-x86_64.tar.gz", "fileName": "cmake-3.22.0-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0/cmake-3.22.0-macos-universal.tar.gz", "fileName": "cmake-3.22.0-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.22.0/cmake-3.22.0-windows-x86_64.zip", "fileName": "cmake-3.22.0-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.0-rc3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc3/cmake-3.22.0-rc3-linux-aarch64.tar.gz", "fileName": "cmake-3.22.0-rc3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc3/cmake-3.22.0-rc3-linux-x86_64.tar.gz", "fileName": "cmake-3.22.0-rc3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc3/cmake-3.22.0-rc3-macos-universal.tar.gz", "fileName": "cmake-3.22.0-rc3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc3/cmake-3.22.0-rc3-windows-x86_64.zip", "fileName": "cmake-3.22.0-rc3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.0-rc2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc2/cmake-3.22.0-rc2-linux-aarch64.tar.gz", "fileName": "cmake-3.22.0-rc2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc2/cmake-3.22.0-rc2-linux-x86_64.tar.gz", "fileName": "cmake-3.22.0-rc2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc2/cmake-3.22.0-rc2-macos-universal.tar.gz", "fileName": "cmake-3.22.0-rc2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc2/cmake-3.22.0-rc2-windows-x86_64.zip", "fileName": "cmake-3.22.0-rc2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.4/cmake-3.21.4-linux-aarch64.tar.gz", "fileName": "cmake-3.21.4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.4/cmake-3.21.4-linux-x86_64.tar.gz", "fileName": "cmake-3.21.4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.4/cmake-3.21.4-macos-universal.tar.gz", "fileName": "cmake-3.21.4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.4/cmake-3.21.4-windows-x86_64.zip", "fileName": "cmake-3.21.4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.0-rc1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc1/cmake-3.22.0-rc1-linux-aarch64.tar.gz", "fileName": "cmake-3.22.0-rc1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc1/cmake-3.22.0-rc1-linux-x86_64.tar.gz", "fileName": "cmake-3.22.0-rc1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc1/cmake-3.22.0-rc1-macos-universal.tar.gz", "fileName": "cmake-3.22.0-rc1-macos-universal.tar.gz", 
"binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc1/cmake-3.22.0-rc1-windows-x86_64.zip", "fileName": "cmake-3.22.0-rc1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.3/cmake-3.21.3-linux-aarch64.tar.gz", "fileName": "cmake-3.21.3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.3/cmake-3.21.3-linux-x86_64.tar.gz", "fileName": "cmake-3.21.3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.3/cmake-3.21.3-macos-universal.tar.gz", "fileName": "cmake-3.21.3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.3/cmake-3.21.3-windows-x86_64.zip", "fileName": "cmake-3.21.3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.6": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.6/cmake-3.20.6-linux-aarch64.tar.gz", "fileName": "cmake-3.20.6-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.6/cmake-3.20.6-linux-x86_64.tar.gz", "fileName": "cmake-3.20.6-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.6/cmake-3.20.6-macos-universal.tar.gz", "fileName": "cmake-3.20.6-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.6/cmake-3.20.6-windows-x86_64.zip", "fileName": "cmake-3.20.6-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.2/cmake-3.21.2-linux-aarch64.tar.gz", "fileName": "cmake-3.21.2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.2/cmake-3.21.2-linux-x86_64.tar.gz", "fileName": "cmake-3.21.2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.2/cmake-3.21.2-macos-universal.tar.gz", "fileName": "cmake-3.21.2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.2/cmake-3.21.2-windows-x86_64.zip", "fileName": "cmake-3.21.2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.1/cmake-3.21.1-linux-aarch64.tar.gz", "fileName": "cmake-3.21.1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.1/cmake-3.21.1-linux-x86_64.tar.gz", "fileName": "cmake-3.21.1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.1/cmake-3.21.1-macos-universal.tar.gz", "fileName": "cmake-3.21.1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": 
".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.1/cmake-3.21.1-windows-x86_64.zip", "fileName": "cmake-3.21.1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.0": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0/cmake-3.21.0-linux-aarch64.tar.gz", "fileName": "cmake-3.21.0-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0/cmake-3.21.0-linux-x86_64.tar.gz", "fileName": "cmake-3.21.0-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0/cmake-3.21.0-macos-universal.tar.gz", "fileName": "cmake-3.21.0-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0/cmake-3.21.0-windows-x86_64.zip", "fileName": "cmake-3.21.0-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.0-rc3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc3/cmake-3.21.0-rc3-linux-aarch64.tar.gz", "fileName": "cmake-3.21.0-rc3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc3/cmake-3.21.0-rc3-linux-x86_64.tar.gz", "fileName": "cmake-3.21.0-rc3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc3/cmake-3.21.0-rc3-macos-universal.tar.gz", "fileName": "cmake-3.21.0-rc3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc3/cmake-3.21.0-rc3-windows-x86_64.zip", "fileName": "cmake-3.21.0-rc3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.0-rc2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc2/cmake-3.21.0-rc2-linux-aarch64.tar.gz", "fileName": "cmake-3.21.0-rc2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc2/cmake-3.21.0-rc2-linux-x86_64.tar.gz", "fileName": "cmake-3.21.0-rc2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc2/cmake-3.21.0-rc2-macos-universal.tar.gz", "fileName": "cmake-3.21.0-rc2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc2/cmake-3.21.0-rc2-windows-x86_64.zip", "fileName": "cmake-3.21.0-rc2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.0-rc1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc1/cmake-3.21.0-rc1-linux-aarch64.tar.gz", "fileName": "cmake-3.21.0-rc1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc1/cmake-3.21.0-rc1-linux-x86_64.tar.gz", "fileName": "cmake-3.21.0-rc1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc1/cmake-3.21.0-rc1-macos-universal.tar.gz", "fileName": 
"cmake-3.21.0-rc1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc1/cmake-3.21.0-rc1-windows-x86_64.zip", "fileName": "cmake-3.21.0-rc1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.5/cmake-3.20.5-linux-aarch64.tar.gz", "fileName": "cmake-3.20.5-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.5/cmake-3.20.5-linux-x86_64.tar.gz", "fileName": "cmake-3.20.5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.5/cmake-3.20.5-macos-universal.tar.gz", "fileName": "cmake-3.20.5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.5/cmake-3.20.5-windows-x86_64.zip", "fileName": "cmake-3.20.5-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.4/cmake-3.20.4-linux-aarch64.tar.gz", "fileName": "cmake-3.20.4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.4/cmake-3.20.4-linux-x86_64.tar.gz", "fileName": "cmake-3.20.4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.4/cmake-3.20.4-macos-universal.tar.gz", "fileName": "cmake-3.20.4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.4/cmake-3.20.4-windows-x86_64.zip", "fileName": "cmake-3.20.4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.3/cmake-3.20.3-linux-aarch64.tar.gz", "fileName": "cmake-3.20.3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.3/cmake-3.20.3-linux-x86_64.tar.gz", "fileName": "cmake-3.20.3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.3/cmake-3.20.3-macos-universal.tar.gz", "fileName": "cmake-3.20.3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.3/cmake-3.20.3-windows-x86_64.zip", "fileName": "cmake-3.20.3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.2/cmake-3.20.2-linux-aarch64.tar.gz", "fileName": "cmake-3.20.2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.2/cmake-3.20.2-linux-x86_64.tar.gz", "fileName": "cmake-3.20.2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.2/cmake-3.20.2-macos-universal.tar.gz", "fileName": "cmake-3.20.2-macos-universal.tar.gz", "binPath": 
"CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.2/cmake-3.20.2-windows-x86_64.zip", "fileName": "cmake-3.20.2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.1/cmake-3.20.1-linux-aarch64.tar.gz", "fileName": "cmake-3.20.1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.1/cmake-3.20.1-linux-x86_64.tar.gz", "fileName": "cmake-3.20.1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.1/cmake-3.20.1-macos-universal.tar.gz", "fileName": "cmake-3.20.1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.1/cmake-3.20.1-windows-x86_64.zip", "fileName": "cmake-3.20.1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.8": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.8/cmake-3.19.8-Linux-aarch64.tar.gz", "fileName": "cmake-3.19.8-Linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.8/cmake-3.19.8-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.8-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.8/cmake-3.19.8-macos-universal.tar.gz", "fileName": "cmake-3.19.8-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.8/cmake-3.19.8-win64-x64.zip", "fileName": "cmake-3.19.8-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.0": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0-linux-aarch64.tar.gz", "fileName": "cmake-3.20.0-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0-linux-x86_64.tar.gz", "fileName": "cmake-3.20.0-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0-macos-universal.tar.gz", "fileName": "cmake-3.20.0-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0-windows-x86_64.zip", "fileName": "cmake-3.20.0-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.0-rc5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc5/cmake-3.20.0-rc5-linux-aarch64.tar.gz", "fileName": "cmake-3.20.0-rc5-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc5/cmake-3.20.0-rc5-linux-x86_64.tar.gz", "fileName": "cmake-3.20.0-rc5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc5/cmake-3.20.0-rc5-macos-universal.tar.gz", "fileName": "cmake-3.20.0-rc5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": 
".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc5/cmake-3.20.0-rc5-windows-x86_64.zip", "fileName": "cmake-3.20.0-rc5-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.7": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.7/cmake-3.19.7-Linux-aarch64.tar.gz", "fileName": "cmake-3.19.7-Linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.7/cmake-3.19.7-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.7-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.7/cmake-3.19.7-macos-universal.tar.gz", "fileName": "cmake-3.19.7-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.7/cmake-3.19.7-win64-x64.zip", "fileName": "cmake-3.19.7-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.0-rc4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc4/cmake-3.20.0-rc4-linux-aarch64.tar.gz", "fileName": "cmake-3.20.0-rc4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc4/cmake-3.20.0-rc4-linux-x86_64.tar.gz", "fileName": "cmake-3.20.0-rc4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc4/cmake-3.20.0-rc4-macos-universal.tar.gz", "fileName": "cmake-3.20.0-rc4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc4/cmake-3.20.0-rc4-windows-x86_64.zip", "fileName": "cmake-3.20.0-rc4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.0-rc3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc3/cmake-3.20.0-rc3-linux-aarch64.tar.gz", "fileName": "cmake-3.20.0-rc3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc3/cmake-3.20.0-rc3-linux-x86_64.tar.gz", "fileName": "cmake-3.20.0-rc3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc3/cmake-3.20.0-rc3-macos-universal.tar.gz", "fileName": "cmake-3.20.0-rc3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc3/cmake-3.20.0-rc3-windows-x86_64.zip", "fileName": "cmake-3.20.0-rc3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.0-rc2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc2/cmake-3.20.0-rc2-linux-aarch64.tar.gz", "fileName": "cmake-3.20.0-rc2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc2/cmake-3.20.0-rc2-linux-x86_64.tar.gz", "fileName": "cmake-3.20.0-rc2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc2/cmake-3.20.0-rc2-macos-universal.tar.gz", "fileName": 
"cmake-3.20.0-rc2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc2/cmake-3.20.0-rc2-windows-x86_64.zip", "fileName": "cmake-3.20.0-rc2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.6": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.6/cmake-3.19.6-Linux-aarch64.tar.gz", "fileName": "cmake-3.19.6-Linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.6/cmake-3.19.6-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.6-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.6/cmake-3.19.6-macos-universal.tar.gz", "fileName": "cmake-3.19.6-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.6/cmake-3.19.6-win64-x64.zip", "fileName": "cmake-3.19.6-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.0-rc1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc1/cmake-3.20.0-rc1-linux-aarch64.tar.gz", "fileName": "cmake-3.20.0-rc1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc1/cmake-3.20.0-rc1-linux-x86_64.tar.gz", "fileName": "cmake-3.20.0-rc1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc1/cmake-3.20.0-rc1-macos-universal.tar.gz", "fileName": "cmake-3.20.0-rc1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc1/cmake-3.20.0-rc1-windows-x86_64.zip", "fileName": "cmake-3.20.0-rc1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.5/cmake-3.19.5-Linux-aarch64.tar.gz", "fileName": "cmake-3.19.5-Linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.5/cmake-3.19.5-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.5-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.5/cmake-3.19.5-macos-universal.tar.gz", "fileName": "cmake-3.19.5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.5/cmake-3.19.5-win64-x64.zip", "fileName": "cmake-3.19.5-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.6": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.6/cmake-3.18.6-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.6-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.6/cmake-3.18.6-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.6-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.6/cmake-3.18.6-win64-x64.zip", "fileName": 
"cmake-3.18.6-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.4/cmake-3.19.4-Linux-aarch64.tar.gz", "fileName": "cmake-3.19.4-Linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.4/cmake-3.19.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.4/cmake-3.19.4-macos-universal.tar.gz", "fileName": "cmake-3.19.4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.4/cmake-3.19.4-win64-x64.zip", "fileName": "cmake-3.19.4-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.3/cmake-3.19.3-Linux-aarch64.tar.gz", "fileName": "cmake-3.19.3-Linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.3/cmake-3.19.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.3/cmake-3.19.3-macos-universal.tar.gz", "fileName": "cmake-3.19.3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.3/cmake-3.19.3-win64-x64.zip", "fileName": "cmake-3.19.3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.2": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.2/cmake-3.19.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.2/cmake-3.19.2-macos-universal.tar.gz", "fileName": "cmake-3.19.2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.2/cmake-3.19.2-win64-x64.zip", "fileName": "cmake-3.19.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.1/cmake-3.19.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.19.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.1/cmake-3.19.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.1/cmake-3.19.1-win64-x64.zip", "fileName": "cmake-3.19.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.0": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0/cmake-3.19.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.19.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0/cmake-3.19.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.19.0/cmake-3.19.0-win64-x64.zip", "fileName": "cmake-3.19.0-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.5": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.5/cmake-3.18.5-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.5-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.5/cmake-3.18.5-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.5-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.5/cmake-3.18.5-win64-x64.zip", "fileName": "cmake-3.18.5-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.0-rc3": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc3/cmake-3.19.0-rc3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.19.0-rc3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc3/cmake-3.19.0-rc3-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.0-rc3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc3/cmake-3.19.0-rc3-win64-x64.zip", "fileName": "cmake-3.19.0-rc3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.0-rc2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc2/cmake-3.19.0-rc2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.19.0-rc2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc2/cmake-3.19.0-rc2-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.0-rc2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc2/cmake-3.19.0-rc2-win64-x64.zip", "fileName": "cmake-3.19.0-rc2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.0-rc1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc1/cmake-3.19.0-rc1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.19.0-rc1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc1/cmake-3.19.0-rc1-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.0-rc1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc1/cmake-3.19.0-rc1-win64-x64.zip", "fileName": "cmake-3.19.0-rc1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.4": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.4/cmake-3.18.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.4/cmake-3.18.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.4/cmake-3.18.4-win64-x64.zip", "fileName": "cmake-3.18.4-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.3": { "darwin": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.18.3/cmake-3.18.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.3/cmake-3.18.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.3/cmake-3.18.3-win64-x64.zip", "fileName": "cmake-3.18.3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.17.5": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.5/cmake-3.17.5-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.5-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.5/cmake-3.17.5-Linux-x86_64.tar.gz", "fileName": "cmake-3.17.5-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.5/cmake-3.17.5-win64-x64.zip", "fileName": "cmake-3.17.5-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.16.9": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.9/cmake-3.16.9-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.9-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.9/cmake-3.16.9-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.9-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.9/cmake-3.16.9-win64-x64.zip", "fileName": "cmake-3.16.9-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.2/cmake-3.18.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.2/cmake-3.18.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.2/cmake-3.18.2-win64-x64.zip", "fileName": "cmake-3.18.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.1/cmake-3.18.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.1/cmake-3.18.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.1/cmake-3.18.1-win64-x64.zip", "fileName": "cmake-3.18.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.17.4": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.4/cmake-3.17.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.4/cmake-3.17.4-Linux-x86_64.tar.gz", "fileName": 
"cmake-3.17.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.4/cmake-3.17.4-win64-x64.zip", "fileName": "cmake-3.17.4-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.0": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0/cmake-3.18.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0/cmake-3.18.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0/cmake-3.18.0-win64-x64.zip", "fileName": "cmake-3.18.0-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.0-rc4": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc4/cmake-3.18.0-rc4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.0-rc4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc4/cmake-3.18.0-rc4-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.0-rc4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc4/cmake-3.18.0-rc4-win64-x64.zip", "fileName": "cmake-3.18.0-rc4-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.0-rc3": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc3/cmake-3.18.0-rc3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.0-rc3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc3/cmake-3.18.0-rc3-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.0-rc3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc3/cmake-3.18.0-rc3-win64-x64.zip", "fileName": "cmake-3.18.0-rc3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.0-rc2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc2/cmake-3.18.0-rc2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.0-rc2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc2/cmake-3.18.0-rc2-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.0-rc2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc2/cmake-3.18.0-rc2-win64-x64.zip", "fileName": "cmake-3.18.0-rc2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.0-rc1": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc1/cmake-3.18.0-rc1-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.0-rc1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc1/cmake-3.18.0-rc1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.0-rc1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc1/cmake-3.18.0-rc1-win64-x64.zip", 
"fileName": "cmake-3.18.0-rc1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.16.8": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.8/cmake-3.16.8-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.8-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.8/cmake-3.16.8-win64-x64.zip", "fileName": "cmake-3.16.8-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.8/cmake-3.16.8-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.8-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.17.3": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.3/cmake-3.17.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.17.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.3/cmake-3.17.3-win32-x86.zip", "fileName": "cmake-3.17.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.3/cmake-3.17.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.16.7": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.7/cmake-3.16.7-win64-x64.zip", "fileName": "cmake-3.16.7-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.7/cmake-3.16.7-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.7-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.7/cmake-3.16.7-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.7-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.17.2": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.2/cmake-3.17.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.17.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.2/cmake-3.17.2-win64-x64.zip", "fileName": "cmake-3.17.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.2/cmake-3.17.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.16.6": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.6/cmake-3.16.6-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.6-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.6/cmake-3.16.6-win64-x64.zip", "fileName": "cmake-3.16.6-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.6/cmake-3.16.6-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.6-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.17.1": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.1/cmake-3.17.1-win64-x64.zip", "fileName": "cmake-3.17.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.17.1/cmake-3.17.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.17.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.1/cmake-3.17.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.17.0": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0/cmake-3.17.0-win64-x64.zip", "fileName": "cmake-3.17.0-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0/cmake-3.17.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0/cmake-3.17.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.17.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.17.0-rc3": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0-rc3/cmake-3.17.0-rc3-Linux-x86_64.tar.gz", "fileName": "cmake-3.17.0-rc3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0-rc3/cmake-3.17.0-rc3-win64-x64.zip", "fileName": "cmake-3.17.0-rc3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0-rc3/cmake-3.17.0-rc3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.0-rc3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.16.5": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.5/cmake-3.16.5-win32-x86.zip", "fileName": "cmake-3.16.5-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.5/cmake-3.16.5-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.5-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.5/cmake-3.16.5-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.5-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.17.0-rc2": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0-rc2/cmake-3.17.0-rc2-win64-x64.zip", "fileName": "cmake-3.17.0-rc2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0-rc2/cmake-3.17.0-rc2-Linux-x86_64.tar.gz", "fileName": "cmake-3.17.0-rc2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0-rc2/cmake-3.17.0-rc2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.0-rc2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.17.0-rc1": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0-rc1/cmake-3.17.0-rc1-Linux-x86_64.tar.gz", "fileName": "cmake-3.17.0-rc1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0-rc1/cmake-3.17.0-rc1-win32-x86.zip", "fileName": "cmake-3.17.0-rc1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.17.0-rc1/cmake-3.17.0-rc1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.0-rc1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.16.4": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.4/cmake-3.16.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.4/cmake-3.16.4-win64-x64.zip", "fileName": "cmake-3.16.4-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.4/cmake-3.16.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.15.7": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.7/cmake-3.15.7-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.7-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.7/cmake-3.15.7-win32-x86.zip", "fileName": "cmake-3.15.7-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.7/cmake-3.15.7-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.7-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.16.3": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.3/cmake-3.16.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.3/cmake-3.16.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.3/cmake-3.16.3-win32-x86.zip", "fileName": "cmake-3.16.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.16.2": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.2/cmake-3.16.2-win32-x86.zip", "fileName": "cmake-3.16.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.2/cmake-3.16.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.2/cmake-3.16.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.15.6": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.6/cmake-3.15.6-win32-x86.zip", "fileName": "cmake-3.15.6-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.6/cmake-3.15.6-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.6-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.6/cmake-3.15.6-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.6-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.16.1": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.1/cmake-3.16.1-win32-x86.zip", 
"fileName": "cmake-3.16.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.1/cmake-3.16.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.1/cmake-3.16.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.16.0": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0/cmake-3.16.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0/cmake-3.16.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0/cmake-3.16.0-win32-x86.zip", "fileName": "cmake-3.16.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.16.0-rc4": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc4/cmake-3.16.0-rc4-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.0-rc4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc4/cmake-3.16.0-rc4-win32-x86.zip", "fileName": "cmake-3.16.0-rc4-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc4/cmake-3.16.0-rc4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.0-rc4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.16.0-rc3": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc3/cmake-3.16.0-rc3-win64-x64.zip", "fileName": "cmake-3.16.0-rc3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc3/cmake-3.16.0-rc3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.0-rc3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc3/cmake-3.16.0-rc3-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.0-rc3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.15.5": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.5/cmake-3.15.5-win32-x86.zip", "fileName": "cmake-3.15.5-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.5/cmake-3.15.5-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.5-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.5/cmake-3.15.5-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.5-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.16.0-rc2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc2/cmake-3.16.0-rc2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.0-rc2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc2/cmake-3.16.0-rc2-win64-x64.zip", "fileName": 
"cmake-3.16.0-rc2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc2/cmake-3.16.0-rc2-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.0-rc2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.16.0-rc1": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc1/cmake-3.16.0-rc1-win32-x86.zip", "fileName": "cmake-3.16.0-rc1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc1/cmake-3.16.0-rc1-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.0-rc1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc1/cmake-3.16.0-rc1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.0-rc1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.15.4": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.4/cmake-3.15.4-win32-x86.zip", "fileName": "cmake-3.15.4-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.4/cmake-3.15.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.4/cmake-3.15.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.14.7": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.7/cmake-3.14.7-win64-x64.zip", "fileName": "cmake-3.14.7-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.7/cmake-3.14.7-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.7-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.7/cmake-3.14.7-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.7-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.15.3": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.3/cmake-3.15.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.3/cmake-3.15.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.3/cmake-3.15.3-win32-x86.zip", "fileName": "cmake-3.15.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.15.2": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.2/cmake-3.15.2-win64-x64.zip", "fileName": "cmake-3.15.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.2/cmake-3.15.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.2/cmake-3.15.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, 
"3.15.1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.1/cmake-3.15.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.1/cmake-3.15.1-win32-x86.zip", "fileName": "cmake-3.15.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.1/cmake-3.15.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.15.0": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0/cmake-3.15.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0/cmake-3.15.0-win32-x86.zip", "fileName": "cmake-3.15.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0/cmake-3.15.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.14.6": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.6/cmake-3.14.6-win32-x86.zip", "fileName": "cmake-3.14.6-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.6/cmake-3.14.6-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.6-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.6/cmake-3.14.6-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.6-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.15.0-rc4": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc4/cmake-3.15.0-rc4-win32-x86.zip", "fileName": "cmake-3.15.0-rc4-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc4/cmake-3.15.0-rc4-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.0-rc4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc4/cmake-3.15.0-rc4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.0-rc4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.15.0-rc3": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc3/cmake-3.15.0-rc3-win64-x64.zip", "fileName": "cmake-3.15.0-rc3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc3/cmake-3.15.0-rc3-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.0-rc3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc3/cmake-3.15.0-rc3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.0-rc3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.15.0-rc2": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc2/cmake-3.15.0-rc2-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.0-rc2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.15.0-rc2/cmake-3.15.0-rc2-win32-x86.zip", "fileName": "cmake-3.15.0-rc2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc2/cmake-3.15.0-rc2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.0-rc2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.15.0-rc1": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc1/cmake-3.15.0-rc1-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.0-rc1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc1/cmake-3.15.0-rc1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.0-rc1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc1/cmake-3.15.0-rc1-win64-x64.zip", "fileName": "cmake-3.15.0-rc1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.14.5": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.5/cmake-3.14.5-win64-x64.zip", "fileName": "cmake-3.14.5-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.5/cmake-3.14.5-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.5-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.5/cmake-3.14.5-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.5-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.14.4": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.4/cmake-3.14.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.4/cmake-3.14.4-win32-x86.zip", "fileName": "cmake-3.14.4-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.4/cmake-3.14.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.13.5": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.5/cmake-3.13.5-Linux-x86_64.tar.gz", "fileName": "cmake-3.13.5-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.5/cmake-3.13.5-win32-x86.zip", "fileName": "cmake-3.13.5-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.5/cmake-3.13.5-Darwin-x86_64.tar.gz", "fileName": "cmake-3.13.5-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.14.3": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.3/cmake-3.14.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.3/cmake-3.14.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.14.3/cmake-3.14.3-win64-x64.zip", "fileName": "cmake-3.14.3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.14.2": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.2/cmake-3.14.2-win64-x64.zip", "fileName": "cmake-3.14.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.2/cmake-3.14.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.2/cmake-3.14.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.14.1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.1/cmake-3.14.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.1/cmake-3.14.1-win64-x64.zip", "fileName": "cmake-3.14.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.1/cmake-3.14.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.14.0": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0/cmake-3.14.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0/cmake-3.14.0-win64-x64.zip", "fileName": "cmake-3.14.0-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0/cmake-3.14.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.14.0-rc4": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc4/cmake-3.14.0-rc4-win64-x64.zip", "fileName": "cmake-3.14.0-rc4-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc4/cmake-3.14.0-rc4-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.0-rc4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc4/cmake-3.14.0-rc4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.0-rc4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.14.0-rc3": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc3/cmake-3.14.0-rc3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.0-rc3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc3/cmake-3.14.0-rc3-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.0-rc3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc3/cmake-3.14.0-rc3-win32-x86.zip", "fileName": "cmake-3.14.0-rc3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.14.0-rc2": { "darwin": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.14.0-rc2/cmake-3.14.0-rc2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.0-rc2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc2/cmake-3.14.0-rc2-win64-x64.zip", "fileName": "cmake-3.14.0-rc2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc2/cmake-3.14.0-rc2-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.0-rc2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.14.0-rc1": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc1/cmake-3.14.0-rc1-win32-x86.zip", "fileName": "cmake-3.14.0-rc1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc1/cmake-3.14.0-rc1-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.0-rc1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc1/cmake-3.14.0-rc1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.0-rc1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.13.4": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4-win32-x86.zip", "fileName": "cmake-3.13.4-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.13.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.13.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.13.3": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.3/cmake-3.13.3-win64-x64.zip", "fileName": "cmake-3.13.3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.3/cmake-3.13.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.13.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.3/cmake-3.13.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.13.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.13.2": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.2/cmake-3.13.2-win64-x64.zip", "fileName": "cmake-3.13.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.2/cmake-3.13.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.13.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.2/cmake-3.13.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.13.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.13.1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.1/cmake-3.13.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.13.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.13.1/cmake-3.13.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.13.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.1/cmake-3.13.1-win64-x64.zip", "fileName": "cmake-3.13.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.13.0": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.0/cmake-3.13.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.13.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.0/cmake-3.13.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.13.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.0/cmake-3.13.0-win64-x64.zip", "fileName": "cmake-3.13.0-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.12.4": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.4/cmake-3.12.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.12.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.4/cmake-3.12.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.12.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.4/cmake-3.12.4-win64-x64.zip", "fileName": "cmake-3.12.4-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.12.3": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.3/cmake-3.12.3-win64-x64.zip", "fileName": "cmake-3.12.3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.3/cmake-3.12.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.12.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.3/cmake-3.12.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.12.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.12.2": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.2/cmake-3.12.2-win64-x64.zip", "fileName": "cmake-3.12.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.2/cmake-3.12.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.12.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.2/cmake-3.12.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.12.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.12.1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.1/cmake-3.12.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.12.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.1/cmake-3.12.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.12.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.1/cmake-3.12.1-win32-x86.zip", "fileName": "cmake-3.12.1-win32-x86.zip", 
"binPath": "bin/", "dropSuffix": ".zip" } }, "3.12.0": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.0/cmake-3.12.0-win32-x86.zip", "fileName": "cmake-3.12.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.0/cmake-3.12.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.12.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.0/cmake-3.12.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.12.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.11.4": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.4/cmake-3.11.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.11.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.4/cmake-3.11.4-win32-x86.zip", "fileName": "cmake-3.11.4-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.4/cmake-3.11.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.11.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.11.3": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.3/cmake-3.11.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.11.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.3/cmake-3.11.3-win32-x86.zip", "fileName": "cmake-3.11.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.3/cmake-3.11.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.11.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.11.2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.2/cmake-3.11.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.11.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.2/cmake-3.11.2-win32-x86.zip", "fileName": "cmake-3.11.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.2/cmake-3.11.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.11.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.11.1": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.1/cmake-3.11.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.11.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.1/cmake-3.11.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.11.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.1/cmake-3.11.1-win64-x64.zip", "fileName": "cmake-3.11.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.11.0": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.0/cmake-3.11.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.11.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.11.0/cmake-3.11.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.11.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.0/cmake-3.11.0-win32-x86.zip", "fileName": "cmake-3.11.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.10.3": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.3/cmake-3.10.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.10.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.3/cmake-3.10.3-win32-x86.zip", "fileName": "cmake-3.10.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.3/cmake-3.10.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.10.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.10.2": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.2/cmake-3.10.2-win64-x64.zip", "fileName": "cmake-3.10.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.2/cmake-3.10.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.10.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.2/cmake-3.10.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.10.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.10.1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.1/cmake-3.10.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.10.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.1/cmake-3.10.1-win64-x64.zip", "fileName": "cmake-3.10.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.1/cmake-3.10.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.10.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.10.0": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.0/cmake-3.10.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.10.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.0/cmake-3.10.0-win32-x86.zip", "fileName": "cmake-3.10.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.0/cmake-3.10.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.10.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.9.6": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.6/cmake-3.9.6-Linux-x86_64.tar.gz", "fileName": "cmake-3.9.6-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.6/cmake-3.9.6-Darwin-x86_64.tar.gz", "fileName": "cmake-3.9.6-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.6/cmake-3.9.6-win64-x64.zip", "fileName": 
"cmake-3.9.6-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.9.5": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.5/cmake-3.9.5-win32-x86.zip", "fileName": "cmake-3.9.5-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.5/cmake-3.9.5-Darwin-x86_64.tar.gz", "fileName": "cmake-3.9.5-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.5/cmake-3.9.5-Linux-x86_64.tar.gz", "fileName": "cmake-3.9.5-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.9.4": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.4/cmake-3.9.4-win64-x64.zip", "fileName": "cmake-3.9.4-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.4/cmake-3.9.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.9.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.4/cmake-3.9.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.9.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.9.3": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.3/cmake-3.9.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.9.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.3/cmake-3.9.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.9.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.3/cmake-3.9.3-win64-x64.zip", "fileName": "cmake-3.9.3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.9.2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.2/cmake-3.9.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.9.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.2/cmake-3.9.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.9.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.2/cmake-3.9.2-win64-x64.zip", "fileName": "cmake-3.9.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.9.1": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.1/cmake-3.9.1-win32-x86.zip", "fileName": "cmake-3.9.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.1/cmake-3.9.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.9.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.1/cmake-3.9.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.9.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.9.0": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.0/cmake-3.9.0-win32-x86.zip", "fileName": "cmake-3.9.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.0/cmake-3.9.0-Darwin-x86_64.tar.gz", 
"fileName": "cmake-3.9.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.0/cmake-3.9.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.9.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.8.2": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.2/cmake-3.8.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.8.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.2/cmake-3.8.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.8.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.2/cmake-3.8.2-win64-x64.zip", "fileName": "cmake-3.8.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.8.1": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.1/cmake-3.8.1-win64-x64.zip", "fileName": "cmake-3.8.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.1/cmake-3.8.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.8.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.1/cmake-3.8.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.8.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.8.0": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.0/cmake-3.8.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.8.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.0/cmake-3.8.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.8.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.0/cmake-3.8.0-win64-x64.zip", "fileName": "cmake-3.8.0-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.7.2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.7.2/cmake-3.7.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.7.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.7.2/cmake-3.7.2-win64-x64.zip", "fileName": "cmake-3.7.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.7.2/cmake-3.7.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.7.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.7.1": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.7.1/cmake-3.7.1-win64-x64.zip", "fileName": "cmake-3.7.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.7.1/cmake-3.7.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.7.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.7.1/cmake-3.7.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.7.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.7.0": { "win32": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.7.0/cmake-3.7.0-win64-x64.zip", "fileName": "cmake-3.7.0-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.7.0/cmake-3.7.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.7.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.7.0/cmake-3.7.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.7.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.6.3": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.3/cmake-3.6.3-Linux-i386.tar.gz", "fileName": "cmake-3.6.3-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.3/cmake-3.6.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.6.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.3/cmake-3.6.3-win32-x86.zip", "fileName": "cmake-3.6.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.6.2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.2/cmake-3.6.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.6.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.2/cmake-3.6.2-Linux-i386.tar.gz", "fileName": "cmake-3.6.2-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.2/cmake-3.6.2-win64-x64.zip", "fileName": "cmake-3.6.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.6.1": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.1/cmake-3.6.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.6.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.1/cmake-3.6.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.6.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.1/cmake-3.6.1-win32-x86.zip", "fileName": "cmake-3.6.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.6.0": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.0/cmake-3.6.0-win64-x64.zip", "fileName": "cmake-3.6.0-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.0/cmake-3.6.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.6.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.0/cmake-3.6.0-Linux-i386.tar.gz", "fileName": "cmake-3.6.0-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.5.2": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.5.2/cmake-3.5.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.5.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.5.2/cmake-3.5.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.5.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": 
".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.5.2/cmake-3.5.2-win32-x86.zip", "fileName": "cmake-3.5.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.5.1": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.5.1/cmake-3.5.1-Linux-i386.tar.gz", "fileName": "cmake-3.5.1-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.5.1/cmake-3.5.1-win32-x86.zip", "fileName": "cmake-3.5.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.5.1/cmake-3.5.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.5.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.5.0": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.5.0/cmake-3.5.0-win32-x86.zip", "fileName": "cmake-3.5.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.5.0/cmake-3.5.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.5.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.5.0/cmake-3.5.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.5.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.4.3": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.3/cmake-3.4.3-Linux-i386.tar.gz", "fileName": "cmake-3.4.3-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.3/cmake-3.4.3-win32-x86.zip", "fileName": "cmake-3.4.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.3/cmake-3.4.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.4.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.4.2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.2/cmake-3.4.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.4.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.2/cmake-3.4.2-Linux-i386.tar.gz", "fileName": "cmake-3.4.2-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.2/cmake-3.4.2-win32-x86.zip", "fileName": "cmake-3.4.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.4.1": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.1/cmake-3.4.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.4.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.1/cmake-3.4.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.4.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.1/cmake-3.4.1-win32-x86.zip", "fileName": "cmake-3.4.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.4.0": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.0/cmake-3.4.0-win32-x86.zip", "fileName": "cmake-3.4.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, 
"linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.0/cmake-3.4.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.4.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.3.2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.2/cmake-3.3.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.3.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.2/cmake-3.3.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.3.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.2/cmake-3.3.2-win32-x86.zip", "fileName": "cmake-3.3.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.3.1": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.1/cmake-3.3.1-win32-x86.zip", "fileName": "cmake-3.3.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.1/cmake-3.3.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.3.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.1/cmake-3.3.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.3.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.3.0": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.0/cmake-3.3.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.3.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.0/cmake-3.3.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.3.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.0/cmake-3.3.0-win32-x86.zip", "fileName": "cmake-3.3.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.2.3": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.3/cmake-3.2.3-Linux-i386.tar.gz", "fileName": "cmake-3.2.3-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.3/cmake-3.2.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.2.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.3/cmake-3.2.3-win32-x86.zip", "fileName": "cmake-3.2.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.2.2": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.2/cmake-3.2.2-win32-x86.zip", "fileName": "cmake-3.2.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.2/cmake-3.2.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.2.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.2/cmake-3.2.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.2.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.2.1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.1/cmake-3.2.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.2.1-Darwin-x86_64.tar.gz", "binPath": 
"CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.1/cmake-3.2.1-win32-x86.zip", "fileName": "cmake-3.2.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.1/cmake-3.2.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.2.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.2.0": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.0/cmake-3.2.0-Linux-i386.tar.gz", "fileName": "cmake-3.2.0-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.0/cmake-3.2.0-win32-x86.zip", "fileName": "cmake-3.2.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.0/cmake-3.2.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.2.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.1.3": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.3/cmake-3.1.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.1.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.3/cmake-3.1.3-Linux-i386.tar.gz", "fileName": "cmake-3.1.3-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.3/cmake-3.1.3-win32-x86.zip", "fileName": "cmake-3.1.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.1.2": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.2/cmake-3.1.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.1.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.2/cmake-3.1.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.1.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.2/cmake-3.1.2-win32-x86.zip", "fileName": "cmake-3.1.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.1.1": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.1/cmake-3.1.1-Linux-i386.tar.gz", "fileName": "cmake-3.1.1-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.1/cmake-3.1.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.1.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.1/cmake-3.1.1-win32-x86.zip", "fileName": "cmake-3.1.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.1.0": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.0/cmake-3.1.0-Linux-i386.tar.gz", "fileName": "cmake-3.1.0-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.0/cmake-3.1.0-win32-x86.zip", "fileName": "cmake-3.1.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.0.2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.2/cmake-3.0.2-Darwin64-universal.tar.gz", "fileName": "cmake-3.0.2-Darwin64-universal.tar.gz", 
"binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.2/cmake-3.0.2-Linux-i386.tar.gz", "fileName": "cmake-3.0.2-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.2/cmake-3.0.2-win32-x86.zip", "fileName": "cmake-3.0.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.0.1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.1/cmake-3.0.1-Darwin64-universal.tar.gz", "fileName": "cmake-3.0.1-Darwin64-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.1/cmake-3.0.1-Linux-i386.tar.gz", "fileName": "cmake-3.0.1-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.1/cmake-3.0.1-win32-x86.zip", "fileName": "cmake-3.0.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.0.0": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.0/cmake-3.0.0-Darwin64-universal.tar.gz", "fileName": "cmake-3.0.0-Darwin64-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.0/cmake-3.0.0-Linux-i386.tar.gz", "fileName": "cmake-3.0.0-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.0/cmake-3.0.0-win32-x86.zip", "fileName": "cmake-3.0.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "2.8.12": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v2.8.12.2/cmake-2.8.12.2-Darwin64-universal.tar.gz", "fileName": "cmake-2.8.12.2-Darwin64-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v2.8.12.2/cmake-2.8.12.2-Linux-i386.tar.gz", "fileName": "cmake-2.8.12.2-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v2.8.12.2/cmake-2.8.12.2-win32-x86.zip", "fileName": "cmake-2.8.12.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "2.8.10": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v2.8.10.2/cmake-2.8.10.2-Darwin64-universal.tar.gz", "fileName": "cmake-2.8.10.2-Darwin64-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v2.8.10.2/cmake-2.8.10.2-Linux-i386.tar.gz", "fileName": "cmake-2.8.10.2-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v2.8.10.2/cmake-2.8.10.2-win32-x86.zip", "fileName": "cmake-2.8.10.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "2.6.4": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v2.6.4/cmake-2.6.4-Linux-i386.tar.gz", "fileName": "cmake-2.6.4-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v2.6.4/cmake-2.6.4-win32-x86.zip", "fileName": "cmake-2.6.4-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "2.4.8": { "linux": { "url": 
"https://github.com/Kitware/CMake/releases/download/v2.4.8/cmake-2.4.8-Linux-i386.tar.gz", "fileName": "cmake-2.4.8-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v2.4.8/cmake-2.4.8-win32-x86.zip", "fileName": "cmake-2.4.8-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } } }; +exports.cmakeCatalog = { "3.27.0": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0/cmake-3.27.0-linux-aarch64.tar.gz", "fileName": "cmake-3.27.0-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0/cmake-3.27.0-linux-x86_64.tar.gz", "fileName": "cmake-3.27.0-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0/cmake-3.27.0-macos-universal.tar.gz", "fileName": "cmake-3.27.0-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0/cmake-3.27.0-windows-x86_64.zip", "fileName": "cmake-3.27.0-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "latest": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0/cmake-3.27.0-linux-aarch64.tar.gz", "fileName": "cmake-3.27.0-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0/cmake-3.27.0-linux-x86_64.tar.gz", "fileName": "cmake-3.27.0-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0/cmake-3.27.0-macos-universal.tar.gz", "fileName": "cmake-3.27.0-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0/cmake-3.27.0-windows-x86_64.zip", "fileName": "cmake-3.27.0-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.27.0-rc5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc5/cmake-3.27.0-rc5-linux-aarch64.tar.gz", "fileName": "cmake-3.27.0-rc5-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc5/cmake-3.27.0-rc5-linux-x86_64.tar.gz", "fileName": "cmake-3.27.0-rc5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc5/cmake-3.27.0-rc5-macos-universal.tar.gz", "fileName": "cmake-3.27.0-rc5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc5/cmake-3.27.0-rc5-windows-x86_64.zip", "fileName": "cmake-3.27.0-rc5-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "latestrc": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc5/cmake-3.27.0-rc5-linux-aarch64.tar.gz", "fileName": "cmake-3.27.0-rc5-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc5/cmake-3.27.0-rc5-linux-x86_64.tar.gz", "fileName": "cmake-3.27.0-rc5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.27.0-rc5/cmake-3.27.0-rc5-macos-universal.tar.gz", "fileName": "cmake-3.27.0-rc5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc5/cmake-3.27.0-rc5-windows-x86_64.zip", "fileName": "cmake-3.27.0-rc5-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.27.0-rc4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc4/cmake-3.27.0-rc4-linux-aarch64.tar.gz", "fileName": "cmake-3.27.0-rc4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc4/cmake-3.27.0-rc4-linux-x86_64.tar.gz", "fileName": "cmake-3.27.0-rc4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc4/cmake-3.27.0-rc4-macos-universal.tar.gz", "fileName": "cmake-3.27.0-rc4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc4/cmake-3.27.0-rc4-windows-x86_64.zip", "fileName": "cmake-3.27.0-rc4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.27.0-rc3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc3/cmake-3.27.0-rc3-linux-aarch64.tar.gz", "fileName": "cmake-3.27.0-rc3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc3/cmake-3.27.0-rc3-linux-x86_64.tar.gz", "fileName": "cmake-3.27.0-rc3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc3/cmake-3.27.0-rc3-macos-universal.tar.gz", "fileName": "cmake-3.27.0-rc3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc3/cmake-3.27.0-rc3-windows-x86_64.zip", "fileName": "cmake-3.27.0-rc3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.27.0-rc2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc2/cmake-3.27.0-rc2-linux-aarch64.tar.gz", "fileName": "cmake-3.27.0-rc2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc2/cmake-3.27.0-rc2-linux-x86_64.tar.gz", "fileName": "cmake-3.27.0-rc2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc2/cmake-3.27.0-rc2-macos-universal.tar.gz", "fileName": "cmake-3.27.0-rc2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc2/cmake-3.27.0-rc2-windows-x86_64.zip", "fileName": "cmake-3.27.0-rc2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.27.0-rc1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc1/cmake-3.27.0-rc1-linux-aarch64.tar.gz", "fileName": "cmake-3.27.0-rc1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.27.0-rc1/cmake-3.27.0-rc1-linux-x86_64.tar.gz", "fileName": "cmake-3.27.0-rc1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc1/cmake-3.27.0-rc1-macos-universal.tar.gz", "fileName": "cmake-3.27.0-rc1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.27.0-rc1/cmake-3.27.0-rc1-windows-x86_64.zip", "fileName": "cmake-3.27.0-rc1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.4/cmake-3.26.4-linux-aarch64.tar.gz", "fileName": "cmake-3.26.4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.4/cmake-3.26.4-linux-x86_64.tar.gz", "fileName": "cmake-3.26.4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.4/cmake-3.26.4-macos-universal.tar.gz", "fileName": "cmake-3.26.4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.4/cmake-3.26.4-windows-x86_64.zip", "fileName": "cmake-3.26.4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.3/cmake-3.26.3-linux-aarch64.tar.gz", "fileName": "cmake-3.26.3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.3/cmake-3.26.3-linux-x86_64.tar.gz", "fileName": "cmake-3.26.3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.3/cmake-3.26.3-macos-universal.tar.gz", "fileName": "cmake-3.26.3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.3/cmake-3.26.3-windows-x86_64.zip", "fileName": "cmake-3.26.3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.2/cmake-3.26.2-linux-aarch64.tar.gz", "fileName": "cmake-3.26.2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.2/cmake-3.26.2-linux-x86_64.tar.gz", "fileName": "cmake-3.26.2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.2/cmake-3.26.2-macos-universal.tar.gz", "fileName": "cmake-3.26.2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.2/cmake-3.26.2-windows-x86_64.zip", "fileName": "cmake-3.26.2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.1/cmake-3.26.1-linux-aarch64.tar.gz", "fileName": "cmake-3.26.1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.26.1/cmake-3.26.1-linux-x86_64.tar.gz", "fileName": "cmake-3.26.1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.1/cmake-3.26.1-macos-universal.tar.gz", "fileName": "cmake-3.26.1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.1/cmake-3.26.1-windows-x86_64.zip", "fileName": "cmake-3.26.1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.0": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0/cmake-3.26.0-linux-aarch64.tar.gz", "fileName": "cmake-3.26.0-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0/cmake-3.26.0-linux-x86_64.tar.gz", "fileName": "cmake-3.26.0-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0/cmake-3.26.0-macos-universal.tar.gz", "fileName": "cmake-3.26.0-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0/cmake-3.26.0-windows-x86_64.zip", "fileName": "cmake-3.26.0-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.0-rc6": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc6/cmake-3.26.0-rc6-linux-aarch64.tar.gz", "fileName": "cmake-3.26.0-rc6-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc6/cmake-3.26.0-rc6-linux-x86_64.tar.gz", "fileName": "cmake-3.26.0-rc6-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc6/cmake-3.26.0-rc6-macos-universal.tar.gz", "fileName": "cmake-3.26.0-rc6-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc6/cmake-3.26.0-rc6-windows-x86_64.zip", "fileName": "cmake-3.26.0-rc6-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.25.3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.3/cmake-3.25.3-linux-aarch64.tar.gz", "fileName": "cmake-3.25.3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.3/cmake-3.25.3-linux-x86_64.tar.gz", "fileName": "cmake-3.25.3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.3/cmake-3.25.3-macos-universal.tar.gz", "fileName": "cmake-3.25.3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.3/cmake-3.25.3-windows-x86_64.zip", "fileName": "cmake-3.25.3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.4/cmake-3.24.4-linux-aarch64.tar.gz", "fileName": "cmake-3.24.4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.24.4/cmake-3.24.4-linux-x86_64.tar.gz", "fileName": "cmake-3.24.4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.4/cmake-3.24.4-macos-universal.tar.gz", "fileName": "cmake-3.24.4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.4/cmake-3.24.4-windows-x86_64.zip", "fileName": "cmake-3.24.4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.0-rc5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc5/cmake-3.26.0-rc5-linux-aarch64.tar.gz", "fileName": "cmake-3.26.0-rc5-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc5/cmake-3.26.0-rc5-linux-x86_64.tar.gz", "fileName": "cmake-3.26.0-rc5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc5/cmake-3.26.0-rc5-macos-universal.tar.gz", "fileName": "cmake-3.26.0-rc5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc5/cmake-3.26.0-rc5-windows-x86_64.zip", "fileName": "cmake-3.26.0-rc5-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.0-rc4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc4/cmake-3.26.0-rc4-linux-aarch64.tar.gz", "fileName": "cmake-3.26.0-rc4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc4/cmake-3.26.0-rc4-linux-x86_64.tar.gz", "fileName": "cmake-3.26.0-rc4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc4/cmake-3.26.0-rc4-macos-universal.tar.gz", "fileName": "cmake-3.26.0-rc4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc4/cmake-3.26.0-rc4-windows-x86_64.zip", "fileName": "cmake-3.26.0-rc4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.0-rc3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc3/cmake-3.26.0-rc3-linux-aarch64.tar.gz", "fileName": "cmake-3.26.0-rc3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc3/cmake-3.26.0-rc3-linux-x86_64.tar.gz", "fileName": "cmake-3.26.0-rc3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc3/cmake-3.26.0-rc3-macos-universal.tar.gz", "fileName": "cmake-3.26.0-rc3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc3/cmake-3.26.0-rc3-windows-x86_64.zip", "fileName": "cmake-3.26.0-rc3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.0-rc2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc2/cmake-3.26.0-rc2-linux-aarch64.tar.gz", "fileName": 
"cmake-3.26.0-rc2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc2/cmake-3.26.0-rc2-linux-x86_64.tar.gz", "fileName": "cmake-3.26.0-rc2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc2/cmake-3.26.0-rc2-macos-universal.tar.gz", "fileName": "cmake-3.26.0-rc2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc2/cmake-3.26.0-rc2-windows-x86_64.zip", "fileName": "cmake-3.26.0-rc2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.26.0-rc1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc1/cmake-3.26.0-rc1-linux-aarch64.tar.gz", "fileName": "cmake-3.26.0-rc1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc1/cmake-3.26.0-rc1-linux-x86_64.tar.gz", "fileName": "cmake-3.26.0-rc1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc1/cmake-3.26.0-rc1-macos-universal.tar.gz", "fileName": "cmake-3.26.0-rc1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.26.0-rc1/cmake-3.26.0-rc1-windows-x86_64.zip", "fileName": "cmake-3.26.0-rc1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.25.2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.2/cmake-3.25.2-linux-aarch64.tar.gz", "fileName": "cmake-3.25.2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.2/cmake-3.25.2-linux-x86_64.tar.gz", "fileName": "cmake-3.25.2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.2/cmake-3.25.2-macos-universal.tar.gz", "fileName": "cmake-3.25.2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.2/cmake-3.25.2-windows-x86_64.zip", "fileName": "cmake-3.25.2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.25.1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.1/cmake-3.25.1-linux-aarch64.tar.gz", "fileName": "cmake-3.25.1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.1/cmake-3.25.1-linux-x86_64.tar.gz", "fileName": "cmake-3.25.1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.1/cmake-3.25.1-macos-universal.tar.gz", "fileName": "cmake-3.25.1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.1/cmake-3.25.1-windows-x86_64.zip", "fileName": "cmake-3.25.1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.25.0": { "linux-arm64": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.25.0/cmake-3.25.0-linux-aarch64.tar.gz", "fileName": "cmake-3.25.0-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0/cmake-3.25.0-linux-x86_64.tar.gz", "fileName": "cmake-3.25.0-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0/cmake-3.25.0-macos-universal.tar.gz", "fileName": "cmake-3.25.0-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0/cmake-3.25.0-windows-x86_64.zip", "fileName": "cmake-3.25.0-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.25.0-rc4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc4/cmake-3.25.0-rc4-linux-aarch64.tar.gz", "fileName": "cmake-3.25.0-rc4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc4/cmake-3.25.0-rc4-linux-x86_64.tar.gz", "fileName": "cmake-3.25.0-rc4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc4/cmake-3.25.0-rc4-macos-universal.tar.gz", "fileName": "cmake-3.25.0-rc4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc4/cmake-3.25.0-rc4-windows-x86_64.zip", "fileName": "cmake-3.25.0-rc4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.25.0-rc3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc3/cmake-3.25.0-rc3-linux-aarch64.tar.gz", "fileName": "cmake-3.25.0-rc3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc3/cmake-3.25.0-rc3-linux-x86_64.tar.gz", "fileName": "cmake-3.25.0-rc3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc3/cmake-3.25.0-rc3-macos-universal.tar.gz", "fileName": "cmake-3.25.0-rc3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc3/cmake-3.25.0-rc3-windows-x86_64.zip", "fileName": "cmake-3.25.0-rc3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.3/cmake-3.24.3-linux-aarch64.tar.gz", "fileName": "cmake-3.24.3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.3/cmake-3.24.3-linux-x86_64.tar.gz", "fileName": "cmake-3.24.3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.3/cmake-3.24.3-macos-universal.tar.gz", "fileName": "cmake-3.24.3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.3/cmake-3.24.3-windows-x86_64.zip", "fileName": "cmake-3.24.3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, 
"3.23.5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.5/cmake-3.23.5-linux-aarch64.tar.gz", "fileName": "cmake-3.23.5-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.5/cmake-3.23.5-linux-x86_64.tar.gz", "fileName": "cmake-3.23.5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.5/cmake-3.23.5-macos-universal.tar.gz", "fileName": "cmake-3.23.5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.5/cmake-3.23.5-windows-x86_64.zip", "fileName": "cmake-3.23.5-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.25.0-rc2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc2/cmake-3.25.0-rc2-linux-aarch64.tar.gz", "fileName": "cmake-3.25.0-rc2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc2/cmake-3.25.0-rc2-linux-x86_64.tar.gz", "fileName": "cmake-3.25.0-rc2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc2/cmake-3.25.0-rc2-macos-universal.tar.gz", "fileName": "cmake-3.25.0-rc2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc2/cmake-3.25.0-rc2-windows-x86_64.zip", "fileName": "cmake-3.25.0-rc2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.25.0-rc1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc1/cmake-3.25.0-rc1-linux-aarch64.tar.gz", "fileName": "cmake-3.25.0-rc1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc1/cmake-3.25.0-rc1-linux-x86_64.tar.gz", "fileName": "cmake-3.25.0-rc1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc1/cmake-3.25.0-rc1-macos-universal.tar.gz", "fileName": "cmake-3.25.0-rc1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.25.0-rc1/cmake-3.25.0-rc1-windows-x86_64.zip", "fileName": "cmake-3.25.0-rc1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.4/cmake-3.23.4-linux-aarch64.tar.gz", "fileName": "cmake-3.23.4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.4/cmake-3.23.4-linux-x86_64.tar.gz", "fileName": "cmake-3.23.4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.4/cmake-3.23.4-macos-universal.tar.gz", "fileName": "cmake-3.23.4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.4/cmake-3.23.4-windows-x86_64.zip", "fileName": "cmake-3.23.4-windows-x86_64.zip", "binPath": 
"bin/", "dropSuffix": ".zip" } }, "3.24.2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-aarch64.tar.gz", "fileName": "cmake-3.24.2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.tar.gz", "fileName": "cmake-3.24.2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-macos-universal.tar.gz", "fileName": "cmake-3.24.2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-windows-x86_64.zip", "fileName": "cmake-3.24.2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.1/cmake-3.24.1-linux-aarch64.tar.gz", "fileName": "cmake-3.24.1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.1/cmake-3.24.1-linux-x86_64.tar.gz", "fileName": "cmake-3.24.1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.1/cmake-3.24.1-macos-universal.tar.gz", "fileName": "cmake-3.24.1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.1/cmake-3.24.1-windows-x86_64.zip", "fileName": "cmake-3.24.1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.0": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0/cmake-3.24.0-linux-aarch64.tar.gz", "fileName": "cmake-3.24.0-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0/cmake-3.24.0-linux-x86_64.tar.gz", "fileName": "cmake-3.24.0-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0/cmake-3.24.0-macos-universal.tar.gz", "fileName": "cmake-3.24.0-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0/cmake-3.24.0-windows-x86_64.zip", "fileName": "cmake-3.24.0-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.0-rc5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc5/cmake-3.24.0-rc5-linux-aarch64.tar.gz", "fileName": "cmake-3.24.0-rc5-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc5/cmake-3.24.0-rc5-linux-x86_64.tar.gz", "fileName": "cmake-3.24.0-rc5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc5/cmake-3.24.0-rc5-macos-universal.tar.gz", "fileName": "cmake-3.24.0-rc5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc5/cmake-3.24.0-rc5-windows-x86_64.zip", "fileName": "cmake-3.24.0-rc5-windows-x86_64.zip", "binPath": "bin/", 
"dropSuffix": ".zip" } }, "3.23.3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.3/cmake-3.23.3-linux-aarch64.tar.gz", "fileName": "cmake-3.23.3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.3/cmake-3.23.3-linux-x86_64.tar.gz", "fileName": "cmake-3.23.3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.3/cmake-3.23.3-macos-universal.tar.gz", "fileName": "cmake-3.23.3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.3/cmake-3.23.3-windows-x86_64.zip", "fileName": "cmake-3.23.3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.6": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.6/cmake-3.22.6-linux-aarch64.tar.gz", "fileName": "cmake-3.22.6-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.6/cmake-3.22.6-linux-x86_64.tar.gz", "fileName": "cmake-3.22.6-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.6/cmake-3.22.6-macos-universal.tar.gz", "fileName": "cmake-3.22.6-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.6/cmake-3.22.6-windows-x86_64.zip", "fileName": "cmake-3.22.6-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.0-rc4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc4/cmake-3.24.0-rc4-linux-aarch64.tar.gz", "fileName": "cmake-3.24.0-rc4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc4/cmake-3.24.0-rc4-linux-x86_64.tar.gz", "fileName": "cmake-3.24.0-rc4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc4/cmake-3.24.0-rc4-macos-universal.tar.gz", "fileName": "cmake-3.24.0-rc4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc4/cmake-3.24.0-rc4-windows-x86_64.zip", "fileName": "cmake-3.24.0-rc4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.0-rc3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc3/cmake-3.24.0-rc3-linux-aarch64.tar.gz", "fileName": "cmake-3.24.0-rc3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc3/cmake-3.24.0-rc3-linux-x86_64.tar.gz", "fileName": "cmake-3.24.0-rc3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc3/cmake-3.24.0-rc3-macos-universal.tar.gz", "fileName": "cmake-3.24.0-rc3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc3/cmake-3.24.0-rc3-windows-x86_64.zip", "fileName": 
"cmake-3.24.0-rc3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.0-rc2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc2/cmake-3.24.0-rc2-linux-aarch64.tar.gz", "fileName": "cmake-3.24.0-rc2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc2/cmake-3.24.0-rc2-linux-x86_64.tar.gz", "fileName": "cmake-3.24.0-rc2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc2/cmake-3.24.0-rc2-macos-universal.tar.gz", "fileName": "cmake-3.24.0-rc2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc2/cmake-3.24.0-rc2-windows-x86_64.zip", "fileName": "cmake-3.24.0-rc2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.24.0-rc1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc1/cmake-3.24.0-rc1-linux-aarch64.tar.gz", "fileName": "cmake-3.24.0-rc1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc1/cmake-3.24.0-rc1-linux-x86_64.tar.gz", "fileName": "cmake-3.24.0-rc1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc1/cmake-3.24.0-rc1-macos-universal.tar.gz", "fileName": "cmake-3.24.0-rc1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.24.0-rc1/cmake-3.24.0-rc1-windows-x86_64.zip", "fileName": "cmake-3.24.0-rc1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.5/cmake-3.22.5-linux-aarch64.tar.gz", "fileName": "cmake-3.22.5-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.5/cmake-3.22.5-linux-x86_64.tar.gz", "fileName": "cmake-3.22.5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.5/cmake-3.22.5-macos-universal.tar.gz", "fileName": "cmake-3.22.5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.5/cmake-3.22.5-windows-x86_64.zip", "fileName": "cmake-3.22.5-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.7": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.7/cmake-3.21.7-linux-aarch64.tar.gz", "fileName": "cmake-3.21.7-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.7/cmake-3.21.7-linux-x86_64.tar.gz", "fileName": "cmake-3.21.7-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.7/cmake-3.21.7-macos-universal.tar.gz", "fileName": "cmake-3.21.7-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.21.7/cmake-3.21.7-windows-x86_64.zip", "fileName": "cmake-3.21.7-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.2/cmake-3.23.2-linux-aarch64.tar.gz", "fileName": "cmake-3.23.2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.2/cmake-3.23.2-linux-x86_64.tar.gz", "fileName": "cmake-3.23.2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.2/cmake-3.23.2-macos-universal.tar.gz", "fileName": "cmake-3.23.2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.2/cmake-3.23.2-windows-x86_64.zip", "fileName": "cmake-3.23.2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.1/cmake-3.23.1-linux-aarch64.tar.gz", "fileName": "cmake-3.23.1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.1/cmake-3.23.1-linux-x86_64.tar.gz", "fileName": "cmake-3.23.1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.1/cmake-3.23.1-macos-universal.tar.gz", "fileName": "cmake-3.23.1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.1/cmake-3.23.1-windows-x86_64.zip", "fileName": "cmake-3.23.1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.4/cmake-3.22.4-linux-aarch64.tar.gz", "fileName": "cmake-3.22.4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.4/cmake-3.22.4-linux-x86_64.tar.gz", "fileName": "cmake-3.22.4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.4/cmake-3.22.4-macos-universal.tar.gz", "fileName": "cmake-3.22.4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.4/cmake-3.22.4-windows-x86_64.zip", "fileName": "cmake-3.22.4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.0": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0/cmake-3.23.0-linux-aarch64.tar.gz", "fileName": "cmake-3.23.0-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0/cmake-3.23.0-linux-x86_64.tar.gz", "fileName": "cmake-3.23.0-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0/cmake-3.23.0-macos-universal.tar.gz", "fileName": "cmake-3.23.0-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.23.0/cmake-3.23.0-windows-x86_64.zip", "fileName": "cmake-3.23.0-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.0-rc5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc5/cmake-3.23.0-rc5-linux-aarch64.tar.gz", "fileName": "cmake-3.23.0-rc5-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc5/cmake-3.23.0-rc5-linux-x86_64.tar.gz", "fileName": "cmake-3.23.0-rc5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc5/cmake-3.23.0-rc5-macos-universal.tar.gz", "fileName": "cmake-3.23.0-rc5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc5/cmake-3.23.0-rc5-windows-x86_64.zip", "fileName": "cmake-3.23.0-rc5-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.0-rc4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc4/cmake-3.23.0-rc4-linux-aarch64.tar.gz", "fileName": "cmake-3.23.0-rc4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc4/cmake-3.23.0-rc4-linux-x86_64.tar.gz", "fileName": "cmake-3.23.0-rc4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc4/cmake-3.23.0-rc4-macos-universal.tar.gz", "fileName": "cmake-3.23.0-rc4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc4/cmake-3.23.0-rc4-windows-x86_64.zip", "fileName": "cmake-3.23.0-rc4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.0-rc3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc3/cmake-3.23.0-rc3-linux-aarch64.tar.gz", "fileName": "cmake-3.23.0-rc3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc3/cmake-3.23.0-rc3-linux-x86_64.tar.gz", "fileName": "cmake-3.23.0-rc3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc3/cmake-3.23.0-rc3-macos-universal.tar.gz", "fileName": "cmake-3.23.0-rc3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc3/cmake-3.23.0-rc3-windows-x86_64.zip", "fileName": "cmake-3.23.0-rc3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.3/cmake-3.22.3-linux-aarch64.tar.gz", "fileName": "cmake-3.22.3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.3/cmake-3.22.3-linux-x86_64.tar.gz", "fileName": "cmake-3.22.3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.3/cmake-3.22.3-macos-universal.tar.gz", "fileName": 
"cmake-3.22.3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.3/cmake-3.22.3-windows-x86_64.zip", "fileName": "cmake-3.22.3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.6": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.6/cmake-3.21.6-linux-aarch64.tar.gz", "fileName": "cmake-3.21.6-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.6/cmake-3.21.6-linux-x86_64.tar.gz", "fileName": "cmake-3.21.6-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.6/cmake-3.21.6-macos-universal.tar.gz", "fileName": "cmake-3.21.6-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.6/cmake-3.21.6-windows-x86_64.zip", "fileName": "cmake-3.21.6-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.0-rc2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc2/cmake-3.23.0-rc2-linux-aarch64.tar.gz", "fileName": "cmake-3.23.0-rc2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc2/cmake-3.23.0-rc2-linux-x86_64.tar.gz", "fileName": "cmake-3.23.0-rc2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc2/cmake-3.23.0-rc2-macos-universal.tar.gz", "fileName": "cmake-3.23.0-rc2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc2/cmake-3.23.0-rc2-windows-x86_64.zip", "fileName": "cmake-3.23.0-rc2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.23.0-rc1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc1/cmake-3.23.0-rc1-linux-aarch64.tar.gz", "fileName": "cmake-3.23.0-rc1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc1/cmake-3.23.0-rc1-linux-x86_64.tar.gz", "fileName": "cmake-3.23.0-rc1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc1/cmake-3.23.0-rc1-macos-universal.tar.gz", "fileName": "cmake-3.23.0-rc1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.23.0-rc1/cmake-3.23.0-rc1-windows-x86_64.zip", "fileName": "cmake-3.23.0-rc1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.5/cmake-3.21.5-linux-aarch64.tar.gz", "fileName": "cmake-3.21.5-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.5/cmake-3.21.5-linux-x86_64.tar.gz", "fileName": "cmake-3.21.5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.21.5/cmake-3.21.5-macos-universal.tar.gz", "fileName": "cmake-3.21.5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.5/cmake-3.21.5-windows-x86_64.zip", "fileName": "cmake-3.21.5-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.2/cmake-3.22.2-linux-aarch64.tar.gz", "fileName": "cmake-3.22.2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.2/cmake-3.22.2-linux-x86_64.tar.gz", "fileName": "cmake-3.22.2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.2/cmake-3.22.2-macos-universal.tar.gz", "fileName": "cmake-3.22.2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.2/cmake-3.22.2-windows-x86_64.zip", "fileName": "cmake-3.22.2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.1/cmake-3.22.1-linux-aarch64.tar.gz", "fileName": "cmake-3.22.1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.1/cmake-3.22.1-linux-x86_64.tar.gz", "fileName": "cmake-3.22.1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.1/cmake-3.22.1-macos-universal.tar.gz", "fileName": "cmake-3.22.1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.1/cmake-3.22.1-windows-x86_64.zip", "fileName": "cmake-3.22.1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.0": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0/cmake-3.22.0-linux-aarch64.tar.gz", "fileName": "cmake-3.22.0-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0/cmake-3.22.0-linux-x86_64.tar.gz", "fileName": "cmake-3.22.0-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0/cmake-3.22.0-macos-universal.tar.gz", "fileName": "cmake-3.22.0-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0/cmake-3.22.0-windows-x86_64.zip", "fileName": "cmake-3.22.0-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.0-rc3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc3/cmake-3.22.0-rc3-linux-aarch64.tar.gz", "fileName": "cmake-3.22.0-rc3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc3/cmake-3.22.0-rc3-linux-x86_64.tar.gz", "fileName": "cmake-3.22.0-rc3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.22.0-rc3/cmake-3.22.0-rc3-macos-universal.tar.gz", "fileName": "cmake-3.22.0-rc3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc3/cmake-3.22.0-rc3-windows-x86_64.zip", "fileName": "cmake-3.22.0-rc3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.0-rc2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc2/cmake-3.22.0-rc2-linux-aarch64.tar.gz", "fileName": "cmake-3.22.0-rc2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc2/cmake-3.22.0-rc2-linux-x86_64.tar.gz", "fileName": "cmake-3.22.0-rc2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc2/cmake-3.22.0-rc2-macos-universal.tar.gz", "fileName": "cmake-3.22.0-rc2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc2/cmake-3.22.0-rc2-windows-x86_64.zip", "fileName": "cmake-3.22.0-rc2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.4/cmake-3.21.4-linux-aarch64.tar.gz", "fileName": "cmake-3.21.4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.4/cmake-3.21.4-linux-x86_64.tar.gz", "fileName": "cmake-3.21.4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.4/cmake-3.21.4-macos-universal.tar.gz", "fileName": "cmake-3.21.4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.4/cmake-3.21.4-windows-x86_64.zip", "fileName": "cmake-3.21.4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.22.0-rc1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc1/cmake-3.22.0-rc1-linux-aarch64.tar.gz", "fileName": "cmake-3.22.0-rc1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc1/cmake-3.22.0-rc1-linux-x86_64.tar.gz", "fileName": "cmake-3.22.0-rc1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc1/cmake-3.22.0-rc1-macos-universal.tar.gz", "fileName": "cmake-3.22.0-rc1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.22.0-rc1/cmake-3.22.0-rc1-windows-x86_64.zip", "fileName": "cmake-3.22.0-rc1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.3/cmake-3.21.3-linux-aarch64.tar.gz", "fileName": "cmake-3.21.3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.3/cmake-3.21.3-linux-x86_64.tar.gz", "fileName": "cmake-3.21.3-linux-x86_64.tar.gz", 
"binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.3/cmake-3.21.3-macos-universal.tar.gz", "fileName": "cmake-3.21.3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.3/cmake-3.21.3-windows-x86_64.zip", "fileName": "cmake-3.21.3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.6": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.6/cmake-3.20.6-linux-aarch64.tar.gz", "fileName": "cmake-3.20.6-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.6/cmake-3.20.6-linux-x86_64.tar.gz", "fileName": "cmake-3.20.6-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.6/cmake-3.20.6-macos-universal.tar.gz", "fileName": "cmake-3.20.6-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.6/cmake-3.20.6-windows-x86_64.zip", "fileName": "cmake-3.20.6-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.2/cmake-3.21.2-linux-aarch64.tar.gz", "fileName": "cmake-3.21.2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.2/cmake-3.21.2-linux-x86_64.tar.gz", "fileName": "cmake-3.21.2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.2/cmake-3.21.2-macos-universal.tar.gz", "fileName": "cmake-3.21.2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.2/cmake-3.21.2-windows-x86_64.zip", "fileName": "cmake-3.21.2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.1/cmake-3.21.1-linux-aarch64.tar.gz", "fileName": "cmake-3.21.1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.1/cmake-3.21.1-linux-x86_64.tar.gz", "fileName": "cmake-3.21.1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.1/cmake-3.21.1-macos-universal.tar.gz", "fileName": "cmake-3.21.1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.1/cmake-3.21.1-windows-x86_64.zip", "fileName": "cmake-3.21.1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.0": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0/cmake-3.21.0-linux-aarch64.tar.gz", "fileName": "cmake-3.21.0-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0/cmake-3.21.0-linux-x86_64.tar.gz", "fileName": "cmake-3.21.0-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.21.0/cmake-3.21.0-macos-universal.tar.gz", "fileName": "cmake-3.21.0-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0/cmake-3.21.0-windows-x86_64.zip", "fileName": "cmake-3.21.0-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.0-rc3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc3/cmake-3.21.0-rc3-linux-aarch64.tar.gz", "fileName": "cmake-3.21.0-rc3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc3/cmake-3.21.0-rc3-linux-x86_64.tar.gz", "fileName": "cmake-3.21.0-rc3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc3/cmake-3.21.0-rc3-macos-universal.tar.gz", "fileName": "cmake-3.21.0-rc3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc3/cmake-3.21.0-rc3-windows-x86_64.zip", "fileName": "cmake-3.21.0-rc3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.0-rc2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc2/cmake-3.21.0-rc2-linux-aarch64.tar.gz", "fileName": "cmake-3.21.0-rc2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc2/cmake-3.21.0-rc2-linux-x86_64.tar.gz", "fileName": "cmake-3.21.0-rc2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc2/cmake-3.21.0-rc2-macos-universal.tar.gz", "fileName": "cmake-3.21.0-rc2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc2/cmake-3.21.0-rc2-windows-x86_64.zip", "fileName": "cmake-3.21.0-rc2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.21.0-rc1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc1/cmake-3.21.0-rc1-linux-aarch64.tar.gz", "fileName": "cmake-3.21.0-rc1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc1/cmake-3.21.0-rc1-linux-x86_64.tar.gz", "fileName": "cmake-3.21.0-rc1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc1/cmake-3.21.0-rc1-macos-universal.tar.gz", "fileName": "cmake-3.21.0-rc1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.21.0-rc1/cmake-3.21.0-rc1-windows-x86_64.zip", "fileName": "cmake-3.21.0-rc1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.5/cmake-3.20.5-linux-aarch64.tar.gz", "fileName": "cmake-3.20.5-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.5/cmake-3.20.5-linux-x86_64.tar.gz", "fileName": 
"cmake-3.20.5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.5/cmake-3.20.5-macos-universal.tar.gz", "fileName": "cmake-3.20.5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.5/cmake-3.20.5-windows-x86_64.zip", "fileName": "cmake-3.20.5-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.4/cmake-3.20.4-linux-aarch64.tar.gz", "fileName": "cmake-3.20.4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.4/cmake-3.20.4-linux-x86_64.tar.gz", "fileName": "cmake-3.20.4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.4/cmake-3.20.4-macos-universal.tar.gz", "fileName": "cmake-3.20.4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.4/cmake-3.20.4-windows-x86_64.zip", "fileName": "cmake-3.20.4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.3/cmake-3.20.3-linux-aarch64.tar.gz", "fileName": "cmake-3.20.3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.3/cmake-3.20.3-linux-x86_64.tar.gz", "fileName": "cmake-3.20.3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.3/cmake-3.20.3-macos-universal.tar.gz", "fileName": "cmake-3.20.3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.3/cmake-3.20.3-windows-x86_64.zip", "fileName": "cmake-3.20.3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.2/cmake-3.20.2-linux-aarch64.tar.gz", "fileName": "cmake-3.20.2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.2/cmake-3.20.2-linux-x86_64.tar.gz", "fileName": "cmake-3.20.2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.2/cmake-3.20.2-macos-universal.tar.gz", "fileName": "cmake-3.20.2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.2/cmake-3.20.2-windows-x86_64.zip", "fileName": "cmake-3.20.2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.1/cmake-3.20.1-linux-aarch64.tar.gz", "fileName": "cmake-3.20.1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.1/cmake-3.20.1-linux-x86_64.tar.gz", "fileName": "cmake-3.20.1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": 
".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.1/cmake-3.20.1-macos-universal.tar.gz", "fileName": "cmake-3.20.1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.1/cmake-3.20.1-windows-x86_64.zip", "fileName": "cmake-3.20.1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.8": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.8/cmake-3.19.8-Linux-aarch64.tar.gz", "fileName": "cmake-3.19.8-Linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.8/cmake-3.19.8-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.8-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.8/cmake-3.19.8-macos-universal.tar.gz", "fileName": "cmake-3.19.8-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.8/cmake-3.19.8-win64-x64.zip", "fileName": "cmake-3.19.8-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.0": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0-linux-aarch64.tar.gz", "fileName": "cmake-3.20.0-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0-linux-x86_64.tar.gz", "fileName": "cmake-3.20.0-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0-macos-universal.tar.gz", "fileName": "cmake-3.20.0-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0-windows-x86_64.zip", "fileName": "cmake-3.20.0-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.0-rc5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc5/cmake-3.20.0-rc5-linux-aarch64.tar.gz", "fileName": "cmake-3.20.0-rc5-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc5/cmake-3.20.0-rc5-linux-x86_64.tar.gz", "fileName": "cmake-3.20.0-rc5-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc5/cmake-3.20.0-rc5-macos-universal.tar.gz", "fileName": "cmake-3.20.0-rc5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc5/cmake-3.20.0-rc5-windows-x86_64.zip", "fileName": "cmake-3.20.0-rc5-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.7": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.7/cmake-3.19.7-Linux-aarch64.tar.gz", "fileName": "cmake-3.19.7-Linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.7/cmake-3.19.7-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.7-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { 
"url": "https://github.com/Kitware/CMake/releases/download/v3.19.7/cmake-3.19.7-macos-universal.tar.gz", "fileName": "cmake-3.19.7-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.7/cmake-3.19.7-win64-x64.zip", "fileName": "cmake-3.19.7-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.0-rc4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc4/cmake-3.20.0-rc4-linux-aarch64.tar.gz", "fileName": "cmake-3.20.0-rc4-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc4/cmake-3.20.0-rc4-linux-x86_64.tar.gz", "fileName": "cmake-3.20.0-rc4-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc4/cmake-3.20.0-rc4-macos-universal.tar.gz", "fileName": "cmake-3.20.0-rc4-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc4/cmake-3.20.0-rc4-windows-x86_64.zip", "fileName": "cmake-3.20.0-rc4-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.0-rc3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc3/cmake-3.20.0-rc3-linux-aarch64.tar.gz", "fileName": "cmake-3.20.0-rc3-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc3/cmake-3.20.0-rc3-linux-x86_64.tar.gz", "fileName": "cmake-3.20.0-rc3-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc3/cmake-3.20.0-rc3-macos-universal.tar.gz", "fileName": "cmake-3.20.0-rc3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc3/cmake-3.20.0-rc3-windows-x86_64.zip", "fileName": "cmake-3.20.0-rc3-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.0-rc2": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc2/cmake-3.20.0-rc2-linux-aarch64.tar.gz", "fileName": "cmake-3.20.0-rc2-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc2/cmake-3.20.0-rc2-linux-x86_64.tar.gz", "fileName": "cmake-3.20.0-rc2-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc2/cmake-3.20.0-rc2-macos-universal.tar.gz", "fileName": "cmake-3.20.0-rc2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc2/cmake-3.20.0-rc2-windows-x86_64.zip", "fileName": "cmake-3.20.0-rc2-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.6": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.6/cmake-3.19.6-Linux-aarch64.tar.gz", "fileName": "cmake-3.19.6-Linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.6/cmake-3.19.6-Linux-x86_64.tar.gz", "fileName": 
"cmake-3.19.6-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.6/cmake-3.19.6-macos-universal.tar.gz", "fileName": "cmake-3.19.6-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.6/cmake-3.19.6-win64-x64.zip", "fileName": "cmake-3.19.6-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.20.0-rc1": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc1/cmake-3.20.0-rc1-linux-aarch64.tar.gz", "fileName": "cmake-3.20.0-rc1-linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc1/cmake-3.20.0-rc1-linux-x86_64.tar.gz", "fileName": "cmake-3.20.0-rc1-linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc1/cmake-3.20.0-rc1-macos-universal.tar.gz", "fileName": "cmake-3.20.0-rc1-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.20.0-rc1/cmake-3.20.0-rc1-windows-x86_64.zip", "fileName": "cmake-3.20.0-rc1-windows-x86_64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.5": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.5/cmake-3.19.5-Linux-aarch64.tar.gz", "fileName": "cmake-3.19.5-Linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.5/cmake-3.19.5-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.5-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.5/cmake-3.19.5-macos-universal.tar.gz", "fileName": "cmake-3.19.5-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.5/cmake-3.19.5-win64-x64.zip", "fileName": "cmake-3.19.5-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.6": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.6/cmake-3.18.6-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.6-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.6/cmake-3.18.6-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.6-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.6/cmake-3.18.6-win64-x64.zip", "fileName": "cmake-3.18.6-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.4": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.4/cmake-3.19.4-Linux-aarch64.tar.gz", "fileName": "cmake-3.19.4-Linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.4/cmake-3.19.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.4/cmake-3.19.4-macos-universal.tar.gz", "fileName": "cmake-3.19.4-macos-universal.tar.gz", "binPath": 
"CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.4/cmake-3.19.4-win64-x64.zip", "fileName": "cmake-3.19.4-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.3": { "linux-arm64": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.3/cmake-3.19.3-Linux-aarch64.tar.gz", "fileName": "cmake-3.19.3-Linux-aarch64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.3/cmake-3.19.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.3/cmake-3.19.3-macos-universal.tar.gz", "fileName": "cmake-3.19.3-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.3/cmake-3.19.3-win64-x64.zip", "fileName": "cmake-3.19.3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.2": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.2/cmake-3.19.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.2/cmake-3.19.2-macos-universal.tar.gz", "fileName": "cmake-3.19.2-macos-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.2/cmake-3.19.2-win64-x64.zip", "fileName": "cmake-3.19.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.1/cmake-3.19.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.19.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.1/cmake-3.19.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.1/cmake-3.19.1-win64-x64.zip", "fileName": "cmake-3.19.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.0": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0/cmake-3.19.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.19.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0/cmake-3.19.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0/cmake-3.19.0-win64-x64.zip", "fileName": "cmake-3.19.0-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.5": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.5/cmake-3.18.5-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.5-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.5/cmake-3.18.5-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.5-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.18.5/cmake-3.18.5-win64-x64.zip", "fileName": "cmake-3.18.5-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.0-rc3": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc3/cmake-3.19.0-rc3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.19.0-rc3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc3/cmake-3.19.0-rc3-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.0-rc3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc3/cmake-3.19.0-rc3-win64-x64.zip", "fileName": "cmake-3.19.0-rc3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.0-rc2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc2/cmake-3.19.0-rc2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.19.0-rc2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc2/cmake-3.19.0-rc2-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.0-rc2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc2/cmake-3.19.0-rc2-win64-x64.zip", "fileName": "cmake-3.19.0-rc2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.19.0-rc1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc1/cmake-3.19.0-rc1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.19.0-rc1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc1/cmake-3.19.0-rc1-Linux-x86_64.tar.gz", "fileName": "cmake-3.19.0-rc1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.19.0-rc1/cmake-3.19.0-rc1-win64-x64.zip", "fileName": "cmake-3.19.0-rc1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.4": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.4/cmake-3.18.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.4/cmake-3.18.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.4/cmake-3.18.4-win64-x64.zip", "fileName": "cmake-3.18.4-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.3": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.3/cmake-3.18.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.3/cmake-3.18.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.3/cmake-3.18.3-win64-x64.zip", "fileName": "cmake-3.18.3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.17.5": { "darwin": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.17.5/cmake-3.17.5-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.5-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.5/cmake-3.17.5-Linux-x86_64.tar.gz", "fileName": "cmake-3.17.5-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.5/cmake-3.17.5-win64-x64.zip", "fileName": "cmake-3.17.5-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.16.9": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.9/cmake-3.16.9-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.9-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.9/cmake-3.16.9-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.9-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.9/cmake-3.16.9-win64-x64.zip", "fileName": "cmake-3.16.9-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.2/cmake-3.18.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.2/cmake-3.18.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.2/cmake-3.18.2-win64-x64.zip", "fileName": "cmake-3.18.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.1/cmake-3.18.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.1/cmake-3.18.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.1/cmake-3.18.1-win64-x64.zip", "fileName": "cmake-3.18.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.17.4": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.4/cmake-3.17.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.4/cmake-3.17.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.17.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.4/cmake-3.17.4-win64-x64.zip", "fileName": "cmake-3.17.4-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.0": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0/cmake-3.18.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0/cmake-3.18.0-win64-x64.zip", "fileName": "cmake-3.18.0-win64-x64.zip", 
"binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0/cmake-3.18.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.18.0-rc4": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc4/cmake-3.18.0-rc4-win32-x86.zip", "fileName": "cmake-3.18.0-rc4-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc4/cmake-3.18.0-rc4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.0-rc4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc4/cmake-3.18.0-rc4-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.0-rc4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.18.0-rc3": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc3/cmake-3.18.0-rc3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.0-rc3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc3/cmake-3.18.0-rc3-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.0-rc3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc3/cmake-3.18.0-rc3-win64-x64.zip", "fileName": "cmake-3.18.0-rc3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.18.0-rc2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc2/cmake-3.18.0-rc2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.0-rc2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc2/cmake-3.18.0-rc2-win32-x86.zip", "fileName": "cmake-3.18.0-rc2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc2/cmake-3.18.0-rc2-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.0-rc2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.18.0-rc1": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc1/cmake-3.18.0-rc1-Linux-x86_64.tar.gz", "fileName": "cmake-3.18.0-rc1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc1/cmake-3.18.0-rc1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.18.0-rc1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.18.0-rc1/cmake-3.18.0-rc1-win64-x64.zip", "fileName": "cmake-3.18.0-rc1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.16.8": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.8/cmake-3.16.8-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.8-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.8/cmake-3.16.8-win64-x64.zip", "fileName": "cmake-3.16.8-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.8/cmake-3.16.8-Linux-x86_64.tar.gz", 
"fileName": "cmake-3.16.8-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.17.3": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.3/cmake-3.17.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.17.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.3/cmake-3.17.3-win32-x86.zip", "fileName": "cmake-3.17.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.3/cmake-3.17.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.16.7": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.7/cmake-3.16.7-win64-x64.zip", "fileName": "cmake-3.16.7-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.7/cmake-3.16.7-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.7-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.7/cmake-3.16.7-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.7-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.17.2": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.2/cmake-3.17.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.17.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.2/cmake-3.17.2-win64-x64.zip", "fileName": "cmake-3.17.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.2/cmake-3.17.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.16.6": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.6/cmake-3.16.6-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.6-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.6/cmake-3.16.6-win64-x64.zip", "fileName": "cmake-3.16.6-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.6/cmake-3.16.6-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.6-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.17.1": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.1/cmake-3.17.1-win64-x64.zip", "fileName": "cmake-3.17.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.1/cmake-3.17.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.17.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.1/cmake-3.17.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.17.0": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0/cmake-3.17.0-win64-x64.zip", "fileName": "cmake-3.17.0-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.17.0/cmake-3.17.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0/cmake-3.17.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.17.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.17.0-rc3": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0-rc3/cmake-3.17.0-rc3-Linux-x86_64.tar.gz", "fileName": "cmake-3.17.0-rc3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0-rc3/cmake-3.17.0-rc3-win64-x64.zip", "fileName": "cmake-3.17.0-rc3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0-rc3/cmake-3.17.0-rc3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.0-rc3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.16.5": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.5/cmake-3.16.5-win32-x86.zip", "fileName": "cmake-3.16.5-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.5/cmake-3.16.5-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.5-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.5/cmake-3.16.5-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.5-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.17.0-rc2": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0-rc2/cmake-3.17.0-rc2-win64-x64.zip", "fileName": "cmake-3.17.0-rc2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0-rc2/cmake-3.17.0-rc2-Linux-x86_64.tar.gz", "fileName": "cmake-3.17.0-rc2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0-rc2/cmake-3.17.0-rc2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.0-rc2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.17.0-rc1": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0-rc1/cmake-3.17.0-rc1-Linux-x86_64.tar.gz", "fileName": "cmake-3.17.0-rc1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0-rc1/cmake-3.17.0-rc1-win32-x86.zip", "fileName": "cmake-3.17.0-rc1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.17.0-rc1/cmake-3.17.0-rc1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.17.0-rc1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.16.4": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.4/cmake-3.16.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.4/cmake-3.16.4-win64-x64.zip", "fileName": "cmake-3.16.4-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { 
"url": "https://github.com/Kitware/CMake/releases/download/v3.16.4/cmake-3.16.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.15.7": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.7/cmake-3.15.7-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.7-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.7/cmake-3.15.7-win32-x86.zip", "fileName": "cmake-3.15.7-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.7/cmake-3.15.7-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.7-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.16.3": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.3/cmake-3.16.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.3/cmake-3.16.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.3/cmake-3.16.3-win32-x86.zip", "fileName": "cmake-3.16.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.16.2": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.2/cmake-3.16.2-win32-x86.zip", "fileName": "cmake-3.16.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.2/cmake-3.16.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.2/cmake-3.16.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.15.6": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.6/cmake-3.15.6-win32-x86.zip", "fileName": "cmake-3.15.6-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.6/cmake-3.15.6-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.6-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.6/cmake-3.15.6-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.6-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.16.1": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.1/cmake-3.16.1-win32-x86.zip", "fileName": "cmake-3.16.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.1/cmake-3.16.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.1/cmake-3.16.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.16.0": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0/cmake-3.16.0-Darwin-x86_64.tar.gz", "fileName": 
"cmake-3.16.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0/cmake-3.16.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0/cmake-3.16.0-win32-x86.zip", "fileName": "cmake-3.16.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.16.0-rc4": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc4/cmake-3.16.0-rc4-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.0-rc4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc4/cmake-3.16.0-rc4-win32-x86.zip", "fileName": "cmake-3.16.0-rc4-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc4/cmake-3.16.0-rc4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.0-rc4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.16.0-rc3": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc3/cmake-3.16.0-rc3-win64-x64.zip", "fileName": "cmake-3.16.0-rc3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc3/cmake-3.16.0-rc3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.0-rc3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc3/cmake-3.16.0-rc3-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.0-rc3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.15.5": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.5/cmake-3.15.5-win32-x86.zip", "fileName": "cmake-3.15.5-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.5/cmake-3.15.5-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.5-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.5/cmake-3.15.5-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.5-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.16.0-rc2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc2/cmake-3.16.0-rc2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.0-rc2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc2/cmake-3.16.0-rc2-win64-x64.zip", "fileName": "cmake-3.16.0-rc2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc2/cmake-3.16.0-rc2-Linux-x86_64.tar.gz", "fileName": "cmake-3.16.0-rc2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.16.0-rc1": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc1/cmake-3.16.0-rc1-win32-x86.zip", "fileName": "cmake-3.16.0-rc1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc1/cmake-3.16.0-rc1-Linux-x86_64.tar.gz", "fileName": 
"cmake-3.16.0-rc1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.16.0-rc1/cmake-3.16.0-rc1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.16.0-rc1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.15.4": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.4/cmake-3.15.4-win32-x86.zip", "fileName": "cmake-3.15.4-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.4/cmake-3.15.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.4/cmake-3.15.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.14.7": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.7/cmake-3.14.7-win64-x64.zip", "fileName": "cmake-3.14.7-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.7/cmake-3.14.7-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.7-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.7/cmake-3.14.7-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.7-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.15.3": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.3/cmake-3.15.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.3/cmake-3.15.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.3/cmake-3.15.3-win32-x86.zip", "fileName": "cmake-3.15.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.15.2": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.2/cmake-3.15.2-win64-x64.zip", "fileName": "cmake-3.15.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.2/cmake-3.15.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.2/cmake-3.15.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.15.1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.1/cmake-3.15.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.1/cmake-3.15.1-win32-x86.zip", "fileName": "cmake-3.15.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.1/cmake-3.15.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.15.0": { 
"darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0/cmake-3.15.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0/cmake-3.15.0-win32-x86.zip", "fileName": "cmake-3.15.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0/cmake-3.15.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.14.6": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.6/cmake-3.14.6-win32-x86.zip", "fileName": "cmake-3.14.6-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.6/cmake-3.14.6-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.6-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.6/cmake-3.14.6-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.6-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.15.0-rc4": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc4/cmake-3.15.0-rc4-win32-x86.zip", "fileName": "cmake-3.15.0-rc4-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc4/cmake-3.15.0-rc4-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.0-rc4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc4/cmake-3.15.0-rc4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.0-rc4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.15.0-rc3": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc3/cmake-3.15.0-rc3-win64-x64.zip", "fileName": "cmake-3.15.0-rc3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc3/cmake-3.15.0-rc3-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.0-rc3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc3/cmake-3.15.0-rc3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.0-rc3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.15.0-rc2": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc2/cmake-3.15.0-rc2-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.0-rc2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc2/cmake-3.15.0-rc2-win32-x86.zip", "fileName": "cmake-3.15.0-rc2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc2/cmake-3.15.0-rc2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.0-rc2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.15.0-rc1": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc1/cmake-3.15.0-rc1-Linux-x86_64.tar.gz", "fileName": "cmake-3.15.0-rc1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, 
"darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc1/cmake-3.15.0-rc1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.15.0-rc1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.15.0-rc1/cmake-3.15.0-rc1-win64-x64.zip", "fileName": "cmake-3.15.0-rc1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.14.5": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.5/cmake-3.14.5-win64-x64.zip", "fileName": "cmake-3.14.5-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.5/cmake-3.14.5-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.5-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.5/cmake-3.14.5-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.5-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.14.4": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.4/cmake-3.14.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.4/cmake-3.14.4-win32-x86.zip", "fileName": "cmake-3.14.4-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.4/cmake-3.14.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.13.5": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.5/cmake-3.13.5-Linux-x86_64.tar.gz", "fileName": "cmake-3.13.5-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.5/cmake-3.13.5-win32-x86.zip", "fileName": "cmake-3.13.5-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.5/cmake-3.13.5-Darwin-x86_64.tar.gz", "fileName": "cmake-3.13.5-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.14.3": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.3/cmake-3.14.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.3/cmake-3.14.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.3/cmake-3.14.3-win64-x64.zip", "fileName": "cmake-3.14.3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.14.2": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.2/cmake-3.14.2-win64-x64.zip", "fileName": "cmake-3.14.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.2/cmake-3.14.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.2/cmake-3.14.2-Darwin-x86_64.tar.gz", "fileName": 
"cmake-3.14.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.14.1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.1/cmake-3.14.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.1/cmake-3.14.1-win64-x64.zip", "fileName": "cmake-3.14.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.1/cmake-3.14.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.14.0": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0/cmake-3.14.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0/cmake-3.14.0-win64-x64.zip", "fileName": "cmake-3.14.0-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0/cmake-3.14.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.14.0-rc4": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc4/cmake-3.14.0-rc4-win64-x64.zip", "fileName": "cmake-3.14.0-rc4-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc4/cmake-3.14.0-rc4-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.0-rc4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc4/cmake-3.14.0-rc4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.0-rc4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.14.0-rc3": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc3/cmake-3.14.0-rc3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.0-rc3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc3/cmake-3.14.0-rc3-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.0-rc3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc3/cmake-3.14.0-rc3-win32-x86.zip", "fileName": "cmake-3.14.0-rc3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.14.0-rc2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc2/cmake-3.14.0-rc2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.0-rc2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc2/cmake-3.14.0-rc2-win64-x64.zip", "fileName": "cmake-3.14.0-rc2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc2/cmake-3.14.0-rc2-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.0-rc2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.14.0-rc1": { "win32": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.14.0-rc1/cmake-3.14.0-rc1-win32-x86.zip", "fileName": "cmake-3.14.0-rc1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc1/cmake-3.14.0-rc1-Linux-x86_64.tar.gz", "fileName": "cmake-3.14.0-rc1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.14.0-rc1/cmake-3.14.0-rc1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.14.0-rc1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.13.4": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4-win32-x86.zip", "fileName": "cmake-3.13.4-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.13.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.13.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.13.3": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.3/cmake-3.13.3-win64-x64.zip", "fileName": "cmake-3.13.3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.3/cmake-3.13.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.13.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.3/cmake-3.13.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.13.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.13.2": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.2/cmake-3.13.2-win64-x64.zip", "fileName": "cmake-3.13.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.2/cmake-3.13.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.13.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.2/cmake-3.13.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.13.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.13.1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.1/cmake-3.13.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.13.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.1/cmake-3.13.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.13.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.1/cmake-3.13.1-win64-x64.zip", "fileName": "cmake-3.13.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.13.0": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.0/cmake-3.13.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.13.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.13.0/cmake-3.13.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.13.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.13.0/cmake-3.13.0-win64-x64.zip", "fileName": "cmake-3.13.0-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.12.4": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.4/cmake-3.12.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.12.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.4/cmake-3.12.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.12.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.4/cmake-3.12.4-win64-x64.zip", "fileName": "cmake-3.12.4-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.12.3": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.3/cmake-3.12.3-win64-x64.zip", "fileName": "cmake-3.12.3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.3/cmake-3.12.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.12.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.3/cmake-3.12.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.12.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.12.2": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.2/cmake-3.12.2-win64-x64.zip", "fileName": "cmake-3.12.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.2/cmake-3.12.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.12.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.2/cmake-3.12.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.12.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.12.1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.1/cmake-3.12.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.12.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.1/cmake-3.12.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.12.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.1/cmake-3.12.1-win32-x86.zip", "fileName": "cmake-3.12.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.12.0": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.0/cmake-3.12.0-win32-x86.zip", "fileName": "cmake-3.12.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.0/cmake-3.12.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.12.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.12.0/cmake-3.12.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.12.0-Linux-x86_64.tar.gz", 
"binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.11.4": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.4/cmake-3.11.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.11.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.4/cmake-3.11.4-win32-x86.zip", "fileName": "cmake-3.11.4-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.4/cmake-3.11.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.11.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.11.3": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.3/cmake-3.11.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.11.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.3/cmake-3.11.3-win32-x86.zip", "fileName": "cmake-3.11.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.3/cmake-3.11.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.11.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.11.2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.2/cmake-3.11.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.11.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.2/cmake-3.11.2-win32-x86.zip", "fileName": "cmake-3.11.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.2/cmake-3.11.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.11.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.11.1": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.1/cmake-3.11.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.11.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.1/cmake-3.11.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.11.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.1/cmake-3.11.1-win64-x64.zip", "fileName": "cmake-3.11.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.11.0": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.0/cmake-3.11.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.11.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.0/cmake-3.11.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.11.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.11.0/cmake-3.11.0-win32-x86.zip", "fileName": "cmake-3.11.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.10.3": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.3/cmake-3.10.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.10.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.10.3/cmake-3.10.3-win32-x86.zip", "fileName": "cmake-3.10.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.3/cmake-3.10.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.10.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.10.2": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.2/cmake-3.10.2-win64-x64.zip", "fileName": "cmake-3.10.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.2/cmake-3.10.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.10.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.2/cmake-3.10.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.10.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.10.1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.1/cmake-3.10.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.10.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.1/cmake-3.10.1-win64-x64.zip", "fileName": "cmake-3.10.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.1/cmake-3.10.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.10.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.10.0": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.0/cmake-3.10.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.10.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.0/cmake-3.10.0-win32-x86.zip", "fileName": "cmake-3.10.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.10.0/cmake-3.10.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.10.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.9.6": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.6/cmake-3.9.6-Linux-x86_64.tar.gz", "fileName": "cmake-3.9.6-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.6/cmake-3.9.6-Darwin-x86_64.tar.gz", "fileName": "cmake-3.9.6-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.6/cmake-3.9.6-win64-x64.zip", "fileName": "cmake-3.9.6-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.9.5": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.5/cmake-3.9.5-win32-x86.zip", "fileName": "cmake-3.9.5-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.5/cmake-3.9.5-Darwin-x86_64.tar.gz", "fileName": "cmake-3.9.5-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.5/cmake-3.9.5-Linux-x86_64.tar.gz", "fileName": "cmake-3.9.5-Linux-x86_64.tar.gz", "binPath": "bin/", 
"dropSuffix": ".tar.gz" } }, "3.9.4": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.4/cmake-3.9.4-win64-x64.zip", "fileName": "cmake-3.9.4-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.4/cmake-3.9.4-Darwin-x86_64.tar.gz", "fileName": "cmake-3.9.4-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.4/cmake-3.9.4-Linux-x86_64.tar.gz", "fileName": "cmake-3.9.4-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.9.3": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.3/cmake-3.9.3-Linux-x86_64.tar.gz", "fileName": "cmake-3.9.3-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.3/cmake-3.9.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.9.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.3/cmake-3.9.3-win64-x64.zip", "fileName": "cmake-3.9.3-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.9.2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.2/cmake-3.9.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.9.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.2/cmake-3.9.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.9.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.2/cmake-3.9.2-win64-x64.zip", "fileName": "cmake-3.9.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.9.1": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.1/cmake-3.9.1-win32-x86.zip", "fileName": "cmake-3.9.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.1/cmake-3.9.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.9.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.1/cmake-3.9.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.9.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.9.0": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.0/cmake-3.9.0-win32-x86.zip", "fileName": "cmake-3.9.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.0/cmake-3.9.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.9.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.9.0/cmake-3.9.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.9.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.8.2": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.2/cmake-3.8.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.8.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.2/cmake-3.8.2-Darwin-x86_64.tar.gz", "fileName": 
"cmake-3.8.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.2/cmake-3.8.2-win64-x64.zip", "fileName": "cmake-3.8.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.8.1": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.1/cmake-3.8.1-win64-x64.zip", "fileName": "cmake-3.8.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.1/cmake-3.8.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.8.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.1/cmake-3.8.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.8.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.8.0": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.0/cmake-3.8.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.8.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.0/cmake-3.8.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.8.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.8.0/cmake-3.8.0-win64-x64.zip", "fileName": "cmake-3.8.0-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.7.2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.7.2/cmake-3.7.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.7.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.7.2/cmake-3.7.2-win64-x64.zip", "fileName": "cmake-3.7.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.7.2/cmake-3.7.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.7.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.7.1": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.7.1/cmake-3.7.1-win64-x64.zip", "fileName": "cmake-3.7.1-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.7.1/cmake-3.7.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.7.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.7.1/cmake-3.7.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.7.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.7.0": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.7.0/cmake-3.7.0-win64-x64.zip", "fileName": "cmake-3.7.0-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.7.0/cmake-3.7.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.7.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.7.0/cmake-3.7.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.7.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.6.3": { "linux": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.6.3/cmake-3.6.3-Linux-i386.tar.gz", "fileName": "cmake-3.6.3-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.3/cmake-3.6.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.6.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.3/cmake-3.6.3-win32-x86.zip", "fileName": "cmake-3.6.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.6.2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.2/cmake-3.6.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.6.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.2/cmake-3.6.2-Linux-i386.tar.gz", "fileName": "cmake-3.6.2-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.2/cmake-3.6.2-win64-x64.zip", "fileName": "cmake-3.6.2-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.6.1": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.1/cmake-3.6.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.6.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.1/cmake-3.6.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.6.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.1/cmake-3.6.1-win32-x86.zip", "fileName": "cmake-3.6.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.6.0": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.0/cmake-3.6.0-win64-x64.zip", "fileName": "cmake-3.6.0-win64-x64.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.0/cmake-3.6.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.6.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.6.0/cmake-3.6.0-Linux-i386.tar.gz", "fileName": "cmake-3.6.0-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.5.2": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.5.2/cmake-3.5.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.5.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.5.2/cmake-3.5.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.5.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.5.2/cmake-3.5.2-win32-x86.zip", "fileName": "cmake-3.5.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.5.1": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.5.1/cmake-3.5.1-Linux-i386.tar.gz", "fileName": "cmake-3.5.1-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.5.1/cmake-3.5.1-win32-x86.zip", "fileName": "cmake-3.5.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": 
"https://github.com/Kitware/CMake/releases/download/v3.5.1/cmake-3.5.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.5.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.5.0": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.5.0/cmake-3.5.0-win32-x86.zip", "fileName": "cmake-3.5.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.5.0/cmake-3.5.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.5.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.5.0/cmake-3.5.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.5.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.4.3": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.3/cmake-3.4.3-Linux-i386.tar.gz", "fileName": "cmake-3.4.3-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.3/cmake-3.4.3-win32-x86.zip", "fileName": "cmake-3.4.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.3/cmake-3.4.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.4.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.4.2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.2/cmake-3.4.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.4.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.2/cmake-3.4.2-Linux-i386.tar.gz", "fileName": "cmake-3.4.2-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.2/cmake-3.4.2-win32-x86.zip", "fileName": "cmake-3.4.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.4.1": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.1/cmake-3.4.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.4.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.1/cmake-3.4.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.4.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.1/cmake-3.4.1-win32-x86.zip", "fileName": "cmake-3.4.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.4.0": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.0/cmake-3.4.0-win32-x86.zip", "fileName": "cmake-3.4.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.4.0/cmake-3.4.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.4.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.3.2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.2/cmake-3.3.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.3.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.2/cmake-3.3.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.3.2-Linux-x86_64.tar.gz", "binPath": "bin/", 
"dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.2/cmake-3.3.2-win32-x86.zip", "fileName": "cmake-3.3.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.3.1": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.1/cmake-3.3.1-win32-x86.zip", "fileName": "cmake-3.3.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.1/cmake-3.3.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.3.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.1/cmake-3.3.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.3.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.3.0": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.0/cmake-3.3.0-Linux-x86_64.tar.gz", "fileName": "cmake-3.3.0-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.0/cmake-3.3.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.3.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.3.0/cmake-3.3.0-win32-x86.zip", "fileName": "cmake-3.3.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.2.3": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.3/cmake-3.2.3-Linux-i386.tar.gz", "fileName": "cmake-3.2.3-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.3/cmake-3.2.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.2.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.3/cmake-3.2.3-win32-x86.zip", "fileName": "cmake-3.2.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.2.2": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.2/cmake-3.2.2-win32-x86.zip", "fileName": "cmake-3.2.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.2/cmake-3.2.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.2.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.2/cmake-3.2.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.2.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.2.1": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.1/cmake-3.2.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.2.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.1/cmake-3.2.1-win32-x86.zip", "fileName": "cmake-3.2.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.1/cmake-3.2.1-Linux-x86_64.tar.gz", "fileName": "cmake-3.2.1-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.2.0": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.0/cmake-3.2.0-Linux-i386.tar.gz", "fileName": "cmake-3.2.0-Linux-i386.tar.gz", "binPath": 
"bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.0/cmake-3.2.0-win32-x86.zip", "fileName": "cmake-3.2.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.2.0/cmake-3.2.0-Darwin-x86_64.tar.gz", "fileName": "cmake-3.2.0-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.1.3": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.3/cmake-3.1.3-Darwin-x86_64.tar.gz", "fileName": "cmake-3.1.3-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.3/cmake-3.1.3-Linux-i386.tar.gz", "fileName": "cmake-3.1.3-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.3/cmake-3.1.3-win32-x86.zip", "fileName": "cmake-3.1.3-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.1.2": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.2/cmake-3.1.2-Linux-x86_64.tar.gz", "fileName": "cmake-3.1.2-Linux-x86_64.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.2/cmake-3.1.2-Darwin-x86_64.tar.gz", "fileName": "cmake-3.1.2-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.2/cmake-3.1.2-win32-x86.zip", "fileName": "cmake-3.1.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.1.1": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.1/cmake-3.1.1-Linux-i386.tar.gz", "fileName": "cmake-3.1.1-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.1/cmake-3.1.1-Darwin-x86_64.tar.gz", "fileName": "cmake-3.1.1-Darwin-x86_64.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.1/cmake-3.1.1-win32-x86.zip", "fileName": "cmake-3.1.1-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.1.0": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.0/cmake-3.1.0-Linux-i386.tar.gz", "fileName": "cmake-3.1.0-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.1.0/cmake-3.1.0-win32-x86.zip", "fileName": "cmake-3.1.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "3.0.2": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.2/cmake-3.0.2-Darwin64-universal.tar.gz", "fileName": "cmake-3.0.2-Darwin64-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.2/cmake-3.0.2-win32-x86.zip", "fileName": "cmake-3.0.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.2/cmake-3.0.2-Linux-i386.tar.gz", "fileName": "cmake-3.0.2-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "3.0.1": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.1/cmake-3.0.1-win32-x86.zip", "fileName": "cmake-3.0.1-win32-x86.zip", "binPath": 
"bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.1/cmake-3.0.1-Linux-i386.tar.gz", "fileName": "cmake-3.0.1-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.1/cmake-3.0.1-Darwin64-universal.tar.gz", "fileName": "cmake-3.0.1-Darwin64-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "3.0.0": { "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.0/cmake-3.0.0-Darwin64-universal.tar.gz", "fileName": "cmake-3.0.0-Darwin64-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.0/cmake-3.0.0-win32-x86.zip", "fileName": "cmake-3.0.0-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v3.0.0/cmake-3.0.0-Linux-i386.tar.gz", "fileName": "cmake-3.0.0-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } }, "2.8.12": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v2.8.12.2/cmake-2.8.12.2-Linux-i386.tar.gz", "fileName": "cmake-2.8.12.2-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v2.8.12.2/cmake-2.8.12.2-Darwin64-universal.tar.gz", "fileName": "cmake-2.8.12.2-Darwin64-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v2.8.12.2/cmake-2.8.12.2-win32-x86.zip", "fileName": "cmake-2.8.12.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "2.8.10": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v2.8.10.2/cmake-2.8.10.2-win32-x86.zip", "fileName": "cmake-2.8.10.2-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v2.8.10.2/cmake-2.8.10.2-Linux-i386.tar.gz", "fileName": "cmake-2.8.10.2-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/CMake/releases/download/v2.8.10.2/cmake-2.8.10.2-Darwin64-universal.tar.gz", "fileName": "cmake-2.8.10.2-Darwin64-universal.tar.gz", "binPath": "CMake.app/Contents/bin/", "dropSuffix": ".tar.gz" } }, "2.6.4": { "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v2.6.4/cmake-2.6.4-Linux-i386.tar.gz", "fileName": "cmake-2.6.4-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v2.6.4/cmake-2.6.4-win32-x86.zip", "fileName": "cmake-2.6.4-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" } }, "2.4.8": { "win32": { "url": "https://github.com/Kitware/CMake/releases/download/v2.4.8/cmake-2.4.8-win32-x86.zip", "fileName": "cmake-2.4.8-win32-x86.zip", "binPath": "bin/", "dropSuffix": ".zip" }, "linux": { "url": "https://github.com/Kitware/CMake/releases/download/v2.4.8/cmake-2.4.8-Linux-i386.tar.gz", "fileName": "cmake-2.4.8-Linux-i386.tar.gz", "binPath": "bin/", "dropSuffix": ".tar.gz" } } }; exports.ninjaCatalog = { "1.11.1": { "linux-arm64": { "url": "https://github.com/Kitware/ninja/releases/download/v1.11.1.g95dee.kitware.jobserver-1/ninja-1.11.1.g95dee.kitware.jobserver-1_aarch64-linux-gnu.tar.gz", "fileName": "ninja-1.11.1.g95dee.kitware.jobserver-1_aarch64-linux-gnu.tar.gz", "binPath": 
"", "dropSuffix": ".tar.gz" }, "win32-arm64": { "url": "https://github.com/Kitware/ninja/releases/download/v1.11.1.g95dee.kitware.jobserver-1/ninja-1.11.1.g95dee.kitware.jobserver-1_arm64-pc-windows-msvc.zip", "fileName": "ninja-1.11.1.g95dee.kitware.jobserver-1_arm64-pc-windows-msvc.zip", "binPath": "", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/ninja/releases/download/v1.11.1.g95dee.kitware.jobserver-1/ninja-1.11.1.g95dee.kitware.jobserver-1_universal-apple-darwin.tar.gz", "fileName": "ninja-1.11.1.g95dee.kitware.jobserver-1_universal-apple-darwin.tar.gz", "binPath": "", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/ninja/releases/download/v1.11.1.g95dee.kitware.jobserver-1/ninja-1.11.1.g95dee.kitware.jobserver-1_x86_64-linux-gnu.tar.gz", "fileName": "ninja-1.11.1.g95dee.kitware.jobserver-1_x86_64-linux-gnu.tar.gz", "binPath": "", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/ninja/releases/download/v1.11.1.g95dee.kitware.jobserver-1/ninja-1.11.1.g95dee.kitware.jobserver-1_x86_64-pc-windows-msvc.zip", "fileName": "ninja-1.11.1.g95dee.kitware.jobserver-1_x86_64-pc-windows-msvc.zip", "binPath": "", "dropSuffix": ".zip" } }, "latest": { "linux-arm64": { "url": "https://github.com/Kitware/ninja/releases/download/v1.11.1.g95dee.kitware.jobserver-1/ninja-1.11.1.g95dee.kitware.jobserver-1_aarch64-linux-gnu.tar.gz", "fileName": "ninja-1.11.1.g95dee.kitware.jobserver-1_aarch64-linux-gnu.tar.gz", "binPath": "", "dropSuffix": ".tar.gz" }, "win32-arm64": { "url": "https://github.com/Kitware/ninja/releases/download/v1.11.1.g95dee.kitware.jobserver-1/ninja-1.11.1.g95dee.kitware.jobserver-1_arm64-pc-windows-msvc.zip", "fileName": "ninja-1.11.1.g95dee.kitware.jobserver-1_arm64-pc-windows-msvc.zip", "binPath": "", "dropSuffix": ".zip" }, "darwin": { "url": "https://github.com/Kitware/ninja/releases/download/v1.11.1.g95dee.kitware.jobserver-1/ninja-1.11.1.g95dee.kitware.jobserver-1_universal-apple-darwin.tar.gz", "fileName": "ninja-1.11.1.g95dee.kitware.jobserver-1_universal-apple-darwin.tar.gz", "binPath": "", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/ninja/releases/download/v1.11.1.g95dee.kitware.jobserver-1/ninja-1.11.1.g95dee.kitware.jobserver-1_x86_64-linux-gnu.tar.gz", "fileName": "ninja-1.11.1.g95dee.kitware.jobserver-1_x86_64-linux-gnu.tar.gz", "binPath": "", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/ninja/releases/download/v1.11.1.g95dee.kitware.jobserver-1/ninja-1.11.1.g95dee.kitware.jobserver-1_x86_64-pc-windows-msvc.zip", "fileName": "ninja-1.11.1.g95dee.kitware.jobserver-1_x86_64-pc-windows-msvc.zip", "binPath": "", "dropSuffix": ".zip" } }, "1.10.2": { "linux-arm64": { "url": "https://github.com/Kitware/ninja/releases/download/v1.10.2.g51db2.kitware.jobserver-1/ninja-1.10.2.g51db2.kitware.jobserver-1_aarch64-linux-gnu.tar.gz", "fileName": "ninja-1.10.2.g51db2.kitware.jobserver-1_aarch64-linux-gnu.tar.gz", "binPath": "", "dropSuffix": ".tar.gz" }, "darwin": { "url": "https://github.com/Kitware/ninja/releases/download/v1.10.2.g51db2.kitware.jobserver-1/ninja-1.10.2.g51db2.kitware.jobserver-1_universal-apple-darwin.tar.gz", "fileName": "ninja-1.10.2.g51db2.kitware.jobserver-1_universal-apple-darwin.tar.gz", "binPath": "", "dropSuffix": ".tar.gz" }, "linux": { "url": "https://github.com/Kitware/ninja/releases/download/v1.10.2.g51db2.kitware.jobserver-1/ninja-1.10.2.g51db2.kitware.jobserver-1_x86_64-linux-gnu.tar.gz", "fileName": 
"ninja-1.10.2.g51db2.kitware.jobserver-1_x86_64-linux-gnu.tar.gz", "binPath": "", "dropSuffix": ".tar.gz" }, "win32": { "url": "https://github.com/Kitware/ninja/releases/download/v1.10.2.g51db2.kitware.jobserver-1/ninja-1.10.2.g51db2.kitware.jobserver-1_x86_64-pc-windows-msvc.zip", "fileName": "ninja-1.10.2.g51db2.kitware.jobserver-1_x86_64-pc-windows-msvc.zip", "binPath": "", "dropSuffix": ".zip" } }, "1.10.0": { "linux": { "url": "https://github.com/Kitware/ninja/releases/download/v1.10.0.gfb670.kitware.jobserver-1/ninja-1.10.0.gfb670.kitware.jobserver-1_x86_64-linux-gnu.tar.gz", "fileName": "ninja-1.10.0.gfb670.kitware.jobserver-1_x86_64-linux-gnu.tar.gz", "binPath": "", "dropSuffix": ".tar.gz" } }, "1.9.0": { "linux": { "url": "https://github.com/Kitware/ninja/releases/download/v1.9.0.gad558.kitware.dyndep-1.jobserver-1/ninja-1.9.0.gad558.kitware.dyndep-1.jobserver-1_x86_64-linux-gnu.tar.gz", "fileName": "ninja-1.9.0.gad558.kitware.dyndep-1.jobserver-1_x86_64-linux-gnu.tar.gz", "binPath": "", "dropSuffix": ".tar.gz" } }, "1.8.2": { "linux": { "url": "https://github.com/Kitware/ninja/releases/download/v1.8.2.g972a7.kitware.dyndep-1/ninja-1.8.2.g972a7.kitware.dyndep-1_x86_64-linux-gnu.tar.gz", "fileName": "ninja-1.8.2.g972a7.kitware.dyndep-1_x86_64-linux-gnu.tar.gz", "binPath": "", "dropSuffix": ".tar.gz" } }, "1.7.2": { "linux": { "url": "https://github.com/Kitware/ninja/releases/download/v1.7.2.gcc0ea.kitware.dyndep-1/ninja-1.7.2.gcc0ea.kitware.dyndep-1_x86_64-linux-gnu.tar.gz", "fileName": "ninja-1.7.2.gcc0ea.kitware.dyndep-1_x86_64-linux-gnu.tar.gz", "binPath": "", "dropSuffix": ".tar.gz" } }, "1.7.1": { "linux": { "url": "https://github.com/Kitware/ninja/releases/download/v1.7.1.g7ca7f.kitware.dyndep-1/ninja-1.7.1.g7ca7f.kitware.dyndep-1_x86_64-linux-gnu.tar.gz", "fileName": "ninja-1.7.1.g7ca7f.kitware.dyndep-1_x86_64-linux-gnu.tar.gz", "binPath": "", "dropSuffix": ".tar.gz" } } }; //# sourceMappingURL=releases-catalog.js.map @@ -11132,7 +11132,7 @@ var coreAuth = __nccwpck_require__(9645); var os = __nccwpck_require__(2037); var http = __nccwpck_require__(3685); var https = __nccwpck_require__(5687); -var tough = __nccwpck_require__(8165); +var tough = __nccwpck_require__(7372); var abortController = __nccwpck_require__(2557); var tunnel = __nccwpck_require__(4294); var stream = __nccwpck_require__(2781); @@ -17147,5045 +17147,2565 @@ module.exports = function(dst, src) { /***/ }), -/***/ 8165: +/***/ 3415: /***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; -/*! - * Copyright (c) 2015-2020, Salesforce.com, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * 3. Neither the name of Salesforce.com nor the names of its contributors may - * be used to endorse or promote products derived from this software without - * specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ -const punycode = __nccwpck_require__(9540); -const urlParse = __nccwpck_require__(5682); -const pubsuffix = __nccwpck_require__(8292); -const Store = (__nccwpck_require__(7707)/* .Store */ .y); -const MemoryCookieStore = (__nccwpck_require__(6738)/* .MemoryCookieStore */ .m); -const pathMatch = (__nccwpck_require__(807)/* .pathMatch */ .U); -const validators = __nccwpck_require__(1598); -const VERSION = __nccwpck_require__(8742); -const { fromCallback } = __nccwpck_require__(9046); -const { getCustomInspectSymbol } = __nccwpck_require__(9375); -// From RFC6265 S4.1.1 -// note that it excludes \x3B ";" -const COOKIE_OCTETS = /^[\x21\x23-\x2B\x2D-\x3A\x3C-\x5B\x5D-\x7E]+$/; +Object.defineProperty(exports, "__esModule", ({ + value: true +})); +Object.defineProperty(exports, "v1", ({ + enumerable: true, + get: function () { + return _v.default; + } +})); +Object.defineProperty(exports, "v3", ({ + enumerable: true, + get: function () { + return _v2.default; + } +})); +Object.defineProperty(exports, "v4", ({ + enumerable: true, + get: function () { + return _v3.default; + } +})); +Object.defineProperty(exports, "v5", ({ + enumerable: true, + get: function () { + return _v4.default; + } +})); +Object.defineProperty(exports, "NIL", ({ + enumerable: true, + get: function () { + return _nil.default; + } +})); +Object.defineProperty(exports, "version", ({ + enumerable: true, + get: function () { + return _version.default; + } +})); +Object.defineProperty(exports, "validate", ({ + enumerable: true, + get: function () { + return _validate.default; + } +})); +Object.defineProperty(exports, "stringify", ({ + enumerable: true, + get: function () { + return _stringify.default; + } +})); +Object.defineProperty(exports, "parse", ({ + enumerable: true, + get: function () { + return _parse.default; + } +})); -const CONTROL_CHARS = /[\x00-\x1F]/; +var _v = _interopRequireDefault(__nccwpck_require__(4757)); -// From Chromium // '\r', '\n' and '\0' should be treated as a terminator in -// the "relaxed" mode, see: -// https://github.com/ChromiumWebApps/chromium/blob/b3d3b4da8bb94c1b2e061600df106d590fda3620/net/cookies/parsed_cookie.cc#L60 -const TERMINATORS = ["\n", "\r", "\0"]; +var _v2 = _interopRequireDefault(__nccwpck_require__(9982)); -// RFC6265 S4.1.1 defines path value as 'any CHAR except CTLs or ";"' -// Note ';' is \x3B -const PATH_VALUE = /[\x20-\x3A\x3C-\x7E]+/; +var _v3 = _interopRequireDefault(__nccwpck_require__(5393)); -// date-time parsing constants (RFC6265 S5.1.1) +var _v4 = _interopRequireDefault(__nccwpck_require__(8788)); -const DATE_DELIM = /[\x09\x20-\x2F\x3B-\x40\x5B-\x60\x7B-\x7E]/; +var _nil = _interopRequireDefault(__nccwpck_require__(657)); -const MONTH_TO_NUM = { - jan: 0, - feb: 1, - mar: 
2, - apr: 3, - may: 4, - jun: 5, - jul: 6, - aug: 7, - sep: 8, - oct: 9, - nov: 10, - dec: 11 -}; +var _version = _interopRequireDefault(__nccwpck_require__(7909)); -const MAX_TIME = 2147483647000; // 31-bit max -const MIN_TIME = 0; // 31-bit min -const SAME_SITE_CONTEXT_VAL_ERR = - 'Invalid sameSiteContext option for getCookies(); expected one of "strict", "lax", or "none"'; +var _validate = _interopRequireDefault(__nccwpck_require__(4418)); -function checkSameSiteContext(value) { - validators.validate(validators.isNonEmptyString(value), value); - const context = String(value).toLowerCase(); - if (context === "none" || context === "lax" || context === "strict") { - return context; - } else { - return null; - } -} +var _stringify = _interopRequireDefault(__nccwpck_require__(4794)); -const PrefixSecurityEnum = Object.freeze({ - SILENT: "silent", - STRICT: "strict", - DISABLED: "unsafe-disabled" -}); +var _parse = _interopRequireDefault(__nccwpck_require__(7079)); -// Dumped from ip-regex@4.0.0, with the following changes: -// * all capturing groups converted to non-capturing -- "(?:)" -// * support for IPv6 Scoped Literal ("%eth1") removed -// * lowercase hexadecimal only -const IP_REGEX_LOWERCASE = /(?:^(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)){3}$)|(?:^(?:(?:[a-f\d]{1,4}:){7}(?:[a-f\d]{1,4}|:)|(?:[a-f\d]{1,4}:){6}(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)){3}|:[a-f\d]{1,4}|:)|(?:[a-f\d]{1,4}:){5}(?::(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)){3}|(?::[a-f\d]{1,4}){1,2}|:)|(?:[a-f\d]{1,4}:){4}(?:(?::[a-f\d]{1,4}){0,1}:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)){3}|(?::[a-f\d]{1,4}){1,3}|:)|(?:[a-f\d]{1,4}:){3}(?:(?::[a-f\d]{1,4}){0,2}:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)){3}|(?::[a-f\d]{1,4}){1,4}|:)|(?:[a-f\d]{1,4}:){2}(?:(?::[a-f\d]{1,4}){0,3}:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)){3}|(?::[a-f\d]{1,4}){1,5}|:)|(?:[a-f\d]{1,4}:){1}(?:(?::[a-f\d]{1,4}){0,4}:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)){3}|(?::[a-f\d]{1,4}){1,6}|:)|(?::(?:(?::[a-f\d]{1,4}){0,5}:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)){3}|(?::[a-f\d]{1,4}){1,7}|:)))$)/; -const IP_V6_REGEX = ` -\\[?(?: -(?:[a-fA-F\\d]{1,4}:){7}(?:[a-fA-F\\d]{1,4}|:)| -(?:[a-fA-F\\d]{1,4}:){6}(?:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)){3}|:[a-fA-F\\d]{1,4}|:)| -(?:[a-fA-F\\d]{1,4}:){5}(?::(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)){3}|(?::[a-fA-F\\d]{1,4}){1,2}|:)| -(?:[a-fA-F\\d]{1,4}:){4}(?:(?::[a-fA-F\\d]{1,4}){0,1}:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)){3}|(?::[a-fA-F\\d]{1,4}){1,3}|:)| -(?:[a-fA-F\\d]{1,4}:){3}(?:(?::[a-fA-F\\d]{1,4}){0,2}:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)){3}|(?::[a-fA-F\\d]{1,4}){1,4}|:)| -(?:[a-fA-F\\d]{1,4}:){2}(?:(?::[a-fA-F\\d]{1,4}){0,3}:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)){3}|(?::[a-fA-F\\d]{1,4}){1,5}|:)| -(?:[a-fA-F\\d]{1,4}:){1}(?:(?::[a-fA-F\\d]{1,4}){0,4}:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)){3}|(?::[a-fA-F\\d]{1,4}){1,6}|:)| 
-(?::(?:(?::[a-fA-F\\d]{1,4}){0,5}:(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)){3}|(?::[a-fA-F\\d]{1,4}){1,7}|:)) -)(?:%[0-9a-zA-Z]{1,})?\\]? -` - .replace(/\s*\/\/.*$/gm, "") - .replace(/\n/g, "") - .trim(); -const IP_V6_REGEX_OBJECT = new RegExp(`^${IP_V6_REGEX}$`); +function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } -/* - * Parses a Natural number (i.e., non-negative integer) with either the - * *DIGIT ( non-digit *OCTET ) - * or - * *DIGIT - * grammar (RFC6265 S5.1.1). - * - * The "trailingOK" boolean controls if the grammar accepts a - * "( non-digit *OCTET )" trailer. - */ -function parseDigits(token, minDigits, maxDigits, trailingOK) { - let count = 0; - while (count < token.length) { - const c = token.charCodeAt(count); - // "non-digit = %x00-2F / %x3A-FF" - if (c <= 0x2f || c >= 0x3a) { - break; - } - count++; - } +/***/ }), - // constrain to a minimum and maximum number of digits. - if (count < minDigits || count > maxDigits) { - return null; - } +/***/ 4153: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - if (!trailingOK && count != token.length) { - return null; - } +"use strict"; - return parseInt(token.substr(0, count), 10); -} -function parseTime(token) { - const parts = token.split(":"); - const result = [0, 0, 0]; +Object.defineProperty(exports, "__esModule", ({ + value: true +})); +exports["default"] = void 0; - /* RF6256 S5.1.1: - * time = hms-time ( non-digit *OCTET ) - * hms-time = time-field ":" time-field ":" time-field - * time-field = 1*2DIGIT - */ +var _crypto = _interopRequireDefault(__nccwpck_require__(6113)); - if (parts.length !== 3) { - return null; - } +function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - for (let i = 0; i < 3; i++) { - // "time-field" must be strictly "1*2DIGIT", HOWEVER, "hms-time" can be - // followed by "( non-digit *OCTET )" so therefore the last time-field can - // have a trailer - const trailingOK = i == 2; - const num = parseDigits(parts[i], 1, 2, trailingOK); - if (num === null) { - return null; - } - result[i] = num; +function md5(bytes) { + if (Array.isArray(bytes)) { + bytes = Buffer.from(bytes); + } else if (typeof bytes === 'string') { + bytes = Buffer.from(bytes, 'utf8'); } - return result; + return _crypto.default.createHash('md5').update(bytes).digest(); } -function parseMonth(token) { - token = String(token) - .substr(0, 3) - .toLowerCase(); - const num = MONTH_TO_NUM[token]; - return num >= 0 ? num : null; -} +var _default = md5; +exports["default"] = _default; -/* - * RFC6265 S5.1.1 date parser (see RFC for full grammar) - */ -function parseDate(str) { - if (!str) { - return; - } +/***/ }), - /* RFC6265 S5.1.1: - * 2. Process each date-token sequentially in the order the date-tokens - * appear in the cookie-date - */ - const tokens = str.split(DATE_DELIM); - if (!tokens) { - return; - } +/***/ 657: +/***/ ((__unused_webpack_module, exports) => { - let hour = null; - let minute = null; - let second = null; - let dayOfMonth = null; - let month = null; - let year = null; +"use strict"; - for (let i = 0; i < tokens.length; i++) { - const token = tokens[i].trim(); - if (!token.length) { - continue; - } - let result; +Object.defineProperty(exports, "__esModule", ({ + value: true +})); +exports["default"] = void 0; +var _default = '00000000-0000-0000-0000-000000000000'; +exports["default"] = _default; - /* 2.1. 
If the found-time flag is not set and the token matches the time - * production, set the found-time flag and set the hour- value, - * minute-value, and second-value to the numbers denoted by the digits in - * the date-token, respectively. Skip the remaining sub-steps and continue - * to the next date-token. - */ - if (second === null) { - result = parseTime(token); - if (result) { - hour = result[0]; - minute = result[1]; - second = result[2]; - continue; - } - } +/***/ }), - /* 2.2. If the found-day-of-month flag is not set and the date-token matches - * the day-of-month production, set the found-day-of- month flag and set - * the day-of-month-value to the number denoted by the date-token. Skip - * the remaining sub-steps and continue to the next date-token. - */ - if (dayOfMonth === null) { - // "day-of-month = 1*2DIGIT ( non-digit *OCTET )" - result = parseDigits(token, 1, 2, true); - if (result !== null) { - dayOfMonth = result; - continue; - } - } +/***/ 7079: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - /* 2.3. If the found-month flag is not set and the date-token matches the - * month production, set the found-month flag and set the month-value to - * the month denoted by the date-token. Skip the remaining sub-steps and - * continue to the next date-token. - */ - if (month === null) { - result = parseMonth(token); - if (result !== null) { - month = result; - continue; - } - } +"use strict"; - /* 2.4. If the found-year flag is not set and the date-token matches the - * year production, set the found-year flag and set the year-value to the - * number denoted by the date-token. Skip the remaining sub-steps and - * continue to the next date-token. - */ - if (year === null) { - // "year = 2*4DIGIT ( non-digit *OCTET )" - result = parseDigits(token, 2, 4, true); - if (result !== null) { - year = result; - /* From S5.1.1: - * 3. If the year-value is greater than or equal to 70 and less - * than or equal to 99, increment the year-value by 1900. - * 4. If the year-value is greater than or equal to 0 and less - * than or equal to 69, increment the year-value by 2000. - */ - if (year >= 70 && year <= 99) { - year += 1900; - } else if (year >= 0 && year <= 69) { - year += 2000; - } - } - } - } - /* RFC 6265 S5.1.1 - * "5. Abort these steps and fail to parse the cookie-date if: - * * at least one of the found-day-of-month, found-month, found- - * year, or found-time flags is not set, - * * the day-of-month-value is less than 1 or greater than 31, - * * the year-value is less than 1601, - * * the hour-value is greater than 23, - * * the minute-value is greater than 59, or - * * the second-value is greater than 59. - * (Note that leap seconds cannot be represented in this syntax.)" - * - * So, in order as above: - */ - if ( - dayOfMonth === null || - month === null || - year === null || - second === null || - dayOfMonth < 1 || - dayOfMonth > 31 || - year < 1601 || - hour > 23 || - minute > 59 || - second > 59 - ) { - return; - } +Object.defineProperty(exports, "__esModule", ({ + value: true +})); +exports["default"] = void 0; - return new Date(Date.UTC(year, month, dayOfMonth, hour, minute, second)); -} +var _validate = _interopRequireDefault(__nccwpck_require__(4418)); -function formatDate(date) { - validators.validate(validators.isDate(date), date); - return date.toUTCString(); -} +function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } -// S5.1.2 Canonicalized Host Names -function canonicalDomain(str) { - if (str == null) { - return null; +function parse(uuid) { + if (!(0, _validate.default)(uuid)) { + throw TypeError('Invalid UUID'); } - str = str.trim().replace(/^\./, ""); // S4.1.2.3 & S5.2.3: ignore leading . - if (IP_V6_REGEX_OBJECT.test(str)) { - str = str.replace("[", "").replace("]", ""); - } + let v; + const arr = new Uint8Array(16); // Parse ########-....-....-....-............ - // convert to IDN if any non-ASCII characters - if (punycode && /[^\u0001-\u007f]/.test(str)) { - str = punycode.toASCII(str); - } + arr[0] = (v = parseInt(uuid.slice(0, 8), 16)) >>> 24; + arr[1] = v >>> 16 & 0xff; + arr[2] = v >>> 8 & 0xff; + arr[3] = v & 0xff; // Parse ........-####-....-....-............ - return str.toLowerCase(); -} + arr[4] = (v = parseInt(uuid.slice(9, 13), 16)) >>> 8; + arr[5] = v & 0xff; // Parse ........-....-####-....-............ -// S5.1.3 Domain Matching -function domainMatch(str, domStr, canonicalize) { - if (str == null || domStr == null) { - return null; - } - if (canonicalize !== false) { - str = canonicalDomain(str); - domStr = canonicalDomain(domStr); - } + arr[6] = (v = parseInt(uuid.slice(14, 18), 16)) >>> 8; + arr[7] = v & 0xff; // Parse ........-....-....-####-............ - /* - * S5.1.3: - * "A string domain-matches a given domain string if at least one of the - * following conditions hold:" - * - * " o The domain string and the string are identical. (Note that both the - * domain string and the string will have been canonicalized to lower case at - * this point)" - */ - if (str == domStr) { - return true; - } + arr[8] = (v = parseInt(uuid.slice(19, 23), 16)) >>> 8; + arr[9] = v & 0xff; // Parse ........-....-....-....-############ + // (Use "/" to avoid 32-bit truncation when bit-shifting high-order bytes) - /* " o All of the following [three] conditions hold:" */ + arr[10] = (v = parseInt(uuid.slice(24, 36), 16)) / 0x10000000000 & 0xff; + arr[11] = v / 0x100000000 & 0xff; + arr[12] = v >>> 24 & 0xff; + arr[13] = v >>> 16 & 0xff; + arr[14] = v >>> 8 & 0xff; + arr[15] = v & 0xff; + return arr; +} - /* "* The domain string is a suffix of the string" */ - const idx = str.lastIndexOf(domStr); - if (idx <= 0) { - return false; // it's a non-match (-1) or prefix (0) - } +var _default = parse; +exports["default"] = _default; - // next, check it's a proper suffix - // e.g., "a.b.c".indexOf("b.c") === 2 - // 5 === 3+2 - if (str.length !== domStr.length + idx) { - return false; // it's not a suffix - } +/***/ }), - /* " * The last character of the string that is not included in the - * domain string is a %x2E (".") character." */ - if (str.substr(idx - 1, 1) !== ".") { - return false; // doesn't align on "." - } +/***/ 690: +/***/ ((__unused_webpack_module, exports) => { - /* " * The string is a host name (i.e., not an IP address)." */ - if (IP_REGEX_LOWERCASE.test(str)) { - return false; // it's an IP address - } +"use strict"; - return true; -} -// RFC6265 S5.1.4 Paths and Path-Match +Object.defineProperty(exports, "__esModule", ({ + value: true +})); +exports["default"] = void 0; +var _default = /^(?:[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}|00000000-0000-0000-0000-000000000000)$/i; +exports["default"] = _default; -/* - * "The user agent MUST use an algorithm equivalent to the following algorithm - * to compute the default-path of a cookie:" - * - * Assumption: the path (and not query part or absolute uri) is passed in. 
- */ -function defaultPath(path) { - // "2. If the uri-path is empty or if the first character of the uri-path is not - // a %x2F ("/") character, output %x2F ("/") and skip the remaining steps. - if (!path || path.substr(0, 1) !== "/") { - return "/"; - } +/***/ }), - // "3. If the uri-path contains no more than one %x2F ("/") character, output - // %x2F ("/") and skip the remaining step." - if (path === "/") { - return path; - } +/***/ 979: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - const rightSlash = path.lastIndexOf("/"); - if (rightSlash === 0) { - return "/"; - } +"use strict"; - // "4. Output the characters of the uri-path from the first character up to, - // but not including, the right-most %x2F ("/")." - return path.slice(0, rightSlash); -} -function trimTerminator(str) { - if (validators.isEmptyString(str)) return str; - for (let t = 0; t < TERMINATORS.length; t++) { - const terminatorIdx = str.indexOf(TERMINATORS[t]); - if (terminatorIdx !== -1) { - str = str.substr(0, terminatorIdx); - } - } +Object.defineProperty(exports, "__esModule", ({ + value: true +})); +exports["default"] = rng; - return str; -} +var _crypto = _interopRequireDefault(__nccwpck_require__(6113)); -function parseCookiePair(cookiePair, looseMode) { - cookiePair = trimTerminator(cookiePair); - validators.validate(validators.isString(cookiePair), cookiePair); +function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - let firstEq = cookiePair.indexOf("="); - if (looseMode) { - if (firstEq === 0) { - // '=' is immediately at start - cookiePair = cookiePair.substr(1); - firstEq = cookiePair.indexOf("="); // might still need to split on '=' - } - } else { - // non-loose mode - if (firstEq <= 0) { - // no '=' or is at start - return; // needs to have non-empty "cookie-name" - } - } +const rnds8Pool = new Uint8Array(256); // # of random values to pre-allocate - let cookieName, cookieValue; - if (firstEq <= 0) { - cookieName = ""; - cookieValue = cookiePair.trim(); - } else { - cookieName = cookiePair.substr(0, firstEq).trim(); - cookieValue = cookiePair.substr(firstEq + 1).trim(); - } +let poolPtr = rnds8Pool.length; - if (CONTROL_CHARS.test(cookieName) || CONTROL_CHARS.test(cookieValue)) { - return; +function rng() { + if (poolPtr > rnds8Pool.length - 16) { + _crypto.default.randomFillSync(rnds8Pool); + + poolPtr = 0; } - const c = new Cookie(); - c.key = cookieName; - c.value = cookieValue; - return c; + return rnds8Pool.slice(poolPtr, poolPtr += 16); } -function parse(str, options) { - if (!options || typeof options !== "object") { - options = {}; - } +/***/ }), - if (validators.isEmptyString(str) || !validators.isString(str)) { - return null; - } +/***/ 6631: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - str = str.trim(); +"use strict"; - // We use a regex to parse the "name-value-pair" part of S5.2 - const firstSemi = str.indexOf(";"); // S5.2 step 1 - const cookiePair = firstSemi === -1 ? str : str.substr(0, firstSemi); - const c = parseCookiePair(cookiePair, !!options.loose); - if (!c) { - return; - } - if (firstSemi === -1) { - return c; - } +Object.defineProperty(exports, "__esModule", ({ + value: true +})); +exports["default"] = void 0; - // S5.2.3 "unparsed-attributes consist of the remainder of the set-cookie-string - // (including the %x3B (";") in question)." plus later on in the same section - // "discard the first ";" and trim". 
- const unparsed = str.slice(firstSemi + 1).trim(); +var _crypto = _interopRequireDefault(__nccwpck_require__(6113)); - // "If the unparsed-attributes string is empty, skip the rest of these - // steps." - if (unparsed.length === 0) { - return c; +function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } + +function sha1(bytes) { + if (Array.isArray(bytes)) { + bytes = Buffer.from(bytes); + } else if (typeof bytes === 'string') { + bytes = Buffer.from(bytes, 'utf8'); } - /* - * S5.2 says that when looping over the items "[p]rocess the attribute-name - * and attribute-value according to the requirements in the following - * subsections" for every item. Plus, for many of the individual attributes - * in S5.3 it says to use the "attribute-value of the last attribute in the - * cookie-attribute-list". Therefore, in this implementation, we overwrite - * the previous value. - */ - const cookie_avs = unparsed.split(";"); - while (cookie_avs.length) { - const av = cookie_avs.shift().trim(); - if (av.length === 0) { - // happens if ";;" appears - continue; - } - const av_sep = av.indexOf("="); - let av_key, av_value; + return _crypto.default.createHash('sha1').update(bytes).digest(); +} - if (av_sep === -1) { - av_key = av; - av_value = null; - } else { - av_key = av.substr(0, av_sep); - av_value = av.substr(av_sep + 1); - } +var _default = sha1; +exports["default"] = _default; - av_key = av_key.trim().toLowerCase(); +/***/ }), - if (av_value) { - av_value = av_value.trim(); - } +/***/ 4794: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - switch (av_key) { - case "expires": // S5.2.1 - if (av_value) { - const exp = parseDate(av_value); - // "If the attribute-value failed to parse as a cookie date, ignore the - // cookie-av." - if (exp) { - // over and underflow not realistically a concern: V8's getTime() seems to - // store something larger than a 32-bit time_t (even with 32-bit node) - c.expires = exp; - } - } - break; +"use strict"; - case "max-age": // S5.2.2 - if (av_value) { - // "If the first character of the attribute-value is not a DIGIT or a "-" - // character ...[or]... If the remainder of attribute-value contains a - // non-DIGIT character, ignore the cookie-av." - if (/^-?[0-9]+$/.test(av_value)) { - const delta = parseInt(av_value, 10); - // "If delta-seconds is less than or equal to zero (0), let expiry-time - // be the earliest representable date and time." - c.setMaxAge(delta); - } - } - break; - case "domain": // S5.2.3 - // "If the attribute-value is empty, the behavior is undefined. However, - // the user agent SHOULD ignore the cookie-av entirely." - if (av_value) { - // S5.2.3 "Let cookie-domain be the attribute-value without the leading %x2E - // (".") character." - const domain = av_value.trim().replace(/^\./, ""); - if (domain) { - // "Convert the cookie-domain to lower case." - c.domain = domain.toLowerCase(); - } - } - break; +Object.defineProperty(exports, "__esModule", ({ + value: true +})); +exports["default"] = void 0; - case "path": // S5.2.4 - /* - * "If the attribute-value is empty or if the first character of the - * attribute-value is not %x2F ("/"): - * Let cookie-path be the default-path. - * Otherwise: - * Let cookie-path be the attribute-value." - * - * We'll represent the default-path as null since it depends on the - * context of the parsing. - */ - c.path = av_value && av_value[0] === "/" ? 
av_value : null; - break; +var _validate = _interopRequireDefault(__nccwpck_require__(4418)); - case "secure": // S5.2.5 - /* - * "If the attribute-name case-insensitively matches the string "Secure", - * the user agent MUST append an attribute to the cookie-attribute-list - * with an attribute-name of Secure and an empty attribute-value." - */ - c.secure = true; - break; +function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - case "httponly": // S5.2.6 -- effectively the same as 'secure' - c.httpOnly = true; - break; +/** + * Convert array of 16 byte values to UUID string format of the form: + * XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + */ +const byteToHex = []; - case "samesite": // RFC6265bis-02 S5.3.7 - const enforcement = av_value ? av_value.toLowerCase() : ""; - switch (enforcement) { - case "strict": - c.sameSite = "strict"; - break; - case "lax": - c.sameSite = "lax"; - break; - case "none": - c.sameSite = "none"; - break; - default: - c.sameSite = undefined; - break; - } - break; +for (let i = 0; i < 256; ++i) { + byteToHex.push((i + 0x100).toString(16).substr(1)); +} - default: - c.extensions = c.extensions || []; - c.extensions.push(av); - break; - } +function stringify(arr, offset = 0) { + // Note: Be careful editing this code! It's been tuned for performance + // and works in ways you may not expect. See https://github.com/uuidjs/uuid/pull/434 + const uuid = (byteToHex[arr[offset + 0]] + byteToHex[arr[offset + 1]] + byteToHex[arr[offset + 2]] + byteToHex[arr[offset + 3]] + '-' + byteToHex[arr[offset + 4]] + byteToHex[arr[offset + 5]] + '-' + byteToHex[arr[offset + 6]] + byteToHex[arr[offset + 7]] + '-' + byteToHex[arr[offset + 8]] + byteToHex[arr[offset + 9]] + '-' + byteToHex[arr[offset + 10]] + byteToHex[arr[offset + 11]] + byteToHex[arr[offset + 12]] + byteToHex[arr[offset + 13]] + byteToHex[arr[offset + 14]] + byteToHex[arr[offset + 15]]).toLowerCase(); // Consistency check for valid UUID. If this throws, it's likely due to one + // of the following: + // - One or more input array values don't map to a hex octet (leading to + // "undefined" in the uuid) + // - Invalid input values for the RFC `version` or `variant` fields + + if (!(0, _validate.default)(uuid)) { + throw TypeError('Stringified UUID is invalid'); } - return c; + return uuid; } -/** - * If the cookie-name begins with a case-sensitive match for the - * string "__Secure-", abort these steps and ignore the cookie - * entirely unless the cookie's secure-only-flag is true. - * @param cookie - * @returns boolean - */ -function isSecurePrefixConditionMet(cookie) { - validators.validate(validators.isObject(cookie), cookie); - return !cookie.key.startsWith("__Secure-") || cookie.secure; -} +var _default = stringify; +exports["default"] = _default; -/** - * If the cookie-name begins with a case-sensitive match for the - * string "__Host-", abort these steps and ignore the cookie - * entirely unless the cookie meets all the following criteria: - * 1. The cookie's secure-only-flag is true. - * 2. The cookie's host-only-flag is true. - * 3. The cookie-attribute-list contains an attribute with an - * attribute-name of "Path", and the cookie's path is "/". 
- * @param cookie - * @returns boolean - */ -function isHostPrefixConditionMet(cookie) { - validators.validate(validators.isObject(cookie)); - return ( - !cookie.key.startsWith("__Host-") || - (cookie.secure && - cookie.hostOnly && - cookie.path != null && - cookie.path === "/") - ); -} +/***/ }), -// avoid the V8 deoptimization monster! -function jsonParse(str) { - let obj; - try { - obj = JSON.parse(str); - } catch (e) { - return e; - } - return obj; -} +/***/ 4757: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { -function fromJSON(str) { - if (!str || validators.isEmptyString(str)) { - return null; - } +"use strict"; - let obj; - if (typeof str === "string") { - obj = jsonParse(str); - if (obj instanceof Error) { - return null; - } - } else { - // assume it's an Object - obj = str; - } - const c = new Cookie(); - for (let i = 0; i < Cookie.serializableProperties.length; i++) { - const prop = Cookie.serializableProperties[i]; - if (obj[prop] === undefined || obj[prop] === cookieDefaults[prop]) { - continue; // leave as prototype default +Object.defineProperty(exports, "__esModule", ({ + value: true +})); +exports["default"] = void 0; + +var _rng = _interopRequireDefault(__nccwpck_require__(979)); + +var _stringify = _interopRequireDefault(__nccwpck_require__(4794)); + +function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } + +// **`v1()` - Generate time-based UUID** +// +// Inspired by https://github.com/LiosK/UUID.js +// and http://docs.python.org/library/uuid.html +let _nodeId; + +let _clockseq; // Previous uuid creation time + + +let _lastMSecs = 0; +let _lastNSecs = 0; // See https://github.com/uuidjs/uuid for API details + +function v1(options, buf, offset) { + let i = buf && offset || 0; + const b = buf || new Array(16); + options = options || {}; + let node = options.node || _nodeId; + let clockseq = options.clockseq !== undefined ? options.clockseq : _clockseq; // node and clockseq need to be initialized to random values if they're not + // specified. We do this lazily to minimize issues related to insufficient + // system entropy. See #189 + + if (node == null || clockseq == null) { + const seedBytes = options.random || (options.rng || _rng.default)(); + + if (node == null) { + // Per 4.5, create and 48-bit node id, (47 random bits + multicast bit = 1) + node = _nodeId = [seedBytes[0] | 0x01, seedBytes[1], seedBytes[2], seedBytes[3], seedBytes[4], seedBytes[5]]; } - if (prop === "expires" || prop === "creation" || prop === "lastAccessed") { - if (obj[prop] === null) { - c[prop] = null; - } else { - c[prop] = obj[prop] == "Infinity" ? "Infinity" : new Date(obj[prop]); - } - } else { - c[prop] = obj[prop]; + if (clockseq == null) { + // Per 4.2.2, randomize (14 bit) clockseq + clockseq = _clockseq = (seedBytes[6] << 8 | seedBytes[7]) & 0x3fff; } - } + } // UUID timestamps are 100 nano-second units since the Gregorian epoch, + // (1582-10-15 00:00). JSNumbers aren't precise enough for this, so + // time is handled internally as 'msecs' (integer milliseconds) and 'nsecs' + // (100-nanoseconds offset from msecs) since unix epoch, 1970-01-01 00:00. - return c; -} -/* Section 5.4 part 2: - * "* Cookies with longer paths are listed before cookies with - * shorter paths. - * - * * Among cookies that have equal-length path fields, cookies with - * earlier creation-times are listed before cookies with later - * creation-times." - */ + let msecs = options.msecs !== undefined ? 
options.msecs : Date.now(); // Per 4.2.1.2, use count of uuid's generated during the current clock + // cycle to simulate higher resolution clock -function cookieCompare(a, b) { - validators.validate(validators.isObject(a), a); - validators.validate(validators.isObject(b), b); - let cmp = 0; + let nsecs = options.nsecs !== undefined ? options.nsecs : _lastNSecs + 1; // Time since last uuid creation (in msecs) - // descending for length: b CMP a - const aPathLen = a.path ? a.path.length : 0; - const bPathLen = b.path ? b.path.length : 0; - cmp = bPathLen - aPathLen; - if (cmp !== 0) { - return cmp; - } + const dt = msecs - _lastMSecs + (nsecs - _lastNSecs) / 10000; // Per 4.2.1.2, Bump clockseq on clock regression - // ascending for time: a CMP b - const aTime = a.creation ? a.creation.getTime() : MAX_TIME; - const bTime = b.creation ? b.creation.getTime() : MAX_TIME; - cmp = aTime - bTime; - if (cmp !== 0) { - return cmp; - } + if (dt < 0 && options.clockseq === undefined) { + clockseq = clockseq + 1 & 0x3fff; + } // Reset nsecs if clock regresses (new clockseq) or we've moved onto a new + // time interval - // break ties for the same millisecond (precision of JavaScript's clock) - cmp = a.creationIndex - b.creationIndex; - return cmp; -} + if ((dt < 0 || msecs > _lastMSecs) && options.nsecs === undefined) { + nsecs = 0; + } // Per 4.2.1.2 Throw error if too many uuids are requested -// Gives the permutation of all possible pathMatch()es of a given path. The -// array is in longest-to-shortest order. Handy for indexing. -function permutePath(path) { - validators.validate(validators.isString(path)); - if (path === "/") { - return ["/"]; - } - const permutations = [path]; - while (path.length > 1) { - const lindex = path.lastIndexOf("/"); - if (lindex === 0) { - break; - } - path = path.substr(0, lindex); - permutations.push(path); - } - permutations.push("/"); - return permutations; -} -function getCookieContext(url) { - if (url instanceof Object) { - return url; - } - // NOTE: decodeURI will throw on malformed URIs (see GH-32). - // Therefore, we will just skip decoding for such URIs. 
- try { - url = decodeURI(url); - } catch (err) { - // Silently swallow error + if (nsecs >= 10000) { + throw new Error("uuid.v1(): Can't create more than 10M uuids/sec"); } - return urlParse(url); -} + _lastMSecs = msecs; + _lastNSecs = nsecs; + _clockseq = clockseq; // Per 4.1.4 - Convert from unix epoch to Gregorian epoch -const cookieDefaults = { - // the order in which the RFC has them: - key: "", - value: "", - expires: "Infinity", - maxAge: null, - domain: null, - path: null, - secure: false, - httpOnly: false, - extensions: null, - // set by the CookieJar: - hostOnly: null, - pathIsDefault: null, - creation: null, - lastAccessed: null, - sameSite: undefined -}; + msecs += 12219292800000; // `time_low` -class Cookie { - constructor(options = {}) { - const customInspectSymbol = getCustomInspectSymbol(); - if (customInspectSymbol) { - this[customInspectSymbol] = this.inspect; - } + const tl = ((msecs & 0xfffffff) * 10000 + nsecs) % 0x100000000; + b[i++] = tl >>> 24 & 0xff; + b[i++] = tl >>> 16 & 0xff; + b[i++] = tl >>> 8 & 0xff; + b[i++] = tl & 0xff; // `time_mid` - Object.assign(this, cookieDefaults, options); - this.creation = this.creation || new Date(); + const tmh = msecs / 0x100000000 * 10000 & 0xfffffff; + b[i++] = tmh >>> 8 & 0xff; + b[i++] = tmh & 0xff; // `time_high_and_version` - // used to break creation ties in cookieCompare(): - Object.defineProperty(this, "creationIndex", { - configurable: false, - enumerable: false, // important for assert.deepEqual checks - writable: true, - value: ++Cookie.cookiesCreated - }); - } + b[i++] = tmh >>> 24 & 0xf | 0x10; // include version - inspect() { - const now = Date.now(); - const hostOnly = this.hostOnly != null ? this.hostOnly : "?"; - const createAge = this.creation - ? `${now - this.creation.getTime()}ms` - : "?"; - const accessAge = this.lastAccessed - ? `${now - this.lastAccessed.getTime()}ms` - : "?"; - return `Cookie="${this.toString()}; hostOnly=${hostOnly}; aAge=${accessAge}; cAge=${createAge}"`; - } + b[i++] = tmh >>> 16 & 0xff; // `clock_seq_hi_and_reserved` (Per 4.2.2 - include variant) - toJSON() { - const obj = {}; + b[i++] = clockseq >>> 8 | 0x80; // `clock_seq_low` - for (const prop of Cookie.serializableProperties) { - if (this[prop] === cookieDefaults[prop]) { - continue; // leave as prototype default - } + b[i++] = clockseq & 0xff; // `node` - if ( - prop === "expires" || - prop === "creation" || - prop === "lastAccessed" - ) { - if (this[prop] === null) { - obj[prop] = null; - } else { - obj[prop] = - this[prop] == "Infinity" // intentionally not === - ? "Infinity" - : this[prop].toISOString(); - } - } else if (prop === "maxAge") { - if (this[prop] !== null) { - // again, intentionally not === - obj[prop] = - this[prop] == Infinity || this[prop] == -Infinity - ? 
this[prop].toString() - : this[prop]; - } - } else { - if (this[prop] !== cookieDefaults[prop]) { - obj[prop] = this[prop]; - } - } - } - - return obj; + for (let n = 0; n < 6; ++n) { + b[i + n] = node[n]; } - clone() { - return fromJSON(this.toJSON()); - } + return buf || (0, _stringify.default)(b); +} - validate() { - if (!COOKIE_OCTETS.test(this.value)) { - return false; - } - if ( - this.expires != Infinity && - !(this.expires instanceof Date) && - !parseDate(this.expires) - ) { - return false; - } - if (this.maxAge != null && this.maxAge <= 0) { - return false; // "Max-Age=" non-zero-digit *DIGIT - } - if (this.path != null && !PATH_VALUE.test(this.path)) { - return false; - } +var _default = v1; +exports["default"] = _default; - const cdomain = this.cdomain(); - if (cdomain) { - if (cdomain.match(/\.$/)) { - return false; // S4.1.2.3 suggests that this is bad. domainMatch() tests confirm this - } - const suffix = pubsuffix.getPublicSuffix(cdomain); - if (suffix == null) { - // it's a public suffix - return false; - } - } - return true; - } +/***/ }), - setExpires(exp) { - if (exp instanceof Date) { - this.expires = exp; - } else { - this.expires = parseDate(exp) || "Infinity"; - } - } +/***/ 9982: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - setMaxAge(age) { - if (age === Infinity || age === -Infinity) { - this.maxAge = age.toString(); // so JSON.stringify() works - } else { - this.maxAge = age; - } - } +"use strict"; - cookieString() { - let val = this.value; - if (val == null) { - val = ""; - } - if (this.key === "") { - return val; - } - return `${this.key}=${val}`; - } - // gives Set-Cookie header format - toString() { - let str = this.cookieString(); +Object.defineProperty(exports, "__esModule", ({ + value: true +})); +exports["default"] = void 0; - if (this.expires != Infinity) { - if (this.expires instanceof Date) { - str += `; Expires=${formatDate(this.expires)}`; - } else { - str += `; Expires=${this.expires}`; - } - } +var _v = _interopRequireDefault(__nccwpck_require__(4085)); - if (this.maxAge != null && this.maxAge != Infinity) { - str += `; Max-Age=${this.maxAge}`; - } +var _md = _interopRequireDefault(__nccwpck_require__(4153)); - if (this.domain && !this.hostOnly) { - str += `; Domain=${this.domain}`; - } - if (this.path) { - str += `; Path=${this.path}`; - } +function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - if (this.secure) { - str += "; Secure"; - } - if (this.httpOnly) { - str += "; HttpOnly"; - } - if (this.sameSite && this.sameSite !== "none") { - const ssCanon = Cookie.sameSiteCanonical[this.sameSite.toLowerCase()]; - str += `; SameSite=${ssCanon ? ssCanon : this.sameSite}`; - } - if (this.extensions) { - this.extensions.forEach(ext => { - str += `; ${ext}`; - }); - } +const v3 = (0, _v.default)('v3', 0x30, _md.default); +var _default = v3; +exports["default"] = _default; - return str; - } +/***/ }), - // TTL() partially replaces the "expiry-time" parts of S5.3 step 3 (setCookie() - // elsewhere) - // S5.3 says to give the "latest representable date" for which we use Infinity - // For "expired" we use 0 - TTL(now) { - /* RFC6265 S4.1.2.2 If a cookie has both the Max-Age and the Expires - * attribute, the Max-Age attribute has precedence and controls the - * expiration date of the cookie. - * (Concurs with S5.3 step 3) - */ - if (this.maxAge != null) { - return this.maxAge <= 0 ? 
0 : this.maxAge * 1000; - } +/***/ 4085: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - let expires = this.expires; - if (expires != Infinity) { - if (!(expires instanceof Date)) { - expires = parseDate(expires) || Infinity; - } +"use strict"; - if (expires == Infinity) { - return Infinity; - } - return expires.getTime() - (now || Date.now()); - } +Object.defineProperty(exports, "__esModule", ({ + value: true +})); +exports["default"] = _default; +exports.URL = exports.DNS = void 0; - return Infinity; - } +var _stringify = _interopRequireDefault(__nccwpck_require__(4794)); - // expiryTime() replaces the "expiry-time" parts of S5.3 step 3 (setCookie() - // elsewhere) - expiryTime(now) { - if (this.maxAge != null) { - const relativeTo = now || this.creation || new Date(); - const age = this.maxAge <= 0 ? -Infinity : this.maxAge * 1000; - return relativeTo.getTime() + age; - } +var _parse = _interopRequireDefault(__nccwpck_require__(7079)); - if (this.expires == Infinity) { - return Infinity; - } - return this.expires.getTime(); - } +function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - // expiryDate() replaces the "expiry-time" parts of S5.3 step 3 (setCookie() - // elsewhere), except it returns a Date - expiryDate(now) { - const millisec = this.expiryTime(now); - if (millisec == Infinity) { - return new Date(MAX_TIME); - } else if (millisec == -Infinity) { - return new Date(MIN_TIME); - } else { - return new Date(millisec); - } - } +function stringToBytes(str) { + str = unescape(encodeURIComponent(str)); // UTF8 escape - // This replaces the "persistent-flag" parts of S5.3 step 3 - isPersistent() { - return this.maxAge != null || this.expires != Infinity; - } + const bytes = []; - // Mostly S5.1.2 and S5.2.3: - canonicalizedDomain() { - if (this.domain == null) { - return null; - } - return canonicalDomain(this.domain); + for (let i = 0; i < str.length; ++i) { + bytes.push(str.charCodeAt(i)); } - cdomain() { - return this.canonicalizedDomain(); - } + return bytes; } -Cookie.cookiesCreated = 0; -Cookie.parse = parse; -Cookie.fromJSON = fromJSON; -Cookie.serializableProperties = Object.keys(cookieDefaults); -Cookie.sameSiteLevel = { - strict: 3, - lax: 2, - none: 1 -}; - -Cookie.sameSiteCanonical = { - strict: "Strict", - lax: "Lax" -}; +const DNS = '6ba7b810-9dad-11d1-80b4-00c04fd430c8'; +exports.DNS = DNS; +const URL = '6ba7b811-9dad-11d1-80b4-00c04fd430c8'; +exports.URL = URL; -function getNormalizedPrefixSecurity(prefixSecurity) { - if (prefixSecurity != null) { - const normalizedPrefixSecurity = prefixSecurity.toLowerCase(); - /* The three supported options */ - switch (normalizedPrefixSecurity) { - case PrefixSecurityEnum.STRICT: - case PrefixSecurityEnum.SILENT: - case PrefixSecurityEnum.DISABLED: - return normalizedPrefixSecurity; +function _default(name, version, hashfunc) { + function generateUUID(value, namespace, buf, offset) { + if (typeof value === 'string') { + value = stringToBytes(value); } - } - /* Default is SILENT */ - return PrefixSecurityEnum.SILENT; -} -class CookieJar { - constructor(store, options = { rejectPublicSuffixes: true }) { - if (typeof options === "boolean") { - options = { rejectPublicSuffixes: options }; + if (typeof namespace === 'string') { + namespace = (0, _parse.default)(namespace); } - validators.validate(validators.isObject(options), options); - this.rejectPublicSuffixes = options.rejectPublicSuffixes; - this.enableLooseMode = !!options.looseMode; - this.allowSpecialUseDomain = - 
typeof options.allowSpecialUseDomain === "boolean" - ? options.allowSpecialUseDomain - : true; - this.store = store || new MemoryCookieStore(); - this.prefixSecurity = getNormalizedPrefixSecurity(options.prefixSecurity); - this._cloneSync = syncWrap("clone"); - this._importCookiesSync = syncWrap("_importCookies"); - this.getCookiesSync = syncWrap("getCookies"); - this.getCookieStringSync = syncWrap("getCookieString"); - this.getSetCookieStringsSync = syncWrap("getSetCookieStrings"); - this.removeAllCookiesSync = syncWrap("removeAllCookies"); - this.setCookieSync = syncWrap("setCookie"); - this.serializeSync = syncWrap("serialize"); - } - setCookie(cookie, url, options, cb) { - validators.validate(validators.isNonEmptyString(url), cb, options); - let err; + if (namespace.length !== 16) { + throw TypeError('Namespace must be array-like (16 iterable integer values, 0-255)'); + } // Compute hash of namespace and value, Per 4.3 + // Future: Use spread syntax when supported on all platforms, e.g. `bytes = + // hashfunc([...namespace, ... value])` - if (validators.isFunction(url)) { - cb = url; - return cb(new Error("No URL was specified")); - } - const context = getCookieContext(url); - if (validators.isFunction(options)) { - cb = options; - options = {}; - } + let bytes = new Uint8Array(16 + value.length); + bytes.set(namespace); + bytes.set(value, namespace.length); + bytes = hashfunc(bytes); + bytes[6] = bytes[6] & 0x0f | version; + bytes[8] = bytes[8] & 0x3f | 0x80; - validators.validate(validators.isFunction(cb), cb); + if (buf) { + offset = offset || 0; - if ( - !validators.isNonEmptyString(cookie) && - !validators.isObject(cookie) && - cookie instanceof String && - cookie.length == 0 - ) { - return cb(null); + for (let i = 0; i < 16; ++i) { + buf[offset + i] = bytes[i]; + } + + return buf; } - const host = canonicalDomain(context.hostname); - const loose = options.loose || this.enableLooseMode; + return (0, _stringify.default)(bytes); + } // Function#name is not settable on some platforms (#270) - let sameSiteContext = null; - if (options.sameSiteContext) { - sameSiteContext = checkSameSiteContext(options.sameSiteContext); - if (!sameSiteContext) { - return cb(new Error(SAME_SITE_CONTEXT_VAL_ERR)); - } - } - // S5.3 step 1 - if (typeof cookie === "string" || cookie instanceof String) { - cookie = Cookie.parse(cookie, { loose: loose }); - if (!cookie) { - err = new Error("Cookie failed to parse"); - return cb(options.ignoreError ? null : err); - } - } else if (!(cookie instanceof Cookie)) { - // If you're seeing this error, and are passing in a Cookie object, - // it *might* be a Cookie object from another loaded version of tough-cookie. - err = new Error( - "First argument to setCookie must be a Cookie object or string" - ); - return cb(options.ignoreError ? 
null : err); - } + try { + generateUUID.name = name; // eslint-disable-next-line no-empty + } catch (err) {} // For CommonJS default export support - // S5.3 step 2 - const now = options.now || new Date(); // will assign later to save effort in the face of errors - // S5.3 step 3: NOOP; persistent-flag and expiry-time is handled by getCookie() + generateUUID.DNS = DNS; + generateUUID.URL = URL; + return generateUUID; +} - // S5.3 step 4: NOOP; domain is null by default +/***/ }), - // S5.3 step 5: public suffixes - if (this.rejectPublicSuffixes && cookie.domain) { - const suffix = pubsuffix.getPublicSuffix(cookie.cdomain(), { - allowSpecialUseDomain: this.allowSpecialUseDomain, - ignoreError: options.ignoreError - }); - if (suffix == null && !IP_V6_REGEX_OBJECT.test(cookie.domain)) { - // e.g. "com" - err = new Error("Cookie has domain set to a public suffix"); - return cb(options.ignoreError ? null : err); - } - } +/***/ 5393: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - // S5.3 step 6: - if (cookie.domain) { - if (!domainMatch(host, cookie.cdomain(), false)) { - err = new Error( - `Cookie not in this host's domain. Cookie:${cookie.cdomain()} Request:${host}` - ); - return cb(options.ignoreError ? null : err); - } +"use strict"; - if (cookie.hostOnly == null) { - // don't reset if already set - cookie.hostOnly = false; - } - } else { - cookie.hostOnly = true; - cookie.domain = host; - } - //S5.2.4 If the attribute-value is empty or if the first character of the - //attribute-value is not %x2F ("/"): - //Let cookie-path be the default-path. - if (!cookie.path || cookie.path[0] !== "/") { - cookie.path = defaultPath(context.pathname); - cookie.pathIsDefault = true; - } +Object.defineProperty(exports, "__esModule", ({ + value: true +})); +exports["default"] = void 0; - // S5.3 step 8: NOOP; secure attribute - // S5.3 step 9: NOOP; httpOnly attribute +var _rng = _interopRequireDefault(__nccwpck_require__(979)); - // S5.3 step 10 - if (options.http === false && cookie.httpOnly) { - err = new Error("Cookie is HttpOnly and this isn't an HTTP API"); - return cb(options.ignoreError ? null : err); - } +var _stringify = _interopRequireDefault(__nccwpck_require__(4794)); - // 6252bis-02 S5.4 Step 13 & 14: - if ( - cookie.sameSite !== "none" && - cookie.sameSite !== undefined && - sameSiteContext - ) { - // "If the cookie's "same-site-flag" is not "None", and the cookie - // is being set from a context whose "site for cookies" is not an - // exact match for request-uri's host's registered domain, then - // abort these steps and ignore the newly created cookie entirely." - if (sameSiteContext === "none") { - err = new Error( - "Cookie is SameSite but this is a cross-origin request" - ); - return cb(options.ignoreError ? null : err); - } - } +function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } - /* 6265bis-02 S5.4 Steps 15 & 16 */ - const ignoreErrorForPrefixSecurity = - this.prefixSecurity === PrefixSecurityEnum.SILENT; - const prefixSecurityDisabled = - this.prefixSecurity === PrefixSecurityEnum.DISABLED; - /* If prefix checking is not disabled ...*/ - if (!prefixSecurityDisabled) { - let errorFound = false; - let errorMsg; - /* Check secure prefix condition */ - if (!isSecurePrefixConditionMet(cookie)) { - errorFound = true; - errorMsg = "Cookie has __Secure prefix but Secure attribute is not set"; - } else if (!isHostPrefixConditionMet(cookie)) { - /* Check host prefix condition */ - errorFound = true; - errorMsg = - "Cookie has __Host prefix but either Secure or HostOnly attribute is not set or Path is not '/'"; - } - if (errorFound) { - return cb( - options.ignoreError || ignoreErrorForPrefixSecurity - ? null - : new Error(errorMsg) - ); - } - } +function v4(options, buf, offset) { + options = options || {}; - const store = this.store; + const rnds = options.random || (options.rng || _rng.default)(); // Per 4.4, set bits for version and `clock_seq_hi_and_reserved` - if (!store.updateCookie) { - store.updateCookie = function(oldCookie, newCookie, cb) { - this.putCookie(newCookie, cb); - }; - } - function withCookie(err, oldCookie) { - if (err) { - return cb(err); - } + rnds[6] = rnds[6] & 0x0f | 0x40; + rnds[8] = rnds[8] & 0x3f | 0x80; // Copy bytes to buffer, if provided - const next = function(err) { - if (err) { - return cb(err); - } else { - cb(null, cookie); - } - }; + if (buf) { + offset = offset || 0; - if (oldCookie) { - // S5.3 step 11 - "If the cookie store contains a cookie with the same name, - // domain, and path as the newly created cookie:" - if (options.http === false && oldCookie.httpOnly) { - // step 11.2 - err = new Error("old Cookie is HttpOnly and this isn't an HTTP API"); - return cb(options.ignoreError ? 
null : err); - } - cookie.creation = oldCookie.creation; // step 11.3 - cookie.creationIndex = oldCookie.creationIndex; // preserve tie-breaker - cookie.lastAccessed = now; - // Step 11.4 (delete cookie) is implied by just setting the new one: - store.updateCookie(oldCookie, cookie, next); // step 12 - } else { - cookie.creation = cookie.lastAccessed = now; - store.putCookie(cookie, next); // step 12 - } + for (let i = 0; i < 16; ++i) { + buf[offset + i] = rnds[i]; } - store.findCookie(cookie.domain, cookie.path, cookie.key, withCookie); + return buf; } - // RFC6365 S5.4 - getCookies(url, options, cb) { - validators.validate(validators.isNonEmptyString(url), cb, url); - const context = getCookieContext(url); - if (validators.isFunction(options)) { - cb = options; - options = {}; - } - validators.validate(validators.isObject(options), cb, options); - validators.validate(validators.isFunction(cb), cb); + return (0, _stringify.default)(rnds); +} - const host = canonicalDomain(context.hostname); - const path = context.pathname || "/"; +var _default = v4; +exports["default"] = _default; - let secure = options.secure; - if ( - secure == null && - context.protocol && - (context.protocol == "https:" || context.protocol == "wss:") - ) { - secure = true; - } - - let sameSiteLevel = 0; - if (options.sameSiteContext) { - const sameSiteContext = checkSameSiteContext(options.sameSiteContext); - sameSiteLevel = Cookie.sameSiteLevel[sameSiteContext]; - if (!sameSiteLevel) { - return cb(new Error(SAME_SITE_CONTEXT_VAL_ERR)); - } - } - - let http = options.http; - if (http == null) { - http = true; - } - - const now = options.now || Date.now(); - const expireCheck = options.expire !== false; - const allPaths = !!options.allPaths; - const store = this.store; +/***/ }), - function matchingCookie(c) { - // "Either: - // The cookie's host-only-flag is true and the canonicalized - // request-host is identical to the cookie's domain. - // Or: - // The cookie's host-only-flag is false and the canonicalized - // request-host domain-matches the cookie's domain." - if (c.hostOnly) { - if (c.domain != host) { - return false; - } - } else { - if (!domainMatch(host, c.domain, false)) { - return false; - } - } +/***/ 8788: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - // "The request-uri's path path-matches the cookie's path." 
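// ---------------------------------------------------------------------------
// Editor's note: a minimal sketch (not shipped in this bundle) of what the
// v4 code added above does with its 16 random bytes: per RFC 4122 §4.4 it
// overwrites the version nibble of byte 6 with 0b0100 (version 4) and the
// two high bits of byte 8 with 0b10 (the RFC variant). Node's
// `crypto.randomBytes` stands in here for the bundled `rng` helper.
const crypto = require('crypto');

function sketchV4Bytes() {
  const rnds = crypto.randomBytes(16);
  rnds[6] = (rnds[6] & 0x0f) | 0x40; // version nibble: 0100xxxx
  rnds[8] = (rnds[8] & 0x3f) | 0x80; // variant bits:   10xxxxxx
  return rnds; // the bundled `stringify` then formats these as 8-4-4-4-12 hex
}
// ---------------------------------------------------------------------------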
- if (!allPaths && !pathMatch(path, c.path)) { - return false; - } +"use strict"; - // "If the cookie's secure-only-flag is true, then the request-uri's - // scheme must denote a "secure" protocol" - if (c.secure && !secure) { - return false; - } - // "If the cookie's http-only-flag is true, then exclude the cookie if the - // cookie-string is being generated for a "non-HTTP" API" - if (c.httpOnly && !http) { - return false; - } +Object.defineProperty(exports, "__esModule", ({ + value: true +})); +exports["default"] = void 0; - // RFC6265bis-02 S5.3.7 - if (sameSiteLevel) { - const cookieLevel = Cookie.sameSiteLevel[c.sameSite || "none"]; - if (cookieLevel > sameSiteLevel) { - // only allow cookies at or below the request level - return false; - } - } +var _v = _interopRequireDefault(__nccwpck_require__(4085)); - // deferred from S5.3 - // non-RFC: allow retention of expired cookies by choice - if (expireCheck && c.expiryTime() <= now) { - store.removeCookie(c.domain, c.path, c.key, () => {}); // result ignored - return false; - } +var _sha = _interopRequireDefault(__nccwpck_require__(6631)); - return true; - } +function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - store.findCookies( - host, - allPaths ? null : path, - this.allowSpecialUseDomain, - (err, cookies) => { - if (err) { - return cb(err); - } +const v5 = (0, _v.default)('v5', 0x50, _sha.default); +var _default = v5; +exports["default"] = _default; - cookies = cookies.filter(matchingCookie); +/***/ }), - // sorting of S5.4 part 2 - if (options.sort !== false) { - cookies = cookies.sort(cookieCompare); - } +/***/ 4418: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - // S5.4 part 3 - const now = new Date(); - for (const cookie of cookies) { - cookie.lastAccessed = now; - } - // TODO persist lastAccessed +"use strict"; - cb(null, cookies); - } - ); - } - getCookieString(...args) { - const cb = args.pop(); - validators.validate(validators.isFunction(cb), cb); - const next = function(err, cookies) { - if (err) { - cb(err); - } else { - cb( - null, - cookies - .sort(cookieCompare) - .map(c => c.cookieString()) - .join("; ") - ); - } - }; - args.push(next); - this.getCookies.apply(this, args); - } +Object.defineProperty(exports, "__esModule", ({ + value: true +})); +exports["default"] = void 0; - getSetCookieStrings(...args) { - const cb = args.pop(); - validators.validate(validators.isFunction(cb), cb); - const next = function(err, cookies) { - if (err) { - cb(err); - } else { - cb( - null, - cookies.map(c => { - return c.toString(); - }) - ); - } - }; - args.push(next); - this.getCookies.apply(this, args); - } +var _regex = _interopRequireDefault(__nccwpck_require__(690)); - serialize(cb) { - validators.validate(validators.isFunction(cb), cb); - let type = this.store.constructor.name; - if (validators.isObject(type)) { - type = null; - } +function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - // update README.md "Serialization Format" if you change this, please! - const serialized = { - // The version of tough-cookie that serialized this jar. Generally a good - // practice since future versions can make data import decisions based on - // known past behavior. When/if this matters, use `semver`. 
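// ---------------------------------------------------------------------------
// Editor's note: a minimal sketch (not part of this bundle) of the shared
// v3/v5 generator added above: hash the 16-byte namespace UUID followed by
// the name bytes, then stamp the version nibble (0x30 for MD5/v3, 0x50 for
// SHA-1/v5) and the RFC variant bits. Node's `crypto` replaces the bundled
// md5/sha1 helpers; parsing the namespace string into bytes is elided.
const crypto = require('crypto');

function sketchNamespaced(nameBytes, namespaceBytes, version /* 0x30|0x50 */) {
  const algo = version === 0x30 ? 'md5' : 'sha1';
  const bytes = crypto.createHash(algo)
    .update(Buffer.concat([namespaceBytes, nameBytes]))
    .digest()
    .subarray(0, 16);                     // SHA-1 yields 20 bytes; keep 16
  bytes[6] = (bytes[6] & 0x0f) | version; // version nibble, per §4.3
  bytes[8] = (bytes[8] & 0x3f) | 0x80;    // RFC 4122 variant
  return bytes;
}
// ---------------------------------------------------------------------------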
- version: `tough-cookie@${VERSION}`, +function validate(uuid) { + return typeof uuid === 'string' && _regex.default.test(uuid); +} - // add the store type, to make humans happy: - storeType: type, +var _default = validate; +exports["default"] = _default; - // CookieJar configuration: - rejectPublicSuffixes: !!this.rejectPublicSuffixes, - enableLooseMode: !!this.enableLooseMode, - allowSpecialUseDomain: !!this.allowSpecialUseDomain, - prefixSecurity: getNormalizedPrefixSecurity(this.prefixSecurity), +/***/ }), - // this gets filled from getAllCookies: - cookies: [] - }; +/***/ 7909: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - if ( - !( - this.store.getAllCookies && - typeof this.store.getAllCookies === "function" - ) - ) { - return cb( - new Error( - "store does not support getAllCookies and cannot be serialized" - ) - ); - } +"use strict"; - this.store.getAllCookies((err, cookies) => { - if (err) { - return cb(err); - } - serialized.cookies = cookies.map(cookie => { - // convert to serialized 'raw' cookies - cookie = cookie instanceof Cookie ? cookie.toJSON() : cookie; +Object.defineProperty(exports, "__esModule", ({ + value: true +})); +exports["default"] = void 0; - // Remove the index so new ones get assigned during deserialization - delete cookie.creationIndex; +var _validate = _interopRequireDefault(__nccwpck_require__(4418)); - return cookie; - }); +function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - return cb(null, serialized); - }); +function version(uuid) { + if (!(0, _validate.default)(uuid)) { + throw TypeError('Invalid UUID'); } - toJSON() { - return this.serializeSync(); - } + return parseInt(uuid.substr(14, 1), 16); +} - // use the class method CookieJar.deserialize instead of calling this directly - _importCookies(serialized, cb) { - let cookies = serialized.cookies; - if (!cookies || !Array.isArray(cookies)) { - return cb(new Error("serialized jar has no cookies array")); - } - cookies = cookies.slice(); // do not modify the original +var _default = version; +exports["default"] = _default; - const putNext = err => { - if (err) { - return cb(err); - } +/***/ }), - if (!cookies.length) { - return cb(err, this); - } +/***/ 7094: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - let cookie; - try { - cookie = fromJSON(cookies.shift()); - } catch (e) { - return cb(e); - } +"use strict"; - if (cookie === null) { - return putNext(null); // skip this cookie - } - this.store.putCookie(cookie, putNext); - }; +Object.defineProperty(exports, "__esModule", ({ value: true })); - putNext(); - } +var logger$1 = __nccwpck_require__(3233); +var abortController = __nccwpck_require__(2557); - clone(newStore, cb) { - if (arguments.length === 1) { - cb = newStore; - newStore = null; - } +// Copyright (c) Microsoft Corporation. +/** + * The `@azure/logger` configuration for this package. + * @internal + */ +const logger = logger$1.createClientLogger("core-lro"); - this.serialize((err, serialized) => { - if (err) { - return cb(err); - } - CookieJar.deserialize(serialized, newStore, cb); - }); - } +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +/** + * The default time interval to wait before sending the next polling request. + */ +const POLL_INTERVAL_IN_MS = 2000; +/** + * The closed set of terminal states. 
+ */ +const terminalStates = ["succeeded", "canceled", "failed"]; - cloneSync(newStore) { - if (arguments.length === 0) { - return this._cloneSync(); - } - if (!newStore.synchronous) { - throw new Error( - "CookieJar clone destination store is not synchronous; use async API instead." - ); +// Copyright (c) Microsoft Corporation. +/** + * Deserializes the state + */ +function deserializeState(serializedState) { + try { + return JSON.parse(serializedState).state; } - return this._cloneSync(newStore); - } - - removeAllCookies(cb) { - validators.validate(validators.isFunction(cb), cb); - const store = this.store; - - // Check that the store implements its own removeAllCookies(). The default - // implementation in Store will immediately call the callback with a "not - // implemented" Error. - if ( - typeof store.removeAllCookies === "function" && - store.removeAllCookies !== Store.prototype.removeAllCookies - ) { - return store.removeAllCookies(cb); + catch (e) { + throw new Error(`Unable to deserialize input state: ${serializedState}`); } - - store.getAllCookies((err, cookies) => { - if (err) { - return cb(err); - } - - if (cookies.length === 0) { - return cb(null); - } - - let completedCount = 0; - const removeErrors = []; - - function removeCookieCb(removeErr) { - if (removeErr) { - removeErrors.push(removeErr); +} +function setStateError(inputs) { + const { state, stateProxy } = inputs; + return (error) => { + stateProxy.setError(state, error); + stateProxy.setFailed(state); + throw error; + }; +} +function processOperationStatus(result) { + const { state, stateProxy, status, isDone, processResult, response, setErrorAsResult } = result; + switch (status) { + case "succeeded": { + stateProxy.setSucceeded(state); + break; } - - completedCount++; - - if (completedCount === cookies.length) { - return cb(removeErrors.length ? removeErrors[0] : null); + case "failed": { + stateProxy.setError(state, new Error(`The long-running operation has failed`)); + stateProxy.setFailed(state); + break; + } + case "canceled": { + stateProxy.setCanceled(state); + break; } - } - - cookies.forEach(cookie => { - store.removeCookie( - cookie.domain, - cookie.path, - cookie.key, - removeCookieCb - ); - }); - }); - } - - static deserialize(strOrObj, store, cb) { - if (arguments.length !== 3) { - // store is optional - cb = store; - store = null; - } - validators.validate(validators.isFunction(cb), cb); - - let serialized; - if (typeof strOrObj === "string") { - serialized = jsonParse(strOrObj); - if (serialized instanceof Error) { - return cb(serialized); - } - } else { - serialized = strOrObj; } - - const jar = new CookieJar(store, { - rejectPublicSuffixes: serialized.rejectPublicSuffixes, - looseMode: serialized.enableLooseMode, - allowSpecialUseDomain: serialized.allowSpecialUseDomain, - prefixSecurity: serialized.prefixSecurity - }); - jar._importCookies(serialized, err => { - if (err) { - return cb(err); - } - cb(null, jar); - }); - } - - static deserializeSync(strOrObj, store) { - const serialized = - typeof strOrObj === "string" ? JSON.parse(strOrObj) : strOrObj; - const jar = new CookieJar(store, { - rejectPublicSuffixes: serialized.rejectPublicSuffixes, - looseMode: serialized.enableLooseMode - }); - - // catch this mistake early: - if (!jar.store.synchronous) { - throw new Error( - "CookieJar store is not synchronous; use async API instead." - ); + if ((isDone === null || isDone === void 0 ? void 0 : isDone(response, state)) || + (isDone === undefined && + ["succeeded", "canceled"].concat(setErrorAsResult ? 
[] : ["failed"]).includes(status))) { + stateProxy.setResult(state, buildResult({ + response, + state, + processResult, + })); } - - jar._importCookiesSync(serialized); - return jar; - } } -CookieJar.fromJSON = CookieJar.deserializeSync; - -[ - "_importCookies", - "clone", - "getCookies", - "getCookieString", - "getSetCookieStrings", - "removeAllCookies", - "serialize", - "setCookie" -].forEach(name => { - CookieJar.prototype[name] = fromCallback(CookieJar.prototype[name]); -}); -CookieJar.deserialize = fromCallback(CookieJar.deserialize); - -// Use a closure to provide a true imperative API for synchronous stores. -function syncWrap(method) { - return function(...args) { - if (!this.store.synchronous) { - throw new Error( - "CookieJar store is not synchronous; use async API instead." - ); - } - - let syncErr, syncResult; - this[method](...args, (err, result) => { - syncErr = err; - syncResult = result; - }); - - if (syncErr) { - throw syncErr; +function buildResult(inputs) { + const { processResult, response, state } = inputs; + return processResult ? processResult(response, state) : response; +} +/** + * Initiates the long-running operation. + */ +async function initOperation(inputs) { + const { init, stateProxy, processResult, getOperationStatus, withOperationLocation, setErrorAsResult, } = inputs; + const { operationLocation, resourceLocation, metadata, response } = await init(); + if (operationLocation) + withOperationLocation === null || withOperationLocation === void 0 ? void 0 : withOperationLocation(operationLocation, false); + const config = { + metadata, + operationLocation, + resourceLocation, + }; + logger.verbose(`LRO: Operation description:`, config); + const state = stateProxy.initState(config); + const status = getOperationStatus({ response, state, operationLocation }); + processOperationStatus({ state, status, stateProxy, response, setErrorAsResult, processResult }); + return state; +} +async function pollOperationHelper(inputs) { + const { poll, state, stateProxy, operationLocation, getOperationStatus, getResourceLocation, options, } = inputs; + const response = await poll(operationLocation, options).catch(setStateError({ + state, + stateProxy, + })); + const status = getOperationStatus(response, state); + logger.verbose(`LRO: Status:\n\tPolling from: ${state.config.operationLocation}\n\tOperation status: ${status}\n\tPolling status: ${terminalStates.includes(status) ? 
"Stopped" : "Running"}`); + if (status === "succeeded") { + const resourceLocation = getResourceLocation(response, state); + if (resourceLocation !== undefined) { + return { + response: await poll(resourceLocation).catch(setStateError({ state, stateProxy })), + status, + }; + } } - return syncResult; - }; + return { response, status }; } - -exports.version = VERSION; -exports.CookieJar = CookieJar; -exports.Cookie = Cookie; -exports.Store = Store; -exports.MemoryCookieStore = MemoryCookieStore; -exports.parseDate = parseDate; -exports.formatDate = formatDate; -exports.parse = parse; -exports.fromJSON = fromJSON; -exports.domainMatch = domainMatch; -exports.defaultPath = defaultPath; -exports.pathMatch = pathMatch; -exports.getPublicSuffix = pubsuffix.getPublicSuffix; -exports.cookieCompare = cookieCompare; -exports.permuteDomain = __nccwpck_require__(5696).permuteDomain; -exports.permutePath = permutePath; -exports.canonicalDomain = canonicalDomain; -exports.PrefixSecurityEnum = PrefixSecurityEnum; -exports.ParameterError = validators.ParameterError; - - -/***/ }), - -/***/ 6738: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; -var __webpack_unused_export__; -/*! - * Copyright (c) 2015, Salesforce.com, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * 3. Neither the name of Salesforce.com nor the names of its contributors may - * be used to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -const { fromCallback } = __nccwpck_require__(9046); -const Store = (__nccwpck_require__(7707)/* .Store */ .y); -const permuteDomain = (__nccwpck_require__(5696).permuteDomain); -const pathMatch = (__nccwpck_require__(807)/* .pathMatch */ .U); -const { getCustomInspectSymbol, getUtilInspect } = __nccwpck_require__(9375); - -class MemoryCookieStore extends Store { - constructor() { - super(); - this.synchronous = true; - this.idx = {}; - const customInspectSymbol = getCustomInspectSymbol(); - if (customInspectSymbol) { - this[customInspectSymbol] = this.inspect; +/** Polls the long-running operation. 
*/ +async function pollOperation(inputs) { + const { poll, state, stateProxy, options, getOperationStatus, getResourceLocation, getOperationLocation, withOperationLocation, getPollingInterval, processResult, updateState, setDelay, isDone, setErrorAsResult, } = inputs; + const { operationLocation } = state.config; + if (operationLocation !== undefined) { + const { response, status } = await pollOperationHelper({ + poll, + getOperationStatus, + state, + stateProxy, + operationLocation, + getResourceLocation, + options, + }); + processOperationStatus({ + status, + response, + state, + stateProxy, + isDone, + processResult, + setErrorAsResult, + }); + if (!terminalStates.includes(status)) { + const intervalInMs = getPollingInterval === null || getPollingInterval === void 0 ? void 0 : getPollingInterval(response); + if (intervalInMs) + setDelay(intervalInMs); + const location = getOperationLocation === null || getOperationLocation === void 0 ? void 0 : getOperationLocation(response, state); + if (location !== undefined) { + const isUpdated = operationLocation !== location; + state.config.operationLocation = location; + withOperationLocation === null || withOperationLocation === void 0 ? void 0 : withOperationLocation(location, isUpdated); + } + else + withOperationLocation === null || withOperationLocation === void 0 ? void 0 : withOperationLocation(operationLocation, false); + } + updateState === null || updateState === void 0 ? void 0 : updateState(state, response); } - } - - inspect() { - const util = { inspect: getUtilInspect(inspectFallback) }; - return `{ idx: ${util.inspect(this.idx, false, 2)} }`; - } +} - findCookie(domain, path, key, cb) { - if (!this.idx[domain]) { - return cb(null, undefined); +// Copyright (c) Microsoft Corporation. +function getOperationLocationPollingUrl(inputs) { + const { azureAsyncOperation, operationLocation } = inputs; + return operationLocation !== null && operationLocation !== void 0 ? operationLocation : azureAsyncOperation; +} +function getLocationHeader(rawResponse) { + return rawResponse.headers["location"]; +} +function getOperationLocationHeader(rawResponse) { + return rawResponse.headers["operation-location"]; +} +function getAzureAsyncOperationHeader(rawResponse) { + return rawResponse.headers["azure-asyncoperation"]; +} +function findResourceLocation(inputs) { + const { location, requestMethod, requestPath, resourceLocationConfig } = inputs; + switch (requestMethod) { + case "PUT": { + return requestPath; + } + case "DELETE": { + return undefined; + } + default: { + switch (resourceLocationConfig) { + case "azure-async-operation": { + return undefined; + } + case "original-uri": { + return requestPath; + } + case "location": + default: { + return location; + } + } + } } - if (!this.idx[domain][path]) { - return cb(null, undefined); +} +function inferLroMode(inputs) { + const { rawResponse, requestMethod, requestPath, resourceLocationConfig } = inputs; + const operationLocation = getOperationLocationHeader(rawResponse); + const azureAsyncOperation = getAzureAsyncOperationHeader(rawResponse); + const pollingUrl = getOperationLocationPollingUrl({ operationLocation, azureAsyncOperation }); + const location = getLocationHeader(rawResponse); + const normalizedRequestMethod = requestMethod === null || requestMethod === void 0 ? 
void 0 : requestMethod.toLocaleUpperCase(); + if (pollingUrl !== undefined) { + return { + mode: "OperationLocation", + operationLocation: pollingUrl, + resourceLocation: findResourceLocation({ + requestMethod: normalizedRequestMethod, + location, + requestPath, + resourceLocationConfig, + }), + }; } - return cb(null, this.idx[domain][path][key] || null); - } - findCookies(domain, path, allowSpecialUseDomain, cb) { - const results = []; - if (typeof allowSpecialUseDomain === "function") { - cb = allowSpecialUseDomain; - allowSpecialUseDomain = true; + else if (location !== undefined) { + return { + mode: "ResourceLocation", + operationLocation: location, + }; } - if (!domain) { - return cb(null, []); + else if (normalizedRequestMethod === "PUT" && requestPath) { + return { + mode: "Body", + operationLocation: requestPath, + }; } - - let pathMatcher; - if (!path) { - // null means "all paths" - pathMatcher = function matchAll(domainIndex) { - for (const curPath in domainIndex) { - const pathIndex = domainIndex[curPath]; - for (const key in pathIndex) { - results.push(pathIndex[key]); - } + else { + return undefined; + } +} +function transformStatus(inputs) { + const { status, statusCode } = inputs; + if (typeof status !== "string" && status !== undefined) { + throw new Error(`Polling was unsuccessful. Expected status to have a string value or no value but it has instead: ${status}. This doesn't necessarily indicate the operation has failed. Check your Azure subscription or resource status for more information.`); + } + switch (status === null || status === void 0 ? void 0 : status.toLocaleLowerCase()) { + case undefined: + return toOperationStatus(statusCode); + case "succeeded": + return "succeeded"; + case "failed": + return "failed"; + case "running": + case "accepted": + case "started": + case "canceling": + case "cancelling": + return "running"; + case "canceled": + case "cancelled": + return "canceled"; + default: { + logger.warning(`LRO: unrecognized operation status: ${status}`); + return status; } - }; - } else { - pathMatcher = function matchRFC(domainIndex) { - //NOTE: we should use path-match algorithm from S5.1.4 here - //(see : https://github.com/ChromiumWebApps/chromium/blob/b3d3b4da8bb94c1b2e061600df106d590fda3620/net/cookies/canonical_cookie.cc#L299) - Object.keys(domainIndex).forEach(cookiePath => { - if (pathMatch(path, cookiePath)) { - const pathIndex = domainIndex[cookiePath]; - for (const key in pathIndex) { - results.push(pathIndex[key]); - } - } - }); - }; } - - const domains = permuteDomain(domain, allowSpecialUseDomain) || [domain]; - const idx = this.idx; - domains.forEach(curDomain => { - const domainIndex = idx[curDomain]; - if (!domainIndex) { - return; - } - pathMatcher(domainIndex); - }); - - cb(null, results); - } - - putCookie(cookie, cb) { - if (!this.idx[cookie.domain]) { - this.idx[cookie.domain] = {}; +} +function getStatus(rawResponse) { + var _a; + const { status } = (_a = rawResponse.body) !== null && _a !== void 0 ? _a : {}; + return transformStatus({ status, statusCode: rawResponse.statusCode }); +} +function getProvisioningState(rawResponse) { + var _a, _b; + const { properties, provisioningState } = (_a = rawResponse.body) !== null && _a !== void 0 ? _a : {}; + const status = (_b = properties === null || properties === void 0 ? void 0 : properties.provisioningState) !== null && _b !== void 0 ? 
_b : provisioningState; + return transformStatus({ status, statusCode: rawResponse.statusCode }); +} +function toOperationStatus(statusCode) { + if (statusCode === 202) { + return "running"; } - if (!this.idx[cookie.domain][cookie.path]) { - this.idx[cookie.domain][cookie.path] = {}; + else if (statusCode < 300) { + return "succeeded"; } - this.idx[cookie.domain][cookie.path][cookie.key] = cookie; - cb(null); - } - updateCookie(oldCookie, newCookie, cb) { - // updateCookie() may avoid updating cookies that are identical. For example, - // lastAccessed may not be important to some stores and an equality - // comparison could exclude that field. - this.putCookie(newCookie, cb); - } - removeCookie(domain, path, key, cb) { - if ( - this.idx[domain] && - this.idx[domain][path] && - this.idx[domain][path][key] - ) { - delete this.idx[domain][path][key]; + else { + return "failed"; } - cb(null); - } - removeCookies(domain, path, cb) { - if (this.idx[domain]) { - if (path) { - delete this.idx[domain][path]; - } else { - delete this.idx[domain]; - } +} +function parseRetryAfter({ rawResponse }) { + const retryAfter = rawResponse.headers["retry-after"]; + if (retryAfter !== undefined) { + // Retry-After header value is either in HTTP date format, or in seconds + const retryAfterInSeconds = parseInt(retryAfter); + return isNaN(retryAfterInSeconds) + ? calculatePollingIntervalFromDate(new Date(retryAfter)) + : retryAfterInSeconds * 1000; } - return cb(null); - } - removeAllCookies(cb) { - this.idx = {}; - return cb(null); - } - getAllCookies(cb) { - const cookies = []; - const idx = this.idx; - - const domains = Object.keys(idx); - domains.forEach(domain => { - const paths = Object.keys(idx[domain]); - paths.forEach(path => { - const keys = Object.keys(idx[domain][path]); - keys.forEach(key => { - if (key !== null) { - cookies.push(idx[domain][path][key]); - } - }); - }); - }); - - // Sort by creationIndex so deserializing retains the creation order. - // When implementing your own store, this SHOULD retain the order too - cookies.sort((a, b) => { - return (a.creationIndex || 0) - (b.creationIndex || 0); + return undefined; +} +function calculatePollingIntervalFromDate(retryAfterDate) { + const timeNow = Math.floor(new Date().getTime()); + const retryAfterTime = retryAfterDate.getTime(); + if (timeNow < retryAfterTime) { + return retryAfterTime - timeNow; + } + return undefined; +} +function getStatusFromInitialResponse(inputs) { + const { response, state, operationLocation } = inputs; + function helper() { + var _a; + const mode = (_a = state.config.metadata) === null || _a === void 0 ? void 0 : _a["mode"]; + switch (mode) { + case undefined: + return toOperationStatus(response.rawResponse.statusCode); + case "Body": + return getOperationStatus(response, state); + default: + return "running"; + } + } + const status = helper(); + return status === "running" && operationLocation === undefined ? "succeeded" : status; +} +/** + * Initiates the long-running operation. + */ +async function initHttpOperation(inputs) { + const { stateProxy, resourceLocationConfig, processResult, lro, setErrorAsResult } = inputs; + return initOperation({ + init: async () => { + const response = await lro.sendInitialRequest(); + const config = inferLroMode({ + rawResponse: response.rawResponse, + requestPath: lro.requestPath, + requestMethod: lro.requestMethod, + resourceLocationConfig, + }); + return Object.assign({ response, operationLocation: config === null || config === void 0 ? 
void 0 : config.operationLocation, resourceLocation: config === null || config === void 0 ? void 0 : config.resourceLocation }, ((config === null || config === void 0 ? void 0 : config.mode) ? { metadata: { mode: config.mode } } : {})); + }, + stateProxy, + processResult: processResult + ? ({ flatResponse }, state) => processResult(flatResponse, state) + : ({ flatResponse }) => flatResponse, + getOperationStatus: getStatusFromInitialResponse, + setErrorAsResult, }); - - cb(null, cookies); - } } - -[ - "findCookie", - "findCookies", - "putCookie", - "updateCookie", - "removeCookie", - "removeCookies", - "removeAllCookies", - "getAllCookies" -].forEach(name => { - MemoryCookieStore.prototype[name] = fromCallback( - MemoryCookieStore.prototype[name] - ); -}); - -exports.m = MemoryCookieStore; - -function inspectFallback(val) { - const domains = Object.keys(val); - if (domains.length === 0) { - return "{}"; - } - let result = "{\n"; - Object.keys(val).forEach((domain, i) => { - result += formatDomain(domain, val[domain]); - if (i < domains.length - 1) { - result += ","; +function getOperationLocation({ rawResponse }, state) { + var _a; + const mode = (_a = state.config.metadata) === null || _a === void 0 ? void 0 : _a["mode"]; + switch (mode) { + case "OperationLocation": { + return getOperationLocationPollingUrl({ + operationLocation: getOperationLocationHeader(rawResponse), + azureAsyncOperation: getAzureAsyncOperationHeader(rawResponse), + }); + } + case "ResourceLocation": { + return getLocationHeader(rawResponse); + } + case "Body": + default: { + return undefined; + } } - result += "\n"; - }); - result += "}"; - return result; } - -function formatDomain(domainName, domainValue) { - const indent = " "; - let result = `${indent}'${domainName}': {\n`; - Object.keys(domainValue).forEach((path, i, paths) => { - result += formatPath(path, domainValue[path]); - if (i < paths.length - 1) { - result += ","; +function getOperationStatus({ rawResponse }, state) { + var _a; + const mode = (_a = state.config.metadata) === null || _a === void 0 ? void 0 : _a["mode"]; + switch (mode) { + case "OperationLocation": { + return getStatus(rawResponse); + } + case "ResourceLocation": { + return toOperationStatus(rawResponse.statusCode); + } + case "Body": { + return getProvisioningState(rawResponse); + } + default: + throw new Error(`Internal error: Unexpected operation mode: ${mode}`); } - result += "\n"; - }); - result += `${indent}}`; - return result; } - -function formatPath(pathName, pathValue) { - const indent = " "; - let result = `${indent}'${pathName}': {\n`; - Object.keys(pathValue).forEach((cookieName, i, cookieNames) => { - const cookie = pathValue[cookieName]; - result += ` ${cookieName}: ${cookie.inspect()}`; - if (i < cookieNames.length - 1) { - result += ","; +function getResourceLocation({ flatResponse }, state) { + if (typeof flatResponse === "object") { + const resourceLocation = flatResponse.resourceLocation; + if (resourceLocation !== undefined) { + state.config.resourceLocation = resourceLocation; + } } - result += "\n"; - }); - result += `${indent}}`; - return result; + return state.config.resourceLocation; +} +/** Polls the long-running operation. */ +async function pollHttpOperation(inputs) { + const { lro, stateProxy, options, processResult, updateState, setDelay, state, setErrorAsResult, } = inputs; + return pollOperation({ + state, + stateProxy, + setDelay, + processResult: processResult + ? 
({ flatResponse }, inputState) => processResult(flatResponse, inputState) + : ({ flatResponse }) => flatResponse, + updateState, + getPollingInterval: parseRetryAfter, + getOperationLocation, + getOperationStatus, + getResourceLocation, + options, + /** + * The expansion here is intentional because `lro` could be an object that + * references an inner this, so we need to preserve a reference to it. + */ + poll: async (location, inputOptions) => lro.sendPollRequest(location, inputOptions), + setErrorAsResult, + }); } -__webpack_unused_export__ = inspectFallback; - - -/***/ }), - -/***/ 807: -/***/ ((__unused_webpack_module, exports) => { - -"use strict"; -/*! - * Copyright (c) 2015, Salesforce.com, Inc. - * All rights reserved. +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +/** + * Map an optional value through a function + * @internal + */ +const maybemap = (value, f) => value === undefined ? undefined : f(value); +const INTERRUPTED = new Error("The poller is already stopped"); +/** + * A promise that delays resolution until a certain amount of time (in milliseconds) has passed, with facilities for + * robust cancellation. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: + * ### Example: * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * ```javascript + * let toCancel; * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. + * // Wait 20 seconds, and optionally allow the function to be cancelled. + * await delayMs(20000, (cancel) => { toCancel = cancel }); * - * 3. Neither the name of Salesforce.com nor the names of its contributors may - * be used to endorse or promote products derived from this software without - * specific prior written permission. + * // ... if `toCancel` is called before the 20 second timer expires, then the delayMs promise will reject. + * ``` * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. + * @internal + * @param ms - the number of milliseconds to wait before resolving + * @param cb - a callback that can provide the caller with a cancellation function */ +function delayMs(ms) { + let aborted = false; + let toReject; + return Object.assign(new Promise((resolve, reject) => { + let token; + toReject = () => { + maybemap(token, clearTimeout); + reject(INTERRUPTED); + }; + // In the rare case that the operation is _already_ aborted, we will reject instantly. 
This could happen, for + // example, if the user calls the cancellation function immediately without yielding execution. + if (aborted) { + toReject(); + } + else { + token = setTimeout(resolve, ms); + } + }), { + cancel: () => { + aborted = true; + toReject === null || toReject === void 0 ? void 0 : toReject(); + }, + }); +} -/* - * "A request-path path-matches a given cookie-path if at least one of the - * following conditions holds:" +// Copyright (c) Microsoft Corporation. +const createStateProxy$1 = () => ({ + /** + * The state at this point is created to be of type OperationState. + * It will be updated later to be of type TState when the + * customer-provided callback, `updateState`, is called during polling. + */ + initState: (config) => ({ status: "running", config }), + setCanceled: (state) => (state.status = "canceled"), + setError: (state, error) => (state.error = error), + setResult: (state, result) => (state.result = result), + setRunning: (state) => (state.status = "running"), + setSucceeded: (state) => (state.status = "succeeded"), + setFailed: (state) => (state.status = "failed"), + getError: (state) => state.error, + getResult: (state) => state.result, + isCanceled: (state) => state.status === "canceled", + isFailed: (state) => state.status === "failed", + isRunning: (state) => state.status === "running", + isSucceeded: (state) => state.status === "succeeded", +}); +/** + * Returns a poller factory. */ -function pathMatch(reqPath, cookiePath) { - // "o The cookie-path and the request-path are identical." - if (cookiePath === reqPath) { - return true; - } +function buildCreatePoller(inputs) { + const { getOperationLocation, getStatusFromInitialResponse, getStatusFromPollResponse, getResourceLocation, getPollingInterval, resolveOnUnsuccessful, } = inputs; + return async ({ init, poll }, options) => { + const { processResult, updateState, withOperationLocation: withOperationLocationCallback, intervalInMs = POLL_INTERVAL_IN_MS, restoreFrom, } = options || {}; + const stateProxy = createStateProxy$1(); + const withOperationLocation = withOperationLocationCallback + ? (() => { + let called = false; + return (operationLocation, isUpdated) => { + if (isUpdated) + withOperationLocationCallback(operationLocation); + else if (!called) + withOperationLocationCallback(operationLocation); + called = true; + }; + })() + : undefined; + const state = restoreFrom + ? deserializeState(restoreFrom) + : await initOperation({ + init, + stateProxy, + processResult, + getOperationStatus: getStatusFromInitialResponse, + withOperationLocation, + setErrorAsResult: !resolveOnUnsuccessful, + }); + let resultPromise; + let cancelJob; + const abortController$1 = new abortController.AbortController(); + const handlers = new Map(); + const handleProgressEvents = async () => handlers.forEach((h) => h(state)); + let currentPollIntervalInMs = intervalInMs; + const poller = { + getOperationState: () => state, + getResult: () => state.result, + isDone: () => ["succeeded", "failed", "canceled"].includes(state.status), + isStopped: () => resultPromise === undefined, + stopPolling: () => { + abortController$1.abort(); + cancelJob === null || cancelJob === void 0 ? void 0 : cancelJob(); + }, + toString: () => JSON.stringify({ + state, + }), + onProgress: (callback) => { + const s = Symbol(); + handlers.set(s, callback); + return () => handlers.delete(s); + }, + pollUntilDone: (pollOptions) => (resultPromise !== null && resultPromise !== void 0 ? 
resultPromise : (resultPromise = (async () => { + const { abortSignal: inputAbortSignal } = pollOptions || {}; + const { signal: abortSignal } = inputAbortSignal + ? new abortController.AbortController([inputAbortSignal, abortController$1.signal]) + : abortController$1; + if (!poller.isDone()) { + await poller.poll({ abortSignal }); + while (!poller.isDone()) { + const delay = delayMs(currentPollIntervalInMs); + cancelJob = delay.cancel; + await delay; + await poller.poll({ abortSignal }); + } + } + switch (state.status) { + case "succeeded": { + return poller.getResult(); + } + case "canceled": { + if (!resolveOnUnsuccessful) + throw new Error("Operation was canceled"); + return poller.getResult(); + } + case "failed": { + if (!resolveOnUnsuccessful) + throw state.error; + return poller.getResult(); + } + case "notStarted": + case "running": { + // Unreachable + throw new Error(`polling completed without succeeding or failing`); + } + } + })().finally(() => { + resultPromise = undefined; + }))), + async poll(pollOptions) { + await pollOperation({ + poll, + state, + stateProxy, + getOperationLocation, + withOperationLocation, + getPollingInterval, + getOperationStatus: getStatusFromPollResponse, + getResourceLocation, + processResult, + updateState, + options: pollOptions, + setDelay: (pollIntervalInMs) => { + currentPollIntervalInMs = pollIntervalInMs; + }, + setErrorAsResult: !resolveOnUnsuccessful, + }); + await handleProgressEvents(); + if (state.status === "canceled" && !resolveOnUnsuccessful) { + throw new Error("Operation was canceled"); + } + if (state.status === "failed" && !resolveOnUnsuccessful) { + throw state.error; + } + }, + }; + return poller; + }; +} - const idx = reqPath.indexOf(cookiePath); - if (idx === 0) { - // "o The cookie-path is a prefix of the request-path, and the last - // character of the cookie-path is %x2F ("/")." - if (cookiePath.substr(-1) === "/") { - return true; - } +// Copyright (c) Microsoft Corporation. +/** + * Creates a poller that can be used to poll a long-running operation. + * @param lro - Description of the long-running operation + * @param options - options to configure the poller + * @returns an initialized poller + */ +async function createHttpPoller(lro, options) { + const { resourceLocationConfig, intervalInMs, processResult, restoreFrom, updateState, withOperationLocation, resolveOnUnsuccessful = false, } = options || {}; + return buildCreatePoller({ + getStatusFromInitialResponse, + getStatusFromPollResponse: getOperationStatus, + getOperationLocation, + getResourceLocation, + getPollingInterval: parseRetryAfter, + resolveOnUnsuccessful, + })({ + init: async () => { + const response = await lro.sendInitialRequest(); + const config = inferLroMode({ + rawResponse: response.rawResponse, + requestPath: lro.requestPath, + requestMethod: lro.requestMethod, + resourceLocationConfig, + }); + return Object.assign({ response, operationLocation: config === null || config === void 0 ? void 0 : config.operationLocation, resourceLocation: config === null || config === void 0 ? void 0 : config.resourceLocation }, ((config === null || config === void 0 ? void 0 : config.mode) ? { metadata: { mode: config.mode } } : {})); + }, + poll: lro.sendPollRequest, + }, { + intervalInMs, + withOperationLocation, + restoreFrom, + updateState, + processResult: processResult + ? 
({ flatResponse }, state) => processResult(flatResponse, state) + : ({ flatResponse }) => flatResponse, + }); +} - // " o The cookie-path is a prefix of the request-path, and the first - // character of the request-path that is not included in the cookie- path - // is a %x2F ("/") character." - if (reqPath.substr(cookiePath.length, 1) === "/") { - return true; +// Copyright (c) Microsoft Corporation. +const createStateProxy = () => ({ + initState: (config) => ({ config, isStarted: true }), + setCanceled: (state) => (state.isCancelled = true), + setError: (state, error) => (state.error = error), + setResult: (state, result) => (state.result = result), + setRunning: (state) => (state.isStarted = true), + setSucceeded: (state) => (state.isCompleted = true), + setFailed: () => { + /** empty body */ + }, + getError: (state) => state.error, + getResult: (state) => state.result, + isCanceled: (state) => !!state.isCancelled, + isFailed: (state) => !!state.error, + isRunning: (state) => !!state.isStarted, + isSucceeded: (state) => Boolean(state.isCompleted && !state.isCancelled && !state.error), +}); +class GenericPollOperation { + constructor(state, lro, setErrorAsResult, lroResourceLocationConfig, processResult, updateState, isDone) { + this.state = state; + this.lro = lro; + this.setErrorAsResult = setErrorAsResult; + this.lroResourceLocationConfig = lroResourceLocationConfig; + this.processResult = processResult; + this.updateState = updateState; + this.isDone = isDone; + } + setPollerConfig(pollerConfig) { + this.pollerConfig = pollerConfig; + } + async update(options) { + var _a; + const stateProxy = createStateProxy(); + if (!this.state.isStarted) { + this.state = Object.assign(Object.assign({}, this.state), (await initHttpOperation({ + lro: this.lro, + stateProxy, + resourceLocationConfig: this.lroResourceLocationConfig, + processResult: this.processResult, + setErrorAsResult: this.setErrorAsResult, + }))); + } + const updateState = this.updateState; + const isDone = this.isDone; + if (!this.state.isCompleted && this.state.error === undefined) { + await pollHttpOperation({ + lro: this.lro, + state: this.state, + stateProxy, + processResult: this.processResult, + updateState: updateState + ? (state, { rawResponse }) => updateState(state, rawResponse) + : undefined, + isDone: isDone + ? ({ flatResponse }, state) => isDone(flatResponse, state) + : undefined, + options, + setDelay: (intervalInMs) => { + this.pollerConfig.intervalInMs = intervalInMs; + }, + setErrorAsResult: this.setErrorAsResult, + }); + } + (_a = options === null || options === void 0 ? void 0 : options.fireProgress) === null || _a === void 0 ? void 0 : _a.call(options, this.state); + return this; + } + async cancel() { + logger.error("`cancelOperation` is deprecated because it wasn't implemented"); + return this; + } + /** + * Serializes the Poller operation. + */ + toString() { + return JSON.stringify({ + state: this.state, + }); } - } - - return false; } -exports.U = pathMatch; - - -/***/ }), - -/***/ 5696: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; -/*! - * Copyright (c) 2015, Salesforce.com, Inc. - * All rights reserved. +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +/** + * When a poller is manually stopped through the `stopPolling` method, + * the poller will be rejected with an instance of the PollerStoppedError. 
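// A minimal usage sketch for the createHttpPoller factory bundled above.
// `sendRequest` and `resourceUrl` are hypothetical stand-ins; the only
// contract assumed is the one visible in the code: sendInitialRequest and
// sendPollRequest resolve to { flatResponse, rawResponse }.
declare function sendRequest(method: string, url: string): Promise<{
  flatResponse: unknown;
  rawResponse: { statusCode: number; headers: Record<string, string>; body?: unknown };
}>;
async function createResourceAndWait(resourceUrl: string) {
  const poller = await createHttpPoller(
    {
      requestMethod: "PUT",
      requestPath: resourceUrl,
      sendInitialRequest: () => sendRequest("PUT", resourceUrl),
      sendPollRequest: (path: string) => sendRequest("GET", path),
    },
    { intervalInMs: 2000 }
  );
  // Resolves with the final result once the state is "succeeded"; throws on
  // failure or cancellation because resolveOnUnsuccessful defaults to false.
  return poller.pollUntilDone();
}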
+ */ +class PollerStoppedError extends Error { + constructor(message) { + super(message); + this.name = "PollerStoppedError"; + Object.setPrototypeOf(this, PollerStoppedError.prototype); + } +} +/** + * When the operation is cancelled, the poller will be rejected with an instance + * of the PollerCancelledError. + */ +class PollerCancelledError extends Error { + constructor(message) { + super(message); + this.name = "PollerCancelledError"; + Object.setPrototypeOf(this, PollerCancelledError.prototype); + } +} +/** + * A class that represents the definition of a program that polls through consecutive requests + * until it reaches a state of completion. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: + * A poller can be executed manually, by polling request by request by calling to the `poll()` method repeatedly, until its operation is completed. + * It also provides a way to wait until the operation completes, by calling `pollUntilDone()` and waiting until the operation finishes. + * Pollers can also request the cancellation of the ongoing process to whom is providing the underlying long running operation. * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * ```ts + * const poller = new MyPoller(); * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. + * // Polling just once: + * await poller.poll(); * - * 3. Neither the name of Salesforce.com nor the names of its contributors may - * be used to endorse or promote products derived from this software without - * specific prior written permission. + * // We can try to cancel the request here, by calling: + * // + * // await poller.cancelOperation(); + * // * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -const pubsuffix = __nccwpck_require__(8292); - -// Gives the permutation of all possible domainMatch()es of a given domain. The -// array is in shortest-to-longest order. Handy for indexing. 
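// A sketch of how the two error classes above surface to callers: stopping a
// running Poller rejects its pending pollUntilDone() promise with
// PollerStoppedError, so intentional stops can be told apart from failures.
// `poller` is assumed to be any instance of the Poller class defined below.
async function stopQuietly(poller: { pollUntilDone(): Promise<unknown>; stopPolling(): void }) {
  const result = poller.pollUntilDone().catch((e) => {
    if (e instanceof PollerStoppedError) {
      return undefined; // stopped on purpose, not an operation failure
    }
    throw e; // real failures (and PollerCancelledError) still propagate
  });
  poller.stopPolling();
  return result;
}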
- -function permuteDomain(domain, allowSpecialUseDomain) { - const pubSuf = pubsuffix.getPublicSuffix(domain, { - allowSpecialUseDomain: allowSpecialUseDomain - }); - - if (!pubSuf) { - return null; - } - if (pubSuf == domain) { - return [domain]; - } - - // Nuke trailing dot - if (domain.slice(-1) == ".") { - domain = domain.slice(0, -1); - } - - const prefix = domain.slice(0, -(pubSuf.length + 1)); // ".example.com" - const parts = prefix.split(".").reverse(); - let cur = pubSuf; - const permutations = [cur]; - while (parts.length) { - cur = `${parts.shift()}.${cur}`; - permutations.push(cur); - } - return permutations; -} - -exports.permuteDomain = permuteDomain; - - -/***/ }), - -/***/ 8292: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; -/*! - * Copyright (c) 2018, Salesforce.com, Inc. - * All rights reserved. + * // Getting the final result: + * const result = await poller.pollUntilDone(); + * ``` * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: + * The Poller is defined by two types, a type representing the state of the poller, which + * must include a basic set of properties from `PollOperationState`, + * and a return type defined by `TResult`, which can be anything. * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * The Poller class implements the `PollerLike` interface, which allows poller implementations to avoid having + * to export the Poller's class directly, and instead only export the already instantiated poller with the PollerLike type. * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. + * ```ts + * class Client { + * public async makePoller: PollerLike { + * const poller = new MyPoller({}); + * // It might be preferred to return the poller after the first request is made, + * // so that some information can be obtained right away. + * await poller.poll(); + * return poller; + * } + * } * - * 3. Neither the name of Salesforce.com nor the names of its contributors may - * be used to endorse or promote products derived from this software without - * specific prior written permission. + * const poller: PollerLike = myClient.makePoller(); + * ``` * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -const psl = __nccwpck_require__(9975); - -// RFC 6761 -const SPECIAL_USE_DOMAINS = [ - "local", - "example", - "invalid", - "localhost", - "test" -]; - -const SPECIAL_TREATMENT_DOMAINS = ["localhost", "invalid"]; - -function getPublicSuffix(domain, options = {}) { - const domainParts = domain.split("."); - const topLevelDomain = domainParts[domainParts.length - 1]; - const allowSpecialUseDomain = !!options.allowSpecialUseDomain; - const ignoreError = !!options.ignoreError; - - if (allowSpecialUseDomain && SPECIAL_USE_DOMAINS.includes(topLevelDomain)) { - if (domainParts.length > 1) { - const secondLevelDomain = domainParts[domainParts.length - 2]; - // In aforementioned example, the eTLD/pubSuf will be apple.localhost - return `${secondLevelDomain}.${topLevelDomain}`; - } else if (SPECIAL_TREATMENT_DOMAINS.includes(topLevelDomain)) { - // For a single word special use domain, e.g. 'localhost' or 'invalid', per RFC 6761, - // "Application software MAY recognize {localhost/invalid} names as special, or - // MAY pass them to name resolution APIs as they would for other domain names." - return `${topLevelDomain}`; - } - } - - if (!ignoreError && SPECIAL_USE_DOMAINS.includes(topLevelDomain)) { - throw new Error( - `Cookie has domain set to the public suffix "${topLevelDomain}" which is a special use domain. To allow this, configure your CookieJar with {allowSpecialUseDomain:true, rejectPublicSuffixes: false}.` - ); - } - - return psl.get(domain); -} - -exports.getPublicSuffix = getPublicSuffix; - - -/***/ }), - -/***/ 7707: -/***/ ((__unused_webpack_module, exports) => { - -"use strict"; -/*! - * Copyright (c) 2015, Salesforce.com, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: + * A poller can be created through its constructor, then it can be polled until it's completed. + * At any point in time, the state of the poller can be obtained without delay through the getOperationState method. + * At any point in time, the intermediate forms of the result type can be requested without delay. + * Once the underlying operation is marked as completed, the poller will stop and the final value will be returned. * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. + * ```ts + * const poller = myClient.makePoller(); + * const state: MyOperationState = poller.getOperationState(); * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. + * // The intermediate result can be obtained at any time. + * const result: MyResult | undefined = poller.getResult(); * - * 3. Neither the name of Salesforce.com nor the names of its contributors may - * be used to endorse or promote products derived from this software without - * specific prior written permission. + * // The final result can only be obtained after the poller finishes. + * const result: MyResult = await poller.pollUntilDone(); + * ``` * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. */ +// eslint-disable-next-line no-use-before-define +class Poller { + /** + * A poller needs to be initialized by passing in at least the basic properties of the `PollOperation`. + * + * When writing an implementation of a Poller, this implementation needs to deal with the initialization + * of any custom state beyond the basic definition of the poller. The basic poller assumes that the poller's + * operation has already been defined, at least its basic properties. The code below shows how to approach + * the definition of the constructor of a new custom poller. + * + * ```ts + * export class MyPoller extends Poller { + * constructor({ + * // Anything you might need outside of the basics + * }) { + * let state: MyOperationState = { + * privateProperty: private, + * publicProperty: public, + * }; + * + * const operation = { + * state, + * update, + * cancel, + * toString + * } + * + * // Sending the operation to the parent's constructor. + * super(operation); + * + * // You can assign more local properties here. + * } + * } + * ``` + * + * Inside of this constructor, a new promise is created. This will be used to + * tell the user when the poller finishes (see `pollUntilDone()`). The promise's + * resolve and reject methods are also used internally to control when to resolve + * or reject anyone waiting for the poller to finish. + * + * The constructor of a custom implementation of a poller is where any serialized version of + * a previous poller's operation should be deserialized into the operation sent to the + * base constructor. For example: + * + * ```ts + * export class MyPoller extends Poller { + * constructor( + * baseOperation: string | undefined + * ) { + * let state: MyOperationState = {}; + * if (baseOperation) { + * state = { + * ...JSON.parse(baseOperation).state, + * ...state + * }; + * } + * const operation = { + * state, + * // ... + * } + * super(operation); + * } + * } + * ``` + * + * @param operation - Must contain the basic properties of `PollOperation`. + */ + constructor(operation) { + /** controls whether to throw an error if the operation failed or was canceled. */ + this.resolveOnUnsuccessful = false; + this.stopped = true; + this.pollProgressCallbacks = []; + this.operation = operation; + this.promise = new Promise((resolve, reject) => { + this.resolve = resolve; + this.reject = reject; + }); + // This prevents the UnhandledPromiseRejectionWarning in node.js from being thrown. + // The above warning would get thrown if `poller.poll` is called, it returns an error, + // and pullUntilDone did not have a .catch or await try/catch on it's return value. + this.promise.catch(() => { + /* intentionally blank */ + }); + } + /** + * Starts a loop that will break only if the poller is done + * or if the poller is stopped. 
+ */ + async startPolling(pollOptions = {}) { + if (this.stopped) { + this.stopped = false; + } + while (!this.isStopped() && !this.isDone()) { + await this.poll(pollOptions); + await this.delay(); + } + } + /** + * pollOnce does one polling, by calling to the update method of the underlying + * poll operation to make any relevant change effective. + * + * It only optionally receives an object with an abortSignal property, from \@azure/abort-controller's AbortSignalLike. + * + * @param options - Optional properties passed to the operation's update method. + */ + async pollOnce(options = {}) { + if (!this.isDone()) { + this.operation = await this.operation.update({ + abortSignal: options.abortSignal, + fireProgress: this.fireProgress.bind(this), + }); + } + this.processUpdatedState(); + } + /** + * fireProgress calls the functions passed in via onProgress the method of the poller. + * + * It loops over all of the callbacks received from onProgress, and executes them, sending them + * the current operation state. + * + * @param state - The current operation state. + */ + fireProgress(state) { + for (const callback of this.pollProgressCallbacks) { + callback(state); + } + } + /** + * Invokes the underlying operation's cancel method. + */ + async cancelOnce(options = {}) { + this.operation = await this.operation.cancel(options); + } + /** + * Returns a promise that will resolve once a single polling request finishes. + * It does this by calling the update method of the Poller's operation. + * + * It only optionally receives an object with an abortSignal property, from \@azure/abort-controller's AbortSignalLike. + * + * @param options - Optional properties passed to the operation's update method. + */ + poll(options = {}) { + if (!this.pollOncePromise) { + this.pollOncePromise = this.pollOnce(options); + const clearPollOncePromise = () => { + this.pollOncePromise = undefined; + }; + this.pollOncePromise.then(clearPollOncePromise, clearPollOncePromise).catch(this.reject); + } + return this.pollOncePromise; + } + processUpdatedState() { + if (this.operation.state.error) { + this.stopped = true; + if (!this.resolveOnUnsuccessful) { + this.reject(this.operation.state.error); + throw this.operation.state.error; + } + } + if (this.operation.state.isCancelled) { + this.stopped = true; + if (!this.resolveOnUnsuccessful) { + const error = new PollerCancelledError("Operation was canceled"); + this.reject(error); + throw error; + } + } + if (this.isDone() && this.resolve) { + // If the poller has finished polling, this means we now have a result. + // However, it can be the case that TResult is instantiated to void, so + // we are not expecting a result anyway. To assert that we might not + // have a result eventually after finishing polling, we cast the result + // to TResult. + this.resolve(this.getResult()); + } + } + /** + * Returns a promise that will resolve once the underlying operation is completed. + */ + async pollUntilDone(pollOptions = {}) { + if (this.stopped) { + this.startPolling(pollOptions).catch(this.reject); + } + // This is needed because the state could have been updated by + // `cancelOperation`, e.g. the operation is canceled or an error occurred. + this.processUpdatedState(); + return this.promise; + } + /** + * Invokes the provided callback after each polling is completed, + * sending the current state of the poller's operation. + * + * It returns a method that can be used to stop receiving updates on the given callback function. 
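// A usage sketch for the progress plumbing above: onProgress (defined just
// below) returns an unsubscribe function, and fireProgress invokes every
// registered callback with the operation state after each poll.
async function logEachTick(poller: {
  onProgress(cb: (state: unknown) => void): () => void;
  pollUntilDone(): Promise<unknown>;
}) {
  const unsubscribe = poller.onProgress((state) => {
    console.log("state after poll:", state);
  });
  try {
    return await poller.pollUntilDone();
  } finally {
    unsubscribe(); // stop receiving updates
  }
}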
+ */ + onProgress(callback) { + this.pollProgressCallbacks.push(callback); + return () => { + this.pollProgressCallbacks = this.pollProgressCallbacks.filter((c) => c !== callback); + }; + } + /** + * Returns true if the poller has finished polling. + */ + isDone() { + const state = this.operation.state; + return Boolean(state.isCompleted || state.isCancelled || state.error); + } + /** + * Stops the poller from continuing to poll. + */ + stopPolling() { + if (!this.stopped) { + this.stopped = true; + if (this.reject) { + this.reject(new PollerStoppedError("This poller is already stopped")); + } + } + } + /** + * Returns true if the poller is stopped. + */ + isStopped() { + return this.stopped; + } + /** + * Attempts to cancel the underlying operation. + * + * It only optionally receives an object with an abortSignal property, from \@azure/abort-controller's AbortSignalLike. + * + * If it's called again before it finishes, it will throw an error. + * + * @param options - Optional properties passed to the operation's update method. + */ + cancelOperation(options = {}) { + if (!this.cancelPromise) { + this.cancelPromise = this.cancelOnce(options); + } + else if (options.abortSignal) { + throw new Error("A cancel request is currently pending"); + } + return this.cancelPromise; + } + /** + * Returns the state of the operation. + * + * Even though TState will be the same type inside any of the methods of any extension of the Poller class, + * implementations of the pollers can customize what's shared with the public by writing their own + * version of the `getOperationState` method, and by defining two types, one representing the internal state of the poller + * and a public type representing a safe to share subset of the properties of the internal state. + * Their definition of getOperationState can then return their public type. + * + * Example: + * + * ```ts + * // Let's say we have our poller's operation state defined as: + * interface MyOperationState extends PollOperationState { + * privateProperty?: string; + * publicProperty?: string; + * } + * + * // To allow us to have a true separation of public and private state, we have to define another interface: + * interface PublicState extends PollOperationState { + * publicProperty?: string; + * } + * + * // Then, we define our Poller as follows: + * export class MyPoller extends Poller { + * // ... More content is needed here ... + * + * public getOperationState(): PublicState { + * const state: PublicState = this.operation.state; + * return { + * // Properties from PollOperationState + * isStarted: state.isStarted, + * isCompleted: state.isCompleted, + * isCancelled: state.isCancelled, + * error: state.error, + * result: state.result, + * + * // The only other property needed by PublicState. + * publicProperty: state.publicProperty + * } + * } + * } + * ``` + * + * You can see this in the tests of this repository, go to the file: + * `../test/utils/testPoller.ts` + * and look for the getOperationState implementation. + */ + getOperationState() { + return this.operation.state; + } + /** + * Returns the result value of the operation, + * regardless of the state of the poller. + * It can return undefined or an incomplete form of the final TResult value + * depending on the implementation. + */ + getResult() { + const state = this.operation.state; + return state.result; + } + /** + * Returns a serialized version of the poller's operation + * by invoking the operation's toString method. 
+ */ + toString() { + return this.operation.toString(); + } +} -/*jshint unused:false */ - -class Store { - constructor() { - this.synchronous = false; - } - - findCookie(domain, path, key, cb) { - throw new Error("findCookie is not implemented"); - } - - findCookies(domain, path, allowSpecialUseDomain, cb) { - throw new Error("findCookies is not implemented"); - } - - putCookie(cookie, cb) { - throw new Error("putCookie is not implemented"); - } - - updateCookie(oldCookie, newCookie, cb) { - // recommended default implementation: - // return this.putCookie(newCookie, cb); - throw new Error("updateCookie is not implemented"); - } - - removeCookie(domain, path, key, cb) { - throw new Error("removeCookie is not implemented"); - } - - removeCookies(domain, path, cb) { - throw new Error("removeCookies is not implemented"); - } - - removeAllCookies(cb) { - throw new Error("removeAllCookies is not implemented"); - } - - getAllCookies(cb) { - throw new Error( - "getAllCookies is not implemented (therefore jar cannot be serialized)" - ); - } +// Copyright (c) Microsoft Corporation. +/** + * The LRO Engine, a class that performs polling. + */ +class LroEngine extends Poller { + constructor(lro, options) { + const { intervalInMs = POLL_INTERVAL_IN_MS, resumeFrom, resolveOnUnsuccessful = false, isDone, lroResourceLocationConfig, processResult, updateState, } = options || {}; + const state = resumeFrom + ? deserializeState(resumeFrom) + : {}; + const operation = new GenericPollOperation(state, lro, !resolveOnUnsuccessful, lroResourceLocationConfig, processResult, updateState, isDone); + super(operation); + this.resolveOnUnsuccessful = resolveOnUnsuccessful; + this.config = { intervalInMs: intervalInMs }; + operation.setPollerConfig(this.config); + } + /** + * The method used by the poller to wait before attempting to update its operation. + */ + delay() { + return new Promise((resolve) => setTimeout(() => resolve(), this.config.intervalInMs)); + } } -exports.y = Store; +exports.LroEngine = LroEngine; +exports.Poller = Poller; +exports.PollerCancelledError = PollerCancelledError; +exports.PollerStoppedError = PollerStoppedError; +exports.createHttpPoller = createHttpPoller; +//# sourceMappingURL=index.js.map /***/ }), -/***/ 9375: +/***/ 4559: /***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { -function requireUtil() { - try { - // eslint-disable-next-line no-restricted-modules - return __nccwpck_require__(3837); - } catch (e) { - return null; - } -} - -// for v10.12.0+ -function lookupCustomInspectSymbol() { - return Symbol.for("nodejs.util.inspect.custom"); -} - -// for older node environments -function tryReadingCustomSymbolFromUtilInspect(options) { - const _requireUtil = options.requireUtil || requireUtil; - const util = _requireUtil(); - return util ? util.inspect.custom : null; -} - -exports.getUtilInspect = function getUtilInspect(fallback, options = {}) { - const _requireUtil = options.requireUtil || requireUtil; - const util = _requireUtil(); - return function inspect(value, showHidden, depth) { - return util ? 
util.inspect(value, showHidden, depth) : fallback(value); - }; -}; - -exports.getCustomInspectSymbol = function getCustomInspectSymbol(options = {}) { - const _lookupCustomInspectSymbol = - options.lookupCustomInspectSymbol || lookupCustomInspectSymbol; - - // get custom inspect symbol for node environments - return ( - _lookupCustomInspectSymbol() || - tryReadingCustomSymbolFromUtilInspect(options) - ); -}; +"use strict"; -/***/ }), +Object.defineProperty(exports, "__esModule", ({ value: true })); -/***/ 1598: -/***/ ((__unused_webpack_module, exports) => { +var tslib = __nccwpck_require__(4351); -"use strict"; -/* ************************************************************************************ -Extracted from check-types.js -https://gitlab.com/philbooth/check-types.js +// Copyright (c) Microsoft Corporation. +/** + * returns an async iterator that iterates over results. It also has a `byPage` + * method that returns pages of items at once. + * + * @param pagedResult - an object that specifies how to get pages. + * @returns a paged async iterator that iterates over results. + */ +function getPagedAsyncIterator(pagedResult) { + var _a; + const iter = getItemAsyncIterator(pagedResult); + return { + next() { + return iter.next(); + }, + [Symbol.asyncIterator]() { + return this; + }, + byPage: (_a = pagedResult === null || pagedResult === void 0 ? void 0 : pagedResult.byPage) !== null && _a !== void 0 ? _a : ((settings) => { + const { continuationToken, maxPageSize } = settings !== null && settings !== void 0 ? settings : {}; + return getPageAsyncIterator(pagedResult, { + pageLink: continuationToken, + maxPageSize, + }); + }), + }; +} +function getItemAsyncIterator(pagedResult) { + return tslib.__asyncGenerator(this, arguments, function* getItemAsyncIterator_1() { + var e_1, _a; + const pages = getPageAsyncIterator(pagedResult); + const firstVal = yield tslib.__await(pages.next()); + // if the result does not have an array shape, i.e. TPage = TElement, then we return it as is + if (!Array.isArray(firstVal.value)) { + yield yield tslib.__await(firstVal.value); + // `pages` is of type `AsyncIterableIterator` but TPage = TElement in this case + yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(pages))); + } + else { + yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(firstVal.value))); + try { + for (var pages_1 = tslib.__asyncValues(pages), pages_1_1; pages_1_1 = yield tslib.__await(pages_1.next()), !pages_1_1.done;) { + const page = pages_1_1.value; + // pages is of type `AsyncIterableIterator` so `page` is of type `TPage`. In this branch, + // it must be the case that `TPage = TElement[]` + yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(page))); + } + } + catch (e_1_1) { e_1 = { error: e_1_1 }; } + finally { + try { + if (pages_1_1 && !pages_1_1.done && (_a = pages_1.return)) yield tslib.__await(_a.call(pages_1)); + } + finally { if (e_1) throw e_1.error; } + } + } + }); +} +function getPageAsyncIterator(pagedResult, options = {}) { + return tslib.__asyncGenerator(this, arguments, function* getPageAsyncIterator_1() { + const { pageLink, maxPageSize } = options; + let response = yield tslib.__await(pagedResult.getPage(pageLink !== null && pageLink !== void 0 ? 
pageLink : pagedResult.firstPageLink, maxPageSize)); + yield yield tslib.__await(response.page); + while (response.nextPageLink) { + response = yield tslib.__await(pagedResult.getPage(response.nextPageLink, maxPageSize)); + yield yield tslib.__await(response.page); + } + }); +} -MIT License +exports.getPagedAsyncIterator = getPagedAsyncIterator; +//# sourceMappingURL=index.js.map -Copyright (c) 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019 Phil Booth -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +/***/ }), -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +/***/ 4175: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +"use strict"; -************************************************************************************ */ +Object.defineProperty(exports, "__esModule", ({ value: true })); -/* Validation functions copied from check-types package - https://www.npmjs.com/package/check-types */ -function isFunction(data) { - return typeof data === "function"; -} +var api = __nccwpck_require__(5163); -function isNonEmptyString(data) { - return isString(data) && data !== ""; +// Copyright (c) Microsoft Corporation. +(function (SpanKind) { + /** Default value. Indicates that the span is used internally. */ + SpanKind[SpanKind["INTERNAL"] = 0] = "INTERNAL"; + /** + * Indicates that the span covers server-side handling of an RPC or other + * remote request. + */ + SpanKind[SpanKind["SERVER"] = 1] = "SERVER"; + /** + * Indicates that the span covers the client-side wrapper around an RPC or + * other remote request. + */ + SpanKind[SpanKind["CLIENT"] = 2] = "CLIENT"; + /** + * Indicates that the span describes producer sending a message to a + * broker. Unlike client and server, there is no direct critical path latency + * relationship between producer and consumer spans. + */ + SpanKind[SpanKind["PRODUCER"] = 3] = "PRODUCER"; + /** + * Indicates that the span describes consumer receiving a message from a + * broker. Unlike client and server, there is no direct critical path latency + * relationship between producer and consumer spans. 
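// A usage sketch for the getPagedAsyncIterator helper above, against a
// hypothetical two-page in-memory PagedResult: getPage receives a page link
// (firstPageLink on the first call) and resolves to { page, nextPageLink? }.
async function demoPaging() {
  const pagedResult = {
    firstPageLink: "page-1",
    getPage: async (link: string) =>
      link === "page-1"
        ? { page: [1, 2], nextPageLink: "page-2" }
        : { page: [3, 4] },
  };
  for await (const item of getPagedAsyncIterator(pagedResult)) {
    console.log(item); // 1, 2, 3, 4
  }
  for await (const page of getPagedAsyncIterator(pagedResult).byPage()) {
    console.log(page); // [1, 2] then [3, 4]
  }
}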
+ */ + SpanKind[SpanKind["CONSUMER"] = 4] = "CONSUMER"; +})(exports.SpanKind || (exports.SpanKind = {})); +/** + * Return the span if one exists + * + * @param context - context to get span from + */ +function getSpan(context) { + return api.trace.getSpan(context); } - -function isDate(data) { - return isInstanceStrict(data, Date) && isInteger(data.getTime()); +/** + * Set the span on a context + * + * @param context - context to use as parent + * @param span - span to set active + */ +function setSpan(context, span) { + return api.trace.setSpan(context, span); } - -function isEmptyString(data) { - return data === "" || (data instanceof String && data.toString() === ""); +/** + * Wrap span context in a NoopSpan and set as span in a new + * context + * + * @param context - context to set active span on + * @param spanContext - span context to be wrapped + */ +function setSpanContext(context, spanContext) { + return api.trace.setSpanContext(context, spanContext); } - -function isString(data) { - return typeof data === "string" || data instanceof String; +/** + * Get the span context of the span if it exists. + * + * @param context - context to get values from + */ +function getSpanContext(context) { + return api.trace.getSpanContext(context); } - -function isObject(data) { - return toString.call(data) === "[object Object]"; +/** + * Returns true of the given {@link SpanContext} is valid. + * A valid {@link SpanContext} is one which has a valid trace ID and span ID as per the spec. + * + * @param context - the {@link SpanContext} to validate. + * + * @returns true if the {@link SpanContext} is valid, false otherwise. + */ +function isSpanContextValid(context) { + return api.trace.isSpanContextValid(context); } -function isInstanceStrict(data, prototype) { - try { - return data instanceof prototype; - } catch (error) { - return false; - } +function getTracer(name, version) { + return api.trace.getTracer(name || "azure/core-tracing", version); } +/** Entrypoint for context API */ +const context = api.context; +(function (SpanStatusCode) { + /** + * The default status. + */ + SpanStatusCode[SpanStatusCode["UNSET"] = 0] = "UNSET"; + /** + * The operation has been validated by an Application developer or + * Operator to have completed successfully. + */ + SpanStatusCode[SpanStatusCode["OK"] = 1] = "OK"; + /** + * The operation contains an error. + */ + SpanStatusCode[SpanStatusCode["ERROR"] = 2] = "ERROR"; +})(exports.SpanStatusCode || (exports.SpanStatusCode = {})); -function isInteger(data) { - return typeof data === "number" && data % 1 === 0; +// Copyright (c) Microsoft Corporation. +function isTracingDisabled() { + var _a; + if (typeof process === "undefined") { + // not supported in browser for now without polyfills + return false; + } + const azureTracingDisabledValue = (_a = process.env.AZURE_TRACING_DISABLED) === null || _a === void 0 ? void 0 : _a.toLowerCase(); + if (azureTracingDisabledValue === "false" || azureTracingDisabledValue === "0") { + return false; + } + return Boolean(azureTracingDisabledValue); +} +/** + * Creates a function that can be used to create spans using the global tracer. + * + * Usage: + * + * ```typescript + * // once + * const createSpan = createSpanFunction({ packagePrefix: "Azure.Data.AppConfiguration", namespace: "Microsoft.AppConfiguration" }); + * + * // in each operation + * const span = createSpan("deleteConfigurationSetting", operationOptions); + * // code... 
+ * span.end(); + * ``` + * + * @hidden + * @param args - allows configuration of the prefix for each span as well as the az.namespace field. + */ +function createSpanFunction(args) { + return function (operationName, operationOptions) { + const tracer = getTracer(); + const tracingOptions = (operationOptions === null || operationOptions === void 0 ? void 0 : operationOptions.tracingOptions) || {}; + const spanOptions = Object.assign({ kind: exports.SpanKind.INTERNAL }, tracingOptions.spanOptions); + const spanName = args.packagePrefix ? `${args.packagePrefix}.${operationName}` : operationName; + let span; + if (isTracingDisabled()) { + span = api.trace.wrapSpanContext(api.INVALID_SPAN_CONTEXT); + } + else { + span = tracer.startSpan(spanName, spanOptions, tracingOptions.tracingContext); + } + if (args.namespace) { + span.setAttribute("az.namespace", args.namespace); + } + let newSpanOptions = tracingOptions.spanOptions || {}; + if (span.isRecording() && args.namespace) { + newSpanOptions = Object.assign(Object.assign({}, tracingOptions.spanOptions), { attributes: Object.assign(Object.assign({}, spanOptions.attributes), { "az.namespace": args.namespace }) }); + } + const newTracingOptions = Object.assign(Object.assign({}, tracingOptions), { spanOptions: newSpanOptions, tracingContext: setSpan(tracingOptions.tracingContext || context.active(), span) }); + const newOperationOptions = Object.assign(Object.assign({}, operationOptions), { tracingOptions: newTracingOptions }); + return { + span, + updatedOptions: newOperationOptions + }; + }; } -/* End validation functions */ -function validate(bool, cb, options) { - if (!isFunction(cb)) { - options = cb; - cb = null; - } - if (!isObject(options)) options = { Error: "Failed Check" }; - if (!bool) { - if (cb) { - cb(new ParameterError(options)); - } else { - throw new ParameterError(options); +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +const VERSION = "00"; +/** + * Generates a `SpanContext` given a `traceparent` header value. + * @param traceParent - Serialized span context data as a `traceparent` header value. + * @returns The `SpanContext` generated from the `traceparent` value. + */ +function extractSpanContextFromTraceParentHeader(traceParentHeader) { + const parts = traceParentHeader.split("-"); + if (parts.length !== 4) { + return; } - } + const [version, traceId, spanId, traceOptions] = parts; + if (version !== VERSION) { + return; + } + const traceFlags = parseInt(traceOptions, 16); + const spanContext = { + spanId, + traceId, + traceFlags + }; + return spanContext; } - -class ParameterError extends Error { - constructor(...params) { - super(...params); - } +/** + * Generates a `traceparent` value given a span context. + * @param spanContext - Contains context for a specific span. + * @returns The `spanContext` represented as a `traceparent` value. + */ +function getTraceParentHeader(spanContext) { + const missingFields = []; + if (!spanContext.traceId) { + missingFields.push("traceId"); + } + if (!spanContext.spanId) { + missingFields.push("spanId"); + } + if (missingFields.length) { + return; + } + const flags = spanContext.traceFlags || 0 /* NONE */; + const hexFlags = flags.toString(16); + const traceFlags = hexFlags.length === 1 ? 
`0${hexFlags}` : hexFlags; + // https://www.w3.org/TR/trace-context/#traceparent-header-field-values + return `${VERSION}-${spanContext.traceId}-${spanContext.spanId}-${traceFlags}`; } -exports.ParameterError = ParameterError; -exports.isFunction = isFunction; -exports.isNonEmptyString = isNonEmptyString; -exports.isDate = isDate; -exports.isEmptyString = isEmptyString; -exports.isString = isString; -exports.isObject = isObject; -exports.validate = validate; - - -/***/ }), - -/***/ 8742: -/***/ ((module) => { - -// generated by genversion -module.exports = '4.1.2' - - -/***/ }), - -/***/ 3415: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", ({ - value: true -})); -Object.defineProperty(exports, "v1", ({ - enumerable: true, - get: function () { - return _v.default; - } -})); -Object.defineProperty(exports, "v3", ({ - enumerable: true, - get: function () { - return _v2.default; - } -})); -Object.defineProperty(exports, "v4", ({ - enumerable: true, - get: function () { - return _v3.default; - } -})); -Object.defineProperty(exports, "v5", ({ - enumerable: true, - get: function () { - return _v4.default; - } -})); -Object.defineProperty(exports, "NIL", ({ - enumerable: true, - get: function () { - return _nil.default; - } -})); -Object.defineProperty(exports, "version", ({ - enumerable: true, - get: function () { - return _version.default; - } -})); -Object.defineProperty(exports, "validate", ({ - enumerable: true, - get: function () { - return _validate.default; - } -})); -Object.defineProperty(exports, "stringify", ({ - enumerable: true, - get: function () { - return _stringify.default; - } -})); -Object.defineProperty(exports, "parse", ({ - enumerable: true, - get: function () { - return _parse.default; - } -})); - -var _v = _interopRequireDefault(__nccwpck_require__(4757)); - -var _v2 = _interopRequireDefault(__nccwpck_require__(9982)); - -var _v3 = _interopRequireDefault(__nccwpck_require__(5393)); - -var _v4 = _interopRequireDefault(__nccwpck_require__(8788)); - -var _nil = _interopRequireDefault(__nccwpck_require__(657)); - -var _version = _interopRequireDefault(__nccwpck_require__(7909)); - -var _validate = _interopRequireDefault(__nccwpck_require__(4418)); - -var _stringify = _interopRequireDefault(__nccwpck_require__(4794)); - -var _parse = _interopRequireDefault(__nccwpck_require__(7079)); +exports.context = context; +exports.createSpanFunction = createSpanFunction; +exports.extractSpanContextFromTraceParentHeader = extractSpanContextFromTraceParentHeader; +exports.getSpan = getSpan; +exports.getSpanContext = getSpanContext; +exports.getTraceParentHeader = getTraceParentHeader; +exports.getTracer = getTracer; +exports.isSpanContextValid = isSpanContextValid; +exports.setSpan = setSpan; +exports.setSpanContext = setSpanContext; +//# sourceMappingURL=index.js.map -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } /***/ }), -/***/ 4153: +/***/ 1333: /***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; -Object.defineProperty(exports, "__esModule", ({ - value: true -})); -exports["default"] = void 0; - -var _crypto = _interopRequireDefault(__nccwpck_require__(6113)); +Object.defineProperty(exports, "__esModule", ({ value: true })); -function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
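// A round-trip sketch for the traceparent helpers exported above; the header
// format is "version-traceId-spanId-flags" in lowercase hex, version "00".
const exampleHeader = "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01";
const exampleContext = extractSpanContextFromTraceParentHeader(exampleHeader);
// exampleContext => { traceId: "0af765...", spanId: "b7ad...", traceFlags: 1 }
if (exampleContext) {
  console.log(getTraceParentHeader(exampleContext) === exampleHeader); // true
}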
obj : { default: obj }; } +var abortController = __nccwpck_require__(2557); +var crypto = __nccwpck_require__(6113); -function md5(bytes) { - if (Array.isArray(bytes)) { - bytes = Buffer.from(bytes); - } else if (typeof bytes === 'string') { - bytes = Buffer.from(bytes, 'utf8'); - } +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +var _a; +/** + * A constant that indicates whether the environment the code is running is Node.JS. + */ +const isNode = typeof process !== "undefined" && Boolean(process.version) && Boolean((_a = process.versions) === null || _a === void 0 ? void 0 : _a.node); - return _crypto.default.createHash('md5').update(bytes).digest(); +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +/** + * Helper TypeGuard that checks if something is defined or not. + * @param thing - Anything + */ +function isDefined(thing) { + return typeof thing !== "undefined" && thing !== null; +} +/** + * Helper TypeGuard that checks if the input is an object with the specified properties. + * @param thing - Anything. + * @param properties - The name of the properties that should appear in the object. + */ +function isObjectWithProperties(thing, properties) { + if (!isDefined(thing) || typeof thing !== "object") { + return false; + } + for (const property of properties) { + if (!objectHasProperty(thing, property)) { + return false; + } + } + return true; +} +/** + * Helper TypeGuard that checks if the input is an object with the specified property. + * @param thing - Any object. + * @param property - The name of the property that should appear in the object. + */ +function objectHasProperty(thing, property) { + return (isDefined(thing) && typeof thing === "object" && property in thing); } -var _default = md5; -exports["default"] = _default; - -/***/ }), - -/***/ 657: -/***/ ((__unused_webpack_module, exports) => { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", ({ - value: true -})); -exports["default"] = void 0; -var _default = '00000000-0000-0000-0000-000000000000'; -exports["default"] = _default; - -/***/ }), - -/***/ 7079: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", ({ - value: true -})); -exports["default"] = void 0; - -var _validate = _interopRequireDefault(__nccwpck_require__(4418)); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -function parse(uuid) { - if (!(0, _validate.default)(uuid)) { - throw TypeError('Invalid UUID'); - } - - let v; - const arr = new Uint8Array(16); // Parse ########-....-....-....-............ - - arr[0] = (v = parseInt(uuid.slice(0, 8), 16)) >>> 24; - arr[1] = v >>> 16 & 0xff; - arr[2] = v >>> 8 & 0xff; - arr[3] = v & 0xff; // Parse ........-####-....-....-............ - - arr[4] = (v = parseInt(uuid.slice(9, 13), 16)) >>> 8; - arr[5] = v & 0xff; // Parse ........-....-####-....-............ - - arr[6] = (v = parseInt(uuid.slice(14, 18), 16)) >>> 8; - arr[7] = v & 0xff; // Parse ........-....-....-####-............ 
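// Quick checks for the type guards defined above.
console.log(isDefined(null)); // false (null and undefined are not "defined")
console.log(isDefined(0)); // true
console.log(isObjectWithProperties({ a: 1, b: 2 }, ["a", "b"])); // true
console.log(isObjectWithProperties({ a: 1 }, ["a", "b"])); // false
console.log(objectHasProperty({ a: 1 }, "b")); // false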
- - arr[8] = (v = parseInt(uuid.slice(19, 23), 16)) >>> 8; - arr[9] = v & 0xff; // Parse ........-....-....-....-############ - // (Use "/" to avoid 32-bit truncation when bit-shifting high-order bytes) - - arr[10] = (v = parseInt(uuid.slice(24, 36), 16)) / 0x10000000000 & 0xff; - arr[11] = v / 0x100000000 & 0xff; - arr[12] = v >>> 24 & 0xff; - arr[13] = v >>> 16 & 0xff; - arr[14] = v >>> 8 & 0xff; - arr[15] = v & 0xff; - return arr; +// Copyright (c) Microsoft Corporation. +const StandardAbortMessage = "The operation was aborted."; +/** + * A wrapper for setTimeout that resolves a promise after timeInMs milliseconds. + * @param timeInMs - The number of milliseconds to be delayed. + * @param options - The options for delay - currently abort options + * @returns Promise that is resolved after timeInMs + */ +function delay(timeInMs, options) { + return new Promise((resolve, reject) => { + let timer = undefined; + let onAborted = undefined; + const rejectOnAbort = () => { + var _a; + return reject(new abortController.AbortError((_a = options === null || options === void 0 ? void 0 : options.abortErrorMsg) !== null && _a !== void 0 ? _a : StandardAbortMessage)); + }; + const removeListeners = () => { + if ((options === null || options === void 0 ? void 0 : options.abortSignal) && onAborted) { + options.abortSignal.removeEventListener("abort", onAborted); + } + }; + onAborted = () => { + if (isDefined(timer)) { + clearTimeout(timer); + } + removeListeners(); + return rejectOnAbort(); + }; + if ((options === null || options === void 0 ? void 0 : options.abortSignal) && options.abortSignal.aborted) { + return rejectOnAbort(); + } + timer = setTimeout(() => { + removeListeners(); + resolve(); + }, timeInMs); + if (options === null || options === void 0 ? void 0 : options.abortSignal) { + options.abortSignal.addEventListener("abort", onAborted); + } + }); } -var _default = parse; -exports["default"] = _default; +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +/** + * Returns a random integer value between a lower and upper bound, + * inclusive of both bounds. + * Note that this uses Math.random and isn't secure. If you need to use + * this for any kind of security purpose, find a better source of random. + * @param min - The smallest integer value allowed. + * @param max - The largest integer value allowed. + */ +function getRandomIntegerInclusive(min, max) { + // Make sure inputs are integers. + min = Math.ceil(min); + max = Math.floor(max); + // Pick a random offset from zero to the size of the range. + // Since Math.random() can never return 1, we have to make the range one larger + // in order to be inclusive of the maximum value after we take the floor. + const offset = Math.floor(Math.random() * (max - min + 1)); + return offset + min; +} -/***/ }), +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +/** + * Helper to determine when an input is a generic JS object. + * @returns true when input is an object type that is not null, Array, RegExp, or Date. + */ +function isObject(input) { + return (typeof input === "object" && + input !== null && + !Array.isArray(input) && + !(input instanceof RegExp) && + !(input instanceof Date)); +} -/***/ 690: -/***/ ((__unused_webpack_module, exports) => { +// Copyright (c) Microsoft Corporation. +/** + * Typeguard for an error object shape (has name and message) + * @param e - Something caught by a catch clause. 
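// A sketch of cancelling the delay helper above through an AbortSignal (any
// AbortSignal-shaped object works); getRandomIntegerInclusive is used here
// only to jitter the wait. The `signal` parameter is supplied by the caller.
async function interruptibleWait(signal: AbortSignal) {
  const waitMs = getRandomIntegerInclusive(1000, 2000); // 1000..2000, inclusive
  try {
    await delay(waitMs, { abortSignal: signal, abortErrorMsg: "wait aborted" });
  } catch (e) {
    console.log((e as Error).message); // "wait aborted" if the signal fired
  }
}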
+ */ +function isError(e) { + if (isObject(e)) { + const hasName = typeof e.name === "string"; + const hasMessage = typeof e.message === "string"; + return hasName && hasMessage; + } + return false; +} +/** + * Given what is thought to be an error object, return the message if possible. + * If the message is missing, returns a stringified version of the input. + * @param e - Something thrown from a try block + * @returns The error message or a string of the input + */ +function getErrorMessage(e) { + if (isError(e)) { + return e.message; + } + else { + let stringified; + try { + if (typeof e === "object" && e) { + stringified = JSON.stringify(e); + } + else { + stringified = String(e); + } + } + catch (err) { + stringified = "[unable to stringify input]"; + } + return `Unknown error ${stringified}`; + } +} -"use strict"; +// Copyright (c) Microsoft Corporation. +/** + * Generates a SHA-256 HMAC signature. + * @param key - The HMAC key represented as a base64 string, used to generate the cryptographic HMAC hash. + * @param stringToSign - The data to be signed. + * @param encoding - The textual encoding to use for the returned HMAC digest. + */ +async function computeSha256Hmac(key, stringToSign, encoding) { + const decodedKey = Buffer.from(key, "base64"); + return crypto.createHmac("sha256", decodedKey).update(stringToSign).digest(encoding); +} +/** + * Generates a SHA-256 hash. + * @param content - The data to be included in the hash. + * @param encoding - The textual encoding to use for the returned hash. + */ +async function computeSha256Hash(content, encoding) { + return crypto.createHash("sha256").update(content).digest(encoding); +} +exports.computeSha256Hash = computeSha256Hash; +exports.computeSha256Hmac = computeSha256Hmac; +exports.delay = delay; +exports.getErrorMessage = getErrorMessage; +exports.getRandomIntegerInclusive = getRandomIntegerInclusive; +exports.isDefined = isDefined; +exports.isError = isError; +exports.isNode = isNode; +exports.isObject = isObject; +exports.isObjectWithProperties = isObjectWithProperties; +exports.objectHasProperty = objectHasProperty; +//# sourceMappingURL=index.js.map -Object.defineProperty(exports, "__esModule", ({ - value: true -})); -exports["default"] = void 0; -var _default = /^(?:[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}|00000000-0000-0000-0000-000000000000)$/i; -exports["default"] = _default; /***/ }), -/***/ 979: +/***/ 3233: /***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; -Object.defineProperty(exports, "__esModule", ({ - value: true -})); -exports["default"] = rng; - -var _crypto = _interopRequireDefault(__nccwpck_require__(6113)); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -const rnds8Pool = new Uint8Array(256); // # of random values to pre-allocate - -let poolPtr = rnds8Pool.length; +Object.defineProperty(exports, "__esModule", ({ value: true })); -function rng() { - if (poolPtr > rnds8Pool.length - 16) { - _crypto.default.randomFillSync(rnds8Pool); +function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; } - poolPtr = 0; - } +var util = _interopDefault(__nccwpck_require__(3837)); +var os = __nccwpck_require__(2037); - return rnds8Pool.slice(poolPtr, poolPtr += 16); +// Copyright (c) Microsoft Corporation. 
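// A sketch of the Node-only hashing helpers exported above; note that
// computeSha256Hmac expects its key as a base64-encoded string.
async function signPayload(payload: string, secret: string) {
  const base64Key = Buffer.from(secret).toString("base64");
  const signature = await computeSha256Hmac(base64Key, payload, "base64");
  const digest = await computeSha256Hash(payload, "hex");
  return { signature, digest };
}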
+function log(message, ...args) { + process.stderr.write(`${util.format(message, ...args)}${os.EOL}`); } -/***/ }), - -/***/ 6631: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", ({ - value: true -})); -exports["default"] = void 0; - -var _crypto = _interopRequireDefault(__nccwpck_require__(6113)); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -function sha1(bytes) { - if (Array.isArray(bytes)) { - bytes = Buffer.from(bytes); - } else if (typeof bytes === 'string') { - bytes = Buffer.from(bytes, 'utf8'); - } - - return _crypto.default.createHash('sha1').update(bytes).digest(); +// Copyright (c) Microsoft Corporation. +const debugEnvVariable = (typeof process !== "undefined" && process.env && process.env.DEBUG) || undefined; +let enabledString; +let enabledNamespaces = []; +let skippedNamespaces = []; +const debuggers = []; +if (debugEnvVariable) { + enable(debugEnvVariable); +} +const debugObj = Object.assign((namespace) => { + return createDebugger(namespace); +}, { + enable, + enabled, + disable, + log +}); +function enable(namespaces) { + enabledString = namespaces; + enabledNamespaces = []; + skippedNamespaces = []; + const wildcard = /\*/g; + const namespaceList = namespaces.split(",").map((ns) => ns.trim().replace(wildcard, ".*?")); + for (const ns of namespaceList) { + if (ns.startsWith("-")) { + skippedNamespaces.push(new RegExp(`^${ns.substr(1)}$`)); + } + else { + enabledNamespaces.push(new RegExp(`^${ns}$`)); + } + } + for (const instance of debuggers) { + instance.enabled = enabled(instance.namespace); + } +} +function enabled(namespace) { + if (namespace.endsWith("*")) { + return true; + } + for (const skipped of skippedNamespaces) { + if (skipped.test(namespace)) { + return false; + } + } + for (const enabledNamespace of enabledNamespaces) { + if (enabledNamespace.test(namespace)) { + return true; + } + } + return false; +} +function disable() { + const result = enabledString || ""; + enable(""); + return result; +} +function createDebugger(namespace) { + const newDebugger = Object.assign(debug, { + enabled: enabled(namespace), + destroy, + log: debugObj.log, + namespace, + extend + }); + function debug(...args) { + if (!newDebugger.enabled) { + return; + } + if (args.length > 0) { + args[0] = `${namespace} ${args[0]}`; + } + newDebugger.log(...args); + } + debuggers.push(newDebugger); + return newDebugger; +} +function destroy() { + const index = debuggers.indexOf(this); + if (index >= 0) { + debuggers.splice(index, 1); + return true; + } + return false; +} +function extend(namespace) { + const newDebugger = createDebugger(`${this.namespace}:${namespace}`); + newDebugger.log = this.log; + return newDebugger; } -var _default = sha1; -exports["default"] = _default; - -/***/ }), - -/***/ 4794: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", ({ - value: true -})); -exports["default"] = void 0; - -var _validate = _interopRequireDefault(__nccwpck_require__(4418)); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - +// Copyright (c) Microsoft Corporation. 
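// A sketch of the namespace filtering implemented above: "*" is a wildcard
// and a leading "-" marks a namespace to skip.
enable("azure:*,-azure:core-lro:*");
console.log(enabled("azure:storage:info")); // true (matches azure:*)
console.log(enabled("azure:core-lro:warning")); // false (explicitly skipped)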
+const registeredLoggers = new Set(); +const logLevelFromEnv = (typeof process !== "undefined" && process.env && process.env.AZURE_LOG_LEVEL) || undefined; +let azureLogLevel; /** - * Convert array of 16 byte values to UUID string format of the form: - * XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + * The AzureLogger provides a mechanism for overriding where logs are output to. + * By default, logs are sent to stderr. + * Override the `log` method to redirect logs to another location. */ -const byteToHex = []; - -for (let i = 0; i < 256; ++i) { - byteToHex.push((i + 0x100).toString(16).substr(1)); +const AzureLogger = debugObj("azure"); +AzureLogger.log = (...args) => { + debugObj.log(...args); +}; +const AZURE_LOG_LEVELS = ["verbose", "info", "warning", "error"]; +if (logLevelFromEnv) { + // avoid calling setLogLevel because we don't want a mis-set environment variable to crash + if (isAzureLogLevel(logLevelFromEnv)) { + setLogLevel(logLevelFromEnv); + } + else { + console.error(`AZURE_LOG_LEVEL set to unknown log level '${logLevelFromEnv}'; logging is not enabled. Acceptable values: ${AZURE_LOG_LEVELS.join(", ")}.`); + } } - -function stringify(arr, offset = 0) { - // Note: Be careful editing this code! It's been tuned for performance - // and works in ways you may not expect. See https://github.com/uuidjs/uuid/pull/434 - const uuid = (byteToHex[arr[offset + 0]] + byteToHex[arr[offset + 1]] + byteToHex[arr[offset + 2]] + byteToHex[arr[offset + 3]] + '-' + byteToHex[arr[offset + 4]] + byteToHex[arr[offset + 5]] + '-' + byteToHex[arr[offset + 6]] + byteToHex[arr[offset + 7]] + '-' + byteToHex[arr[offset + 8]] + byteToHex[arr[offset + 9]] + '-' + byteToHex[arr[offset + 10]] + byteToHex[arr[offset + 11]] + byteToHex[arr[offset + 12]] + byteToHex[arr[offset + 13]] + byteToHex[arr[offset + 14]] + byteToHex[arr[offset + 15]]).toLowerCase(); // Consistency check for valid UUID. If this throws, it's likely due to one - // of the following: - // - One or more input array values don't map to a hex octet (leading to - // "undefined" in the uuid) - // - Invalid input values for the RFC `version` or `variant` fields - - if (!(0, _validate.default)(uuid)) { - throw TypeError('Stringified UUID is invalid'); - } - - return uuid; +/** + * Immediately enables logging at the specified log level. + * @param level - The log level to enable for logging. + * Options from most verbose to least verbose are: + * - verbose + * - info + * - warning + * - error + */ +function setLogLevel(level) { + if (level && !isAzureLogLevel(level)) { + throw new Error(`Unknown log level '${level}'. Acceptable values: ${AZURE_LOG_LEVELS.join(",")}`); + } + azureLogLevel = level; + const enabledNamespaces = []; + for (const logger of registeredLoggers) { + if (shouldEnable(logger)) { + enabledNamespaces.push(logger.namespace); + } + } + debugObj.enable(enabledNamespaces.join(",")); +} +/** + * Retrieves the currently specified log level. + */ +function getLogLevel() { + return azureLogLevel; +} +const levelMap = { + verbose: 400, + info: 300, + warning: 200, + error: 100 +}; +/** + * Creates a logger for use by the Azure SDKs that inherits from `AzureLogger`. + * @param namespace - The name of the SDK package. 
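// A usage sketch for the log-level plumbing above. Levels nest by verbosity
// (error < warning < info < verbose), so "info" enables the error, warning
// and info channels but not verbose ones.
setLogLevel("info");
console.log(getLogLevel()); // "info"
// Logs go to stderr by default; override AzureLogger.log to redirect them.
AzureLogger.log = (...args) => console.log(...args);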
+ * @hidden + */ +function createClientLogger(namespace) { + const clientRootLogger = AzureLogger.extend(namespace); + patchLogMethod(AzureLogger, clientRootLogger); + return { + error: createLogger(clientRootLogger, "error"), + warning: createLogger(clientRootLogger, "warning"), + info: createLogger(clientRootLogger, "info"), + verbose: createLogger(clientRootLogger, "verbose") + }; +} +function patchLogMethod(parent, child) { + child.log = (...args) => { + parent.log(...args); + }; +} +function createLogger(parent, level) { + const logger = Object.assign(parent.extend(level), { + level + }); + patchLogMethod(parent, logger); + if (shouldEnable(logger)) { + const enabledNamespaces = debugObj.disable(); + debugObj.enable(enabledNamespaces + "," + logger.namespace); + } + registeredLoggers.add(logger); + return logger; +} +function shouldEnable(logger) { + if (azureLogLevel && levelMap[logger.level] <= levelMap[azureLogLevel]) { + return true; + } + else { + return false; + } +} +function isAzureLogLevel(logLevel) { + return AZURE_LOG_LEVELS.includes(logLevel); } -var _default = stringify; -exports["default"] = _default; +exports.AzureLogger = AzureLogger; +exports.createClientLogger = createClientLogger; +exports.getLogLevel = getLogLevel; +exports.setLogLevel = setLogLevel; +//# sourceMappingURL=index.js.map + /***/ }), -/***/ 4757: +/***/ 4100: /***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; -Object.defineProperty(exports, "__esModule", ({ - value: true -})); -exports["default"] = void 0; +Object.defineProperty(exports, "__esModule", ({ value: true })); -var _rng = _interopRequireDefault(__nccwpck_require__(979)); - -var _stringify = _interopRequireDefault(__nccwpck_require__(4794)); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -// **`v1()` - Generate time-based UUID** -// -// Inspired by https://github.com/LiosK/UUID.js -// and http://docs.python.org/library/uuid.html -let _nodeId; - -let _clockseq; // Previous uuid creation time - - -let _lastMSecs = 0; -let _lastNSecs = 0; // See https://github.com/uuidjs/uuid for API details - -function v1(options, buf, offset) { - let i = buf && offset || 0; - const b = buf || new Array(16); - options = options || {}; - let node = options.node || _nodeId; - let clockseq = options.clockseq !== undefined ? options.clockseq : _clockseq; // node and clockseq need to be initialized to random values if they're not - // specified. We do this lazily to minimize issues related to insufficient - // system entropy. See #189 - - if (node == null || clockseq == null) { - const seedBytes = options.random || (options.rng || _rng.default)(); - - if (node == null) { - // Per 4.5, create and 48-bit node id, (47 random bits + multicast bit = 1) - node = _nodeId = [seedBytes[0] | 0x01, seedBytes[1], seedBytes[2], seedBytes[3], seedBytes[4], seedBytes[5]]; - } - - if (clockseq == null) { - // Per 4.2.2, randomize (14 bit) clockseq - clockseq = _clockseq = (seedBytes[6] << 8 | seedBytes[7]) & 0x3fff; - } - } // UUID timestamps are 100 nano-second units since the Gregorian epoch, - // (1582-10-15 00:00). JSNumbers aren't precise enough for this, so - // time is handled internally as 'msecs' (integer milliseconds) and 'nsecs' - // (100-nanoseconds offset from msecs) since unix epoch, 1970-01-01 00:00. - - - let msecs = options.msecs !== undefined ? 
options.msecs : Date.now(); // Per 4.2.1.2, use count of uuid's generated during the current clock - // cycle to simulate higher resolution clock - - let nsecs = options.nsecs !== undefined ? options.nsecs : _lastNSecs + 1; // Time since last uuid creation (in msecs) - - const dt = msecs - _lastMSecs + (nsecs - _lastNSecs) / 10000; // Per 4.2.1.2, Bump clockseq on clock regression - - if (dt < 0 && options.clockseq === undefined) { - clockseq = clockseq + 1 & 0x3fff; - } // Reset nsecs if clock regresses (new clockseq) or we've moved onto a new - // time interval - - - if ((dt < 0 || msecs > _lastMSecs) && options.nsecs === undefined) { - nsecs = 0; - } // Per 4.2.1.2 Throw error if too many uuids are requested - - - if (nsecs >= 10000) { - throw new Error("uuid.v1(): Can't create more than 10M uuids/sec"); - } - - _lastMSecs = msecs; - _lastNSecs = nsecs; - _clockseq = clockseq; // Per 4.1.4 - Convert from unix epoch to Gregorian epoch - - msecs += 12219292800000; // `time_low` - - const tl = ((msecs & 0xfffffff) * 10000 + nsecs) % 0x100000000; - b[i++] = tl >>> 24 & 0xff; - b[i++] = tl >>> 16 & 0xff; - b[i++] = tl >>> 8 & 0xff; - b[i++] = tl & 0xff; // `time_mid` - - const tmh = msecs / 0x100000000 * 10000 & 0xfffffff; - b[i++] = tmh >>> 8 & 0xff; - b[i++] = tmh & 0xff; // `time_high_and_version` - - b[i++] = tmh >>> 24 & 0xf | 0x10; // include version - - b[i++] = tmh >>> 16 & 0xff; // `clock_seq_hi_and_reserved` (Per 4.2.2 - include variant) - - b[i++] = clockseq >>> 8 | 0x80; // `clock_seq_low` - - b[i++] = clockseq & 0xff; // `node` - - for (let n = 0; n < 6; ++n) { - b[i + n] = node[n]; - } - - return buf || (0, _stringify.default)(b); -} - -var _default = v1; -exports["default"] = _default; - -/***/ }), - -/***/ 9982: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", ({ - value: true -})); -exports["default"] = void 0; - -var _v = _interopRequireDefault(__nccwpck_require__(4085)); - -var _md = _interopRequireDefault(__nccwpck_require__(4153)); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -const v3 = (0, _v.default)('v3', 0x30, _md.default); -var _default = v3; -exports["default"] = _default; - -/***/ }), - -/***/ 4085: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", ({ - value: true -})); -exports["default"] = _default; -exports.URL = exports.DNS = void 0; - -var _stringify = _interopRequireDefault(__nccwpck_require__(4794)); - -var _parse = _interopRequireDefault(__nccwpck_require__(7079)); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } - -function stringToBytes(str) { - str = unescape(encodeURIComponent(str)); // UTF8 escape - - const bytes = []; - - for (let i = 0; i < str.length; ++i) { - bytes.push(str.charCodeAt(i)); - } - - return bytes; -} - -const DNS = '6ba7b810-9dad-11d1-80b4-00c04fd430c8'; -exports.DNS = DNS; -const URL = '6ba7b811-9dad-11d1-80b4-00c04fd430c8'; -exports.URL = URL; - -function _default(name, version, hashfunc) { - function generateUUID(value, namespace, buf, offset) { - if (typeof value === 'string') { - value = stringToBytes(value); - } - - if (typeof namespace === 'string') { - namespace = (0, _parse.default)(namespace); - } - - if (namespace.length !== 16) { - throw TypeError('Namespace must be array-like (16 iterable integer values, 0-255)'); - } // Compute hash of namespace and value, Per 4.3 - // Future: Use spread syntax when supported on all platforms, e.g. `bytes = - // hashfunc([...namespace, ... value])` - - - let bytes = new Uint8Array(16 + value.length); - bytes.set(namespace); - bytes.set(value, namespace.length); - bytes = hashfunc(bytes); - bytes[6] = bytes[6] & 0x0f | version; - bytes[8] = bytes[8] & 0x3f | 0x80; - - if (buf) { - offset = offset || 0; - - for (let i = 0; i < 16; ++i) { - buf[offset + i] = bytes[i]; - } - - return buf; - } - - return (0, _stringify.default)(bytes); - } // Function#name is not settable on some platforms (#270) - - - try { - generateUUID.name = name; // eslint-disable-next-line no-empty - } catch (err) {} // For CommonJS default export support - - - generateUUID.DNS = DNS; - generateUUID.URL = URL; - return generateUUID; -} - -/***/ }), - -/***/ 5393: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", ({ - value: true -})); -exports["default"] = void 0; - -var _rng = _interopRequireDefault(__nccwpck_require__(979)); - -var _stringify = _interopRequireDefault(__nccwpck_require__(4794)); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -function v4(options, buf, offset) { - options = options || {}; - - const rnds = options.random || (options.rng || _rng.default)(); // Per 4.4, set bits for version and `clock_seq_hi_and_reserved` - - - rnds[6] = rnds[6] & 0x0f | 0x40; - rnds[8] = rnds[8] & 0x3f | 0x80; // Copy bytes to buffer, if provided - - if (buf) { - offset = offset || 0; - - for (let i = 0; i < 16; ++i) { - buf[offset + i] = rnds[i]; - } - - return buf; - } - - return (0, _stringify.default)(rnds); -} - -var _default = v4; -exports["default"] = _default; - -/***/ }), - -/***/ 8788: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", ({ - value: true -})); -exports["default"] = void 0; - -var _v = _interopRequireDefault(__nccwpck_require__(4085)); - -var _sha = _interopRequireDefault(__nccwpck_require__(6631)); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -const v5 = (0, _v.default)('v5', 0x50, _sha.default); -var _default = v5; -exports["default"] = _default; - -/***/ }), - -/***/ 4418: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", ({ - value: true -})); -exports["default"] = void 0; - -var _regex = _interopRequireDefault(__nccwpck_require__(690)); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } - -function validate(uuid) { - return typeof uuid === 'string' && _regex.default.test(uuid); -} - -var _default = validate; -exports["default"] = _default; - -/***/ }), - -/***/ 7909: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", ({ - value: true -})); -exports["default"] = void 0; - -var _validate = _interopRequireDefault(__nccwpck_require__(4418)); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -function version(uuid) { - if (!(0, _validate.default)(uuid)) { - throw TypeError('Invalid UUID'); - } - - return parseInt(uuid.substr(14, 1), 16); -} - -var _default = version; -exports["default"] = _default; - -/***/ }), - -/***/ 7094: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", ({ value: true })); - -var logger$1 = __nccwpck_require__(3233); -var abortController = __nccwpck_require__(2557); - -// Copyright (c) Microsoft Corporation. -/** - * The `@azure/logger` configuration for this package. - * @internal - */ -const logger = logger$1.createClientLogger("core-lro"); - -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -/** - * The default time interval to wait before sending the next polling request. - */ -const POLL_INTERVAL_IN_MS = 2000; -/** - * The closed set of terminal states. - */ -const terminalStates = ["succeeded", "canceled", "failed"]; - -// Copyright (c) Microsoft Corporation. -/** - * Deserializes the state - */ -function deserializeState(serializedState) { - try { - return JSON.parse(serializedState).state; - } - catch (e) { - throw new Error(`Unable to deserialize input state: ${serializedState}`); - } -} -function setStateError(inputs) { - const { state, stateProxy } = inputs; - return (error) => { - stateProxy.setError(state, error); - stateProxy.setFailed(state); - throw error; - }; -} -function processOperationStatus(result) { - const { state, stateProxy, status, isDone, processResult, response, setErrorAsResult } = result; - switch (status) { - case "succeeded": { - stateProxy.setSucceeded(state); - break; - } - case "failed": { - stateProxy.setError(state, new Error(`The long-running operation has failed`)); - stateProxy.setFailed(state); - break; - } - case "canceled": { - stateProxy.setCanceled(state); - break; - } - } - if ((isDone === null || isDone === void 0 ? void 0 : isDone(response, state)) || - (isDone === undefined && - ["succeeded", "canceled"].concat(setErrorAsResult ? [] : ["failed"]).includes(status))) { - stateProxy.setResult(state, buildResult({ - response, - state, - processResult, - })); - } -} -function buildResult(inputs) { - const { processResult, response, state } = inputs; - return processResult ? processResult(response, state) : response; -} -/** - * Initiates the long-running operation. - */ -async function initOperation(inputs) { - const { init, stateProxy, processResult, getOperationStatus, withOperationLocation, setErrorAsResult, } = inputs; - const { operationLocation, resourceLocation, metadata, response } = await init(); - if (operationLocation) - withOperationLocation === null || withOperationLocation === void 0 ? 
void 0 : withOperationLocation(operationLocation, false); - const config = { - metadata, - operationLocation, - resourceLocation, - }; - logger.verbose(`LRO: Operation description:`, config); - const state = stateProxy.initState(config); - const status = getOperationStatus({ response, state, operationLocation }); - processOperationStatus({ state, status, stateProxy, response, setErrorAsResult, processResult }); - return state; -} -async function pollOperationHelper(inputs) { - const { poll, state, stateProxy, operationLocation, getOperationStatus, getResourceLocation, options, } = inputs; - const response = await poll(operationLocation, options).catch(setStateError({ - state, - stateProxy, - })); - const status = getOperationStatus(response, state); - logger.verbose(`LRO: Status:\n\tPolling from: ${state.config.operationLocation}\n\tOperation status: ${status}\n\tPolling status: ${terminalStates.includes(status) ? "Stopped" : "Running"}`); - if (status === "succeeded") { - const resourceLocation = getResourceLocation(response, state); - if (resourceLocation !== undefined) { - return { - response: await poll(resourceLocation).catch(setStateError({ state, stateProxy })), - status, - }; - } - } - return { response, status }; -} -/** Polls the long-running operation. */ -async function pollOperation(inputs) { - const { poll, state, stateProxy, options, getOperationStatus, getResourceLocation, getOperationLocation, withOperationLocation, getPollingInterval, processResult, updateState, setDelay, isDone, setErrorAsResult, } = inputs; - const { operationLocation } = state.config; - if (operationLocation !== undefined) { - const { response, status } = await pollOperationHelper({ - poll, - getOperationStatus, - state, - stateProxy, - operationLocation, - getResourceLocation, - options, - }); - processOperationStatus({ - status, - response, - state, - stateProxy, - isDone, - processResult, - setErrorAsResult, - }); - if (!terminalStates.includes(status)) { - const intervalInMs = getPollingInterval === null || getPollingInterval === void 0 ? void 0 : getPollingInterval(response); - if (intervalInMs) - setDelay(intervalInMs); - const location = getOperationLocation === null || getOperationLocation === void 0 ? void 0 : getOperationLocation(response, state); - if (location !== undefined) { - const isUpdated = operationLocation !== location; - state.config.operationLocation = location; - withOperationLocation === null || withOperationLocation === void 0 ? void 0 : withOperationLocation(location, isUpdated); - } - else - withOperationLocation === null || withOperationLocation === void 0 ? void 0 : withOperationLocation(operationLocation, false); - } - updateState === null || updateState === void 0 ? void 0 : updateState(state, response); - } -} - -// Copyright (c) Microsoft Corporation. -function getOperationLocationPollingUrl(inputs) { - const { azureAsyncOperation, operationLocation } = inputs; - return operationLocation !== null && operationLocation !== void 0 ? 
operationLocation : azureAsyncOperation; -} -function getLocationHeader(rawResponse) { - return rawResponse.headers["location"]; -} -function getOperationLocationHeader(rawResponse) { - return rawResponse.headers["operation-location"]; -} -function getAzureAsyncOperationHeader(rawResponse) { - return rawResponse.headers["azure-asyncoperation"]; -} -function findResourceLocation(inputs) { - const { location, requestMethod, requestPath, resourceLocationConfig } = inputs; - switch (requestMethod) { - case "PUT": { - return requestPath; - } - case "DELETE": { - return undefined; - } - default: { - switch (resourceLocationConfig) { - case "azure-async-operation": { - return undefined; - } - case "original-uri": { - return requestPath; - } - case "location": - default: { - return location; - } - } - } - } -} -function inferLroMode(inputs) { - const { rawResponse, requestMethod, requestPath, resourceLocationConfig } = inputs; - const operationLocation = getOperationLocationHeader(rawResponse); - const azureAsyncOperation = getAzureAsyncOperationHeader(rawResponse); - const pollingUrl = getOperationLocationPollingUrl({ operationLocation, azureAsyncOperation }); - const location = getLocationHeader(rawResponse); - const normalizedRequestMethod = requestMethod === null || requestMethod === void 0 ? void 0 : requestMethod.toLocaleUpperCase(); - if (pollingUrl !== undefined) { - return { - mode: "OperationLocation", - operationLocation: pollingUrl, - resourceLocation: findResourceLocation({ - requestMethod: normalizedRequestMethod, - location, - requestPath, - resourceLocationConfig, - }), - }; - } - else if (location !== undefined) { - return { - mode: "ResourceLocation", - operationLocation: location, - }; - } - else if (normalizedRequestMethod === "PUT" && requestPath) { - return { - mode: "Body", - operationLocation: requestPath, - }; - } - else { - return undefined; - } -} -function transformStatus(inputs) { - const { status, statusCode } = inputs; - if (typeof status !== "string" && status !== undefined) { - throw new Error(`Polling was unsuccessful. Expected status to have a string value or no value but it has instead: ${status}. This doesn't necessarily indicate the operation has failed. Check your Azure subscription or resource status for more information.`); - } - switch (status === null || status === void 0 ? void 0 : status.toLocaleLowerCase()) { - case undefined: - return toOperationStatus(statusCode); - case "succeeded": - return "succeeded"; - case "failed": - return "failed"; - case "running": - case "accepted": - case "started": - case "canceling": - case "cancelling": - return "running"; - case "canceled": - case "cancelled": - return "canceled"; - default: { - logger.warning(`LRO: unrecognized operation status: ${status}`); - return status; - } - } -} -function getStatus(rawResponse) { - var _a; - const { status } = (_a = rawResponse.body) !== null && _a !== void 0 ? _a : {}; - return transformStatus({ status, statusCode: rawResponse.statusCode }); -} -function getProvisioningState(rawResponse) { - var _a, _b; - const { properties, provisioningState } = (_a = rawResponse.body) !== null && _a !== void 0 ? _a : {}; - const status = (_b = properties === null || properties === void 0 ? void 0 : properties.provisioningState) !== null && _b !== void 0 ? 
_b : provisioningState; - return transformStatus({ status, statusCode: rawResponse.statusCode }); -} -function toOperationStatus(statusCode) { - if (statusCode === 202) { - return "running"; - } - else if (statusCode < 300) { - return "succeeded"; - } - else { - return "failed"; - } -} -function parseRetryAfter({ rawResponse }) { - const retryAfter = rawResponse.headers["retry-after"]; - if (retryAfter !== undefined) { - // Retry-After header value is either in HTTP date format, or in seconds - const retryAfterInSeconds = parseInt(retryAfter); - return isNaN(retryAfterInSeconds) - ? calculatePollingIntervalFromDate(new Date(retryAfter)) - : retryAfterInSeconds * 1000; - } - return undefined; -} -function calculatePollingIntervalFromDate(retryAfterDate) { - const timeNow = Math.floor(new Date().getTime()); - const retryAfterTime = retryAfterDate.getTime(); - if (timeNow < retryAfterTime) { - return retryAfterTime - timeNow; - } - return undefined; -} -function getStatusFromInitialResponse(inputs) { - const { response, state, operationLocation } = inputs; - function helper() { - var _a; - const mode = (_a = state.config.metadata) === null || _a === void 0 ? void 0 : _a["mode"]; - switch (mode) { - case undefined: - return toOperationStatus(response.rawResponse.statusCode); - case "Body": - return getOperationStatus(response, state); - default: - return "running"; - } - } - const status = helper(); - return status === "running" && operationLocation === undefined ? "succeeded" : status; -} -/** - * Initiates the long-running operation. - */ -async function initHttpOperation(inputs) { - const { stateProxy, resourceLocationConfig, processResult, lro, setErrorAsResult } = inputs; - return initOperation({ - init: async () => { - const response = await lro.sendInitialRequest(); - const config = inferLroMode({ - rawResponse: response.rawResponse, - requestPath: lro.requestPath, - requestMethod: lro.requestMethod, - resourceLocationConfig, - }); - return Object.assign({ response, operationLocation: config === null || config === void 0 ? void 0 : config.operationLocation, resourceLocation: config === null || config === void 0 ? void 0 : config.resourceLocation }, ((config === null || config === void 0 ? void 0 : config.mode) ? { metadata: { mode: config.mode } } : {})); - }, - stateProxy, - processResult: processResult - ? ({ flatResponse }, state) => processResult(flatResponse, state) - : ({ flatResponse }) => flatResponse, - getOperationStatus: getStatusFromInitialResponse, - setErrorAsResult, - }); -} -function getOperationLocation({ rawResponse }, state) { - var _a; - const mode = (_a = state.config.metadata) === null || _a === void 0 ? void 0 : _a["mode"]; - switch (mode) { - case "OperationLocation": { - return getOperationLocationPollingUrl({ - operationLocation: getOperationLocationHeader(rawResponse), - azureAsyncOperation: getAzureAsyncOperationHeader(rawResponse), - }); - } - case "ResourceLocation": { - return getLocationHeader(rawResponse); - } - case "Body": - default: { - return undefined; - } - } -} -function getOperationStatus({ rawResponse }, state) { - var _a; - const mode = (_a = state.config.metadata) === null || _a === void 0 ? 
void 0 : _a["mode"]; - switch (mode) { - case "OperationLocation": { - return getStatus(rawResponse); - } - case "ResourceLocation": { - return toOperationStatus(rawResponse.statusCode); - } - case "Body": { - return getProvisioningState(rawResponse); - } - default: - throw new Error(`Internal error: Unexpected operation mode: ${mode}`); - } -} -function getResourceLocation({ flatResponse }, state) { - if (typeof flatResponse === "object") { - const resourceLocation = flatResponse.resourceLocation; - if (resourceLocation !== undefined) { - state.config.resourceLocation = resourceLocation; - } - } - return state.config.resourceLocation; -} -/** Polls the long-running operation. */ -async function pollHttpOperation(inputs) { - const { lro, stateProxy, options, processResult, updateState, setDelay, state, setErrorAsResult, } = inputs; - return pollOperation({ - state, - stateProxy, - setDelay, - processResult: processResult - ? ({ flatResponse }, inputState) => processResult(flatResponse, inputState) - : ({ flatResponse }) => flatResponse, - updateState, - getPollingInterval: parseRetryAfter, - getOperationLocation, - getOperationStatus, - getResourceLocation, - options, - /** - * The expansion here is intentional because `lro` could be an object that - * references an inner this, so we need to preserve a reference to it. - */ - poll: async (location, inputOptions) => lro.sendPollRequest(location, inputOptions), - setErrorAsResult, - }); -} - -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -/** - * Map an optional value through a function - * @internal - */ -const maybemap = (value, f) => value === undefined ? undefined : f(value); -const INTERRUPTED = new Error("The poller is already stopped"); -/** - * A promise that delays resolution until a certain amount of time (in milliseconds) has passed, with facilities for - * robust cancellation. - * - * ### Example: - * - * ```javascript - * let toCancel; - * - * // Wait 20 seconds, and optionally allow the function to be cancelled. - * await delayMs(20000, (cancel) => { toCancel = cancel }); - * - * // ... if `toCancel` is called before the 20 second timer expires, then the delayMs promise will reject. - * ``` - * - * @internal - * @param ms - the number of milliseconds to wait before resolving - * @param cb - a callback that can provide the caller with a cancellation function - */ -function delayMs(ms) { - let aborted = false; - let toReject; - return Object.assign(new Promise((resolve, reject) => { - let token; - toReject = () => { - maybemap(token, clearTimeout); - reject(INTERRUPTED); - }; - // In the rare case that the operation is _already_ aborted, we will reject instantly. This could happen, for - // example, if the user calls the cancellation function immediately without yielding execution. - if (aborted) { - toReject(); - } - else { - token = setTimeout(resolve, ms); - } - }), { - cancel: () => { - aborted = true; - toReject === null || toReject === void 0 ? void 0 : toReject(); - }, - }); -} - -// Copyright (c) Microsoft Corporation. -const createStateProxy$1 = () => ({ - /** - * The state at this point is created to be of type OperationState. - * It will be updated later to be of type TState when the - * customer-provided callback, `updateState`, is called during polling. 
- */ - initState: (config) => ({ status: "running", config }), - setCanceled: (state) => (state.status = "canceled"), - setError: (state, error) => (state.error = error), - setResult: (state, result) => (state.result = result), - setRunning: (state) => (state.status = "running"), - setSucceeded: (state) => (state.status = "succeeded"), - setFailed: (state) => (state.status = "failed"), - getError: (state) => state.error, - getResult: (state) => state.result, - isCanceled: (state) => state.status === "canceled", - isFailed: (state) => state.status === "failed", - isRunning: (state) => state.status === "running", - isSucceeded: (state) => state.status === "succeeded", -}); -/** - * Returns a poller factory. - */ -function buildCreatePoller(inputs) { - const { getOperationLocation, getStatusFromInitialResponse, getStatusFromPollResponse, getResourceLocation, getPollingInterval, resolveOnUnsuccessful, } = inputs; - return async ({ init, poll }, options) => { - const { processResult, updateState, withOperationLocation: withOperationLocationCallback, intervalInMs = POLL_INTERVAL_IN_MS, restoreFrom, } = options || {}; - const stateProxy = createStateProxy$1(); - const withOperationLocation = withOperationLocationCallback - ? (() => { - let called = false; - return (operationLocation, isUpdated) => { - if (isUpdated) - withOperationLocationCallback(operationLocation); - else if (!called) - withOperationLocationCallback(operationLocation); - called = true; - }; - })() - : undefined; - const state = restoreFrom - ? deserializeState(restoreFrom) - : await initOperation({ - init, - stateProxy, - processResult, - getOperationStatus: getStatusFromInitialResponse, - withOperationLocation, - setErrorAsResult: !resolveOnUnsuccessful, - }); - let resultPromise; - let cancelJob; - const abortController$1 = new abortController.AbortController(); - const handlers = new Map(); - const handleProgressEvents = async () => handlers.forEach((h) => h(state)); - let currentPollIntervalInMs = intervalInMs; - const poller = { - getOperationState: () => state, - getResult: () => state.result, - isDone: () => ["succeeded", "failed", "canceled"].includes(state.status), - isStopped: () => resultPromise === undefined, - stopPolling: () => { - abortController$1.abort(); - cancelJob === null || cancelJob === void 0 ? void 0 : cancelJob(); - }, - toString: () => JSON.stringify({ - state, - }), - onProgress: (callback) => { - const s = Symbol(); - handlers.set(s, callback); - return () => handlers.delete(s); - }, - pollUntilDone: (pollOptions) => (resultPromise !== null && resultPromise !== void 0 ? resultPromise : (resultPromise = (async () => { - const { abortSignal: inputAbortSignal } = pollOptions || {}; - const { signal: abortSignal } = inputAbortSignal - ? 
new abortController.AbortController([inputAbortSignal, abortController$1.signal]) - : abortController$1; - if (!poller.isDone()) { - await poller.poll({ abortSignal }); - while (!poller.isDone()) { - const delay = delayMs(currentPollIntervalInMs); - cancelJob = delay.cancel; - await delay; - await poller.poll({ abortSignal }); - } - } - switch (state.status) { - case "succeeded": { - return poller.getResult(); - } - case "canceled": { - if (!resolveOnUnsuccessful) - throw new Error("Operation was canceled"); - return poller.getResult(); - } - case "failed": { - if (!resolveOnUnsuccessful) - throw state.error; - return poller.getResult(); - } - case "notStarted": - case "running": { - // Unreachable - throw new Error(`polling completed without succeeding or failing`); - } - } - })().finally(() => { - resultPromise = undefined; - }))), - async poll(pollOptions) { - await pollOperation({ - poll, - state, - stateProxy, - getOperationLocation, - withOperationLocation, - getPollingInterval, - getOperationStatus: getStatusFromPollResponse, - getResourceLocation, - processResult, - updateState, - options: pollOptions, - setDelay: (pollIntervalInMs) => { - currentPollIntervalInMs = pollIntervalInMs; - }, - setErrorAsResult: !resolveOnUnsuccessful, - }); - await handleProgressEvents(); - if (state.status === "canceled" && !resolveOnUnsuccessful) { - throw new Error("Operation was canceled"); - } - if (state.status === "failed" && !resolveOnUnsuccessful) { - throw state.error; - } - }, - }; - return poller; - }; -} - -// Copyright (c) Microsoft Corporation. -/** - * Creates a poller that can be used to poll a long-running operation. - * @param lro - Description of the long-running operation - * @param options - options to configure the poller - * @returns an initialized poller - */ -async function createHttpPoller(lro, options) { - const { resourceLocationConfig, intervalInMs, processResult, restoreFrom, updateState, withOperationLocation, resolveOnUnsuccessful = false, } = options || {}; - return buildCreatePoller({ - getStatusFromInitialResponse, - getStatusFromPollResponse: getOperationStatus, - getOperationLocation, - getResourceLocation, - getPollingInterval: parseRetryAfter, - resolveOnUnsuccessful, - })({ - init: async () => { - const response = await lro.sendInitialRequest(); - const config = inferLroMode({ - rawResponse: response.rawResponse, - requestPath: lro.requestPath, - requestMethod: lro.requestMethod, - resourceLocationConfig, - }); - return Object.assign({ response, operationLocation: config === null || config === void 0 ? void 0 : config.operationLocation, resourceLocation: config === null || config === void 0 ? void 0 : config.resourceLocation }, ((config === null || config === void 0 ? void 0 : config.mode) ? { metadata: { mode: config.mode } } : {})); - }, - poll: lro.sendPollRequest, - }, { - intervalInMs, - withOperationLocation, - restoreFrom, - updateState, - processResult: processResult - ? ({ flatResponse }, state) => processResult(flatResponse, state) - : ({ flatResponse }) => flatResponse, - }); -} - -// Copyright (c) Microsoft Corporation. 
-const createStateProxy = () => ({ - initState: (config) => ({ config, isStarted: true }), - setCanceled: (state) => (state.isCancelled = true), - setError: (state, error) => (state.error = error), - setResult: (state, result) => (state.result = result), - setRunning: (state) => (state.isStarted = true), - setSucceeded: (state) => (state.isCompleted = true), - setFailed: () => { - /** empty body */ - }, - getError: (state) => state.error, - getResult: (state) => state.result, - isCanceled: (state) => !!state.isCancelled, - isFailed: (state) => !!state.error, - isRunning: (state) => !!state.isStarted, - isSucceeded: (state) => Boolean(state.isCompleted && !state.isCancelled && !state.error), -}); -class GenericPollOperation { - constructor(state, lro, setErrorAsResult, lroResourceLocationConfig, processResult, updateState, isDone) { - this.state = state; - this.lro = lro; - this.setErrorAsResult = setErrorAsResult; - this.lroResourceLocationConfig = lroResourceLocationConfig; - this.processResult = processResult; - this.updateState = updateState; - this.isDone = isDone; - } - setPollerConfig(pollerConfig) { - this.pollerConfig = pollerConfig; - } - async update(options) { - var _a; - const stateProxy = createStateProxy(); - if (!this.state.isStarted) { - this.state = Object.assign(Object.assign({}, this.state), (await initHttpOperation({ - lro: this.lro, - stateProxy, - resourceLocationConfig: this.lroResourceLocationConfig, - processResult: this.processResult, - setErrorAsResult: this.setErrorAsResult, - }))); - } - const updateState = this.updateState; - const isDone = this.isDone; - if (!this.state.isCompleted && this.state.error === undefined) { - await pollHttpOperation({ - lro: this.lro, - state: this.state, - stateProxy, - processResult: this.processResult, - updateState: updateState - ? (state, { rawResponse }) => updateState(state, rawResponse) - : undefined, - isDone: isDone - ? ({ flatResponse }, state) => isDone(flatResponse, state) - : undefined, - options, - setDelay: (intervalInMs) => { - this.pollerConfig.intervalInMs = intervalInMs; - }, - setErrorAsResult: this.setErrorAsResult, - }); - } - (_a = options === null || options === void 0 ? void 0 : options.fireProgress) === null || _a === void 0 ? void 0 : _a.call(options, this.state); - return this; - } - async cancel() { - logger.error("`cancelOperation` is deprecated because it wasn't implemented"); - return this; - } - /** - * Serializes the Poller operation. - */ - toString() { - return JSON.stringify({ - state: this.state, - }); - } -} - -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -/** - * When a poller is manually stopped through the `stopPolling` method, - * the poller will be rejected with an instance of the PollerStoppedError. - */ -class PollerStoppedError extends Error { - constructor(message) { - super(message); - this.name = "PollerStoppedError"; - Object.setPrototypeOf(this, PollerStoppedError.prototype); - } -} -/** - * When the operation is cancelled, the poller will be rejected with an instance - * of the PollerCancelledError. - */ -class PollerCancelledError extends Error { - constructor(message) { - super(message); - this.name = "PollerCancelledError"; - Object.setPrototypeOf(this, PollerCancelledError.prototype); - } -} -/** - * A class that represents the definition of a program that polls through consecutive requests - * until it reaches a state of completion. 
- * - * A poller can be executed manually, by polling request by request by calling to the `poll()` method repeatedly, until its operation is completed. - * It also provides a way to wait until the operation completes, by calling `pollUntilDone()` and waiting until the operation finishes. - * Pollers can also request the cancellation of the ongoing process to whom is providing the underlying long running operation. - * - * ```ts - * const poller = new MyPoller(); - * - * // Polling just once: - * await poller.poll(); - * - * // We can try to cancel the request here, by calling: - * // - * // await poller.cancelOperation(); - * // - * - * // Getting the final result: - * const result = await poller.pollUntilDone(); - * ``` - * - * The Poller is defined by two types, a type representing the state of the poller, which - * must include a basic set of properties from `PollOperationState`, - * and a return type defined by `TResult`, which can be anything. - * - * The Poller class implements the `PollerLike` interface, which allows poller implementations to avoid having - * to export the Poller's class directly, and instead only export the already instantiated poller with the PollerLike type. - * - * ```ts - * class Client { - * public async makePoller: PollerLike { - * const poller = new MyPoller({}); - * // It might be preferred to return the poller after the first request is made, - * // so that some information can be obtained right away. - * await poller.poll(); - * return poller; - * } - * } - * - * const poller: PollerLike = myClient.makePoller(); - * ``` - * - * A poller can be created through its constructor, then it can be polled until it's completed. - * At any point in time, the state of the poller can be obtained without delay through the getOperationState method. - * At any point in time, the intermediate forms of the result type can be requested without delay. - * Once the underlying operation is marked as completed, the poller will stop and the final value will be returned. - * - * ```ts - * const poller = myClient.makePoller(); - * const state: MyOperationState = poller.getOperationState(); - * - * // The intermediate result can be obtained at any time. - * const result: MyResult | undefined = poller.getResult(); - * - * // The final result can only be obtained after the poller finishes. - * const result: MyResult = await poller.pollUntilDone(); - * ``` - * - */ -// eslint-disable-next-line no-use-before-define -class Poller { - /** - * A poller needs to be initialized by passing in at least the basic properties of the `PollOperation`. - * - * When writing an implementation of a Poller, this implementation needs to deal with the initialization - * of any custom state beyond the basic definition of the poller. The basic poller assumes that the poller's - * operation has already been defined, at least its basic properties. The code below shows how to approach - * the definition of the constructor of a new custom poller. - * - * ```ts - * export class MyPoller extends Poller { - * constructor({ - * // Anything you might need outside of the basics - * }) { - * let state: MyOperationState = { - * privateProperty: private, - * publicProperty: public, - * }; - * - * const operation = { - * state, - * update, - * cancel, - * toString - * } - * - * // Sending the operation to the parent's constructor. - * super(operation); - * - * // You can assign more local properties here. - * } - * } - * ``` - * - * Inside of this constructor, a new promise is created. 
This will be used to - * tell the user when the poller finishes (see `pollUntilDone()`). The promise's - * resolve and reject methods are also used internally to control when to resolve - * or reject anyone waiting for the poller to finish. - * - * The constructor of a custom implementation of a poller is where any serialized version of - * a previous poller's operation should be deserialized into the operation sent to the - * base constructor. For example: - * - * ```ts - * export class MyPoller extends Poller { - * constructor( - * baseOperation: string | undefined - * ) { - * let state: MyOperationState = {}; - * if (baseOperation) { - * state = { - * ...JSON.parse(baseOperation).state, - * ...state - * }; - * } - * const operation = { - * state, - * // ... - * } - * super(operation); - * } - * } - * ``` - * - * @param operation - Must contain the basic properties of `PollOperation`. - */ - constructor(operation) { - /** controls whether to throw an error if the operation failed or was canceled. */ - this.resolveOnUnsuccessful = false; - this.stopped = true; - this.pollProgressCallbacks = []; - this.operation = operation; - this.promise = new Promise((resolve, reject) => { - this.resolve = resolve; - this.reject = reject; - }); - // This prevents the UnhandledPromiseRejectionWarning in node.js from being thrown. - // The above warning would get thrown if `poller.poll` is called, it returns an error, - // and pullUntilDone did not have a .catch or await try/catch on it's return value. - this.promise.catch(() => { - /* intentionally blank */ - }); - } - /** - * Starts a loop that will break only if the poller is done - * or if the poller is stopped. - */ - async startPolling(pollOptions = {}) { - if (this.stopped) { - this.stopped = false; - } - while (!this.isStopped() && !this.isDone()) { - await this.poll(pollOptions); - await this.delay(); - } - } - /** - * pollOnce does one polling, by calling to the update method of the underlying - * poll operation to make any relevant change effective. - * - * It only optionally receives an object with an abortSignal property, from \@azure/abort-controller's AbortSignalLike. - * - * @param options - Optional properties passed to the operation's update method. - */ - async pollOnce(options = {}) { - if (!this.isDone()) { - this.operation = await this.operation.update({ - abortSignal: options.abortSignal, - fireProgress: this.fireProgress.bind(this), - }); - } - this.processUpdatedState(); - } - /** - * fireProgress calls the functions passed in via onProgress the method of the poller. - * - * It loops over all of the callbacks received from onProgress, and executes them, sending them - * the current operation state. - * - * @param state - The current operation state. - */ - fireProgress(state) { - for (const callback of this.pollProgressCallbacks) { - callback(state); - } - } - /** - * Invokes the underlying operation's cancel method. - */ - async cancelOnce(options = {}) { - this.operation = await this.operation.cancel(options); - } - /** - * Returns a promise that will resolve once a single polling request finishes. - * It does this by calling the update method of the Poller's operation. - * - * It only optionally receives an object with an abortSignal property, from \@azure/abort-controller's AbortSignalLike. - * - * @param options - Optional properties passed to the operation's update method. 
- */ - poll(options = {}) { - if (!this.pollOncePromise) { - this.pollOncePromise = this.pollOnce(options); - const clearPollOncePromise = () => { - this.pollOncePromise = undefined; - }; - this.pollOncePromise.then(clearPollOncePromise, clearPollOncePromise).catch(this.reject); - } - return this.pollOncePromise; - } - processUpdatedState() { - if (this.operation.state.error) { - this.stopped = true; - if (!this.resolveOnUnsuccessful) { - this.reject(this.operation.state.error); - throw this.operation.state.error; - } - } - if (this.operation.state.isCancelled) { - this.stopped = true; - if (!this.resolveOnUnsuccessful) { - const error = new PollerCancelledError("Operation was canceled"); - this.reject(error); - throw error; - } - } - if (this.isDone() && this.resolve) { - // If the poller has finished polling, this means we now have a result. - // However, it can be the case that TResult is instantiated to void, so - // we are not expecting a result anyway. To assert that we might not - // have a result eventually after finishing polling, we cast the result - // to TResult. - this.resolve(this.getResult()); - } - } - /** - * Returns a promise that will resolve once the underlying operation is completed. - */ - async pollUntilDone(pollOptions = {}) { - if (this.stopped) { - this.startPolling(pollOptions).catch(this.reject); - } - // This is needed because the state could have been updated by - // `cancelOperation`, e.g. the operation is canceled or an error occurred. - this.processUpdatedState(); - return this.promise; - } - /** - * Invokes the provided callback after each polling is completed, - * sending the current state of the poller's operation. - * - * It returns a method that can be used to stop receiving updates on the given callback function. - */ - onProgress(callback) { - this.pollProgressCallbacks.push(callback); - return () => { - this.pollProgressCallbacks = this.pollProgressCallbacks.filter((c) => c !== callback); - }; - } - /** - * Returns true if the poller has finished polling. - */ - isDone() { - const state = this.operation.state; - return Boolean(state.isCompleted || state.isCancelled || state.error); - } - /** - * Stops the poller from continuing to poll. - */ - stopPolling() { - if (!this.stopped) { - this.stopped = true; - if (this.reject) { - this.reject(new PollerStoppedError("This poller is already stopped")); - } - } - } - /** - * Returns true if the poller is stopped. - */ - isStopped() { - return this.stopped; - } - /** - * Attempts to cancel the underlying operation. - * - * It only optionally receives an object with an abortSignal property, from \@azure/abort-controller's AbortSignalLike. - * - * If it's called again before it finishes, it will throw an error. - * - * @param options - Optional properties passed to the operation's update method. - */ - cancelOperation(options = {}) { - if (!this.cancelPromise) { - this.cancelPromise = this.cancelOnce(options); - } - else if (options.abortSignal) { - throw new Error("A cancel request is currently pending"); - } - return this.cancelPromise; - } - /** - * Returns the state of the operation. 
- * - * Even though TState will be the same type inside any of the methods of any extension of the Poller class, - * implementations of the pollers can customize what's shared with the public by writing their own - * version of the `getOperationState` method, and by defining two types, one representing the internal state of the poller - * and a public type representing a safe to share subset of the properties of the internal state. - * Their definition of getOperationState can then return their public type. - * - * Example: - * - * ```ts - * // Let's say we have our poller's operation state defined as: - * interface MyOperationState extends PollOperationState { - * privateProperty?: string; - * publicProperty?: string; - * } - * - * // To allow us to have a true separation of public and private state, we have to define another interface: - * interface PublicState extends PollOperationState { - * publicProperty?: string; - * } - * - * // Then, we define our Poller as follows: - * export class MyPoller extends Poller { - * // ... More content is needed here ... - * - * public getOperationState(): PublicState { - * const state: PublicState = this.operation.state; - * return { - * // Properties from PollOperationState - * isStarted: state.isStarted, - * isCompleted: state.isCompleted, - * isCancelled: state.isCancelled, - * error: state.error, - * result: state.result, - * - * // The only other property needed by PublicState. - * publicProperty: state.publicProperty - * } - * } - * } - * ``` - * - * You can see this in the tests of this repository, go to the file: - * `../test/utils/testPoller.ts` - * and look for the getOperationState implementation. - */ - getOperationState() { - return this.operation.state; - } - /** - * Returns the result value of the operation, - * regardless of the state of the poller. - * It can return undefined or an incomplete form of the final TResult value - * depending on the implementation. - */ - getResult() { - const state = this.operation.state; - return state.result; - } - /** - * Returns a serialized version of the poller's operation - * by invoking the operation's toString method. - */ - toString() { - return this.operation.toString(); - } -} - -// Copyright (c) Microsoft Corporation. -/** - * The LRO Engine, a class that performs polling. - */ -class LroEngine extends Poller { - constructor(lro, options) { - const { intervalInMs = POLL_INTERVAL_IN_MS, resumeFrom, resolveOnUnsuccessful = false, isDone, lroResourceLocationConfig, processResult, updateState, } = options || {}; - const state = resumeFrom - ? deserializeState(resumeFrom) - : {}; - const operation = new GenericPollOperation(state, lro, !resolveOnUnsuccessful, lroResourceLocationConfig, processResult, updateState, isDone); - super(operation); - this.resolveOnUnsuccessful = resolveOnUnsuccessful; - this.config = { intervalInMs: intervalInMs }; - operation.setPollerConfig(this.config); - } - /** - * The method used by the poller to wait before attempting to update its operation. 
- */ - delay() { - return new Promise((resolve) => setTimeout(() => resolve(), this.config.intervalInMs)); - } -} - -exports.LroEngine = LroEngine; -exports.Poller = Poller; -exports.PollerCancelledError = PollerCancelledError; -exports.PollerStoppedError = PollerStoppedError; -exports.createHttpPoller = createHttpPoller; -//# sourceMappingURL=index.js.map - - -/***/ }), - -/***/ 4559: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", ({ value: true })); - -var tslib = __nccwpck_require__(4351); - -// Copyright (c) Microsoft Corporation. -/** - * returns an async iterator that iterates over results. It also has a `byPage` - * method that returns pages of items at once. - * - * @param pagedResult - an object that specifies how to get pages. - * @returns a paged async iterator that iterates over results. - */ -function getPagedAsyncIterator(pagedResult) { - var _a; - const iter = getItemAsyncIterator(pagedResult); - return { - next() { - return iter.next(); - }, - [Symbol.asyncIterator]() { - return this; - }, - byPage: (_a = pagedResult === null || pagedResult === void 0 ? void 0 : pagedResult.byPage) !== null && _a !== void 0 ? _a : ((settings) => { - const { continuationToken, maxPageSize } = settings !== null && settings !== void 0 ? settings : {}; - return getPageAsyncIterator(pagedResult, { - pageLink: continuationToken, - maxPageSize, - }); - }), - }; -} -function getItemAsyncIterator(pagedResult) { - return tslib.__asyncGenerator(this, arguments, function* getItemAsyncIterator_1() { - var e_1, _a; - const pages = getPageAsyncIterator(pagedResult); - const firstVal = yield tslib.__await(pages.next()); - // if the result does not have an array shape, i.e. TPage = TElement, then we return it as is - if (!Array.isArray(firstVal.value)) { - yield yield tslib.__await(firstVal.value); - // `pages` is of type `AsyncIterableIterator` but TPage = TElement in this case - yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(pages))); - } - else { - yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(firstVal.value))); - try { - for (var pages_1 = tslib.__asyncValues(pages), pages_1_1; pages_1_1 = yield tslib.__await(pages_1.next()), !pages_1_1.done;) { - const page = pages_1_1.value; - // pages is of type `AsyncIterableIterator` so `page` is of type `TPage`. In this branch, - // it must be the case that `TPage = TElement[]` - yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(page))); - } - } - catch (e_1_1) { e_1 = { error: e_1_1 }; } - finally { - try { - if (pages_1_1 && !pages_1_1.done && (_a = pages_1.return)) yield tslib.__await(_a.call(pages_1)); - } - finally { if (e_1) throw e_1.error; } - } - } - }); -} -function getPageAsyncIterator(pagedResult, options = {}) { - return tslib.__asyncGenerator(this, arguments, function* getPageAsyncIterator_1() { - const { pageLink, maxPageSize } = options; - let response = yield tslib.__await(pagedResult.getPage(pageLink !== null && pageLink !== void 0 ? 
pageLink : pagedResult.firstPageLink, maxPageSize)); - yield yield tslib.__await(response.page); - while (response.nextPageLink) { - response = yield tslib.__await(pagedResult.getPage(response.nextPageLink, maxPageSize)); - yield yield tslib.__await(response.page); - } - }); -} - -exports.getPagedAsyncIterator = getPagedAsyncIterator; -//# sourceMappingURL=index.js.map - - -/***/ }), - -/***/ 4175: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", ({ value: true })); - -var api = __nccwpck_require__(5163); - -// Copyright (c) Microsoft Corporation. -(function (SpanKind) { - /** Default value. Indicates that the span is used internally. */ - SpanKind[SpanKind["INTERNAL"] = 0] = "INTERNAL"; - /** - * Indicates that the span covers server-side handling of an RPC or other - * remote request. - */ - SpanKind[SpanKind["SERVER"] = 1] = "SERVER"; - /** - * Indicates that the span covers the client-side wrapper around an RPC or - * other remote request. - */ - SpanKind[SpanKind["CLIENT"] = 2] = "CLIENT"; - /** - * Indicates that the span describes producer sending a message to a - * broker. Unlike client and server, there is no direct critical path latency - * relationship between producer and consumer spans. - */ - SpanKind[SpanKind["PRODUCER"] = 3] = "PRODUCER"; - /** - * Indicates that the span describes consumer receiving a message from a - * broker. Unlike client and server, there is no direct critical path latency - * relationship between producer and consumer spans. - */ - SpanKind[SpanKind["CONSUMER"] = 4] = "CONSUMER"; -})(exports.SpanKind || (exports.SpanKind = {})); -/** - * Return the span if one exists - * - * @param context - context to get span from - */ -function getSpan(context) { - return api.trace.getSpan(context); -} -/** - * Set the span on a context - * - * @param context - context to use as parent - * @param span - span to set active - */ -function setSpan(context, span) { - return api.trace.setSpan(context, span); -} -/** - * Wrap span context in a NoopSpan and set as span in a new - * context - * - * @param context - context to set active span on - * @param spanContext - span context to be wrapped - */ -function setSpanContext(context, spanContext) { - return api.trace.setSpanContext(context, spanContext); -} -/** - * Get the span context of the span if it exists. - * - * @param context - context to get values from - */ -function getSpanContext(context) { - return api.trace.getSpanContext(context); -} -/** - * Returns true of the given {@link SpanContext} is valid. - * A valid {@link SpanContext} is one which has a valid trace ID and span ID as per the spec. - * - * @param context - the {@link SpanContext} to validate. - * - * @returns true if the {@link SpanContext} is valid, false otherwise. - */ -function isSpanContextValid(context) { - return api.trace.isSpanContextValid(context); -} -function getTracer(name, version) { - return api.trace.getTracer(name || "azure/core-tracing", version); -} -/** Entrypoint for context API */ -const context = api.context; -(function (SpanStatusCode) { - /** - * The default status. - */ - SpanStatusCode[SpanStatusCode["UNSET"] = 0] = "UNSET"; - /** - * The operation has been validated by an Application developer or - * Operator to have completed successfully. - */ - SpanStatusCode[SpanStatusCode["OK"] = 1] = "OK"; - /** - * The operation contains an error. 
- */ - SpanStatusCode[SpanStatusCode["ERROR"] = 2] = "ERROR"; -})(exports.SpanStatusCode || (exports.SpanStatusCode = {})); - -// Copyright (c) Microsoft Corporation. -function isTracingDisabled() { - var _a; - if (typeof process === "undefined") { - // not supported in browser for now without polyfills - return false; - } - const azureTracingDisabledValue = (_a = process.env.AZURE_TRACING_DISABLED) === null || _a === void 0 ? void 0 : _a.toLowerCase(); - if (azureTracingDisabledValue === "false" || azureTracingDisabledValue === "0") { - return false; - } - return Boolean(azureTracingDisabledValue); -} -/** - * Creates a function that can be used to create spans using the global tracer. - * - * Usage: - * - * ```typescript - * // once - * const createSpan = createSpanFunction({ packagePrefix: "Azure.Data.AppConfiguration", namespace: "Microsoft.AppConfiguration" }); - * - * // in each operation - * const span = createSpan("deleteConfigurationSetting", operationOptions); - * // code... - * span.end(); - * ``` - * - * @hidden - * @param args - allows configuration of the prefix for each span as well as the az.namespace field. - */ -function createSpanFunction(args) { - return function (operationName, operationOptions) { - const tracer = getTracer(); - const tracingOptions = (operationOptions === null || operationOptions === void 0 ? void 0 : operationOptions.tracingOptions) || {}; - const spanOptions = Object.assign({ kind: exports.SpanKind.INTERNAL }, tracingOptions.spanOptions); - const spanName = args.packagePrefix ? `${args.packagePrefix}.${operationName}` : operationName; - let span; - if (isTracingDisabled()) { - span = api.trace.wrapSpanContext(api.INVALID_SPAN_CONTEXT); - } - else { - span = tracer.startSpan(spanName, spanOptions, tracingOptions.tracingContext); - } - if (args.namespace) { - span.setAttribute("az.namespace", args.namespace); - } - let newSpanOptions = tracingOptions.spanOptions || {}; - if (span.isRecording() && args.namespace) { - newSpanOptions = Object.assign(Object.assign({}, tracingOptions.spanOptions), { attributes: Object.assign(Object.assign({}, spanOptions.attributes), { "az.namespace": args.namespace }) }); - } - const newTracingOptions = Object.assign(Object.assign({}, tracingOptions), { spanOptions: newSpanOptions, tracingContext: setSpan(tracingOptions.tracingContext || context.active(), span) }); - const newOperationOptions = Object.assign(Object.assign({}, operationOptions), { tracingOptions: newTracingOptions }); - return { - span, - updatedOptions: newOperationOptions - }; - }; -} - -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -const VERSION = "00"; -/** - * Generates a `SpanContext` given a `traceparent` header value. - * @param traceParent - Serialized span context data as a `traceparent` header value. - * @returns The `SpanContext` generated from the `traceparent` value. - */ -function extractSpanContextFromTraceParentHeader(traceParentHeader) { - const parts = traceParentHeader.split("-"); - if (parts.length !== 4) { - return; - } - const [version, traceId, spanId, traceOptions] = parts; - if (version !== VERSION) { - return; - } - const traceFlags = parseInt(traceOptions, 16); - const spanContext = { - spanId, - traceId, - traceFlags - }; - return spanContext; -} -/** - * Generates a `traceparent` value given a span context. - * @param spanContext - Contains context for a specific span. - * @returns The `spanContext` represented as a `traceparent` value. 
- */ -function getTraceParentHeader(spanContext) { - const missingFields = []; - if (!spanContext.traceId) { - missingFields.push("traceId"); - } - if (!spanContext.spanId) { - missingFields.push("spanId"); - } - if (missingFields.length) { - return; - } - const flags = spanContext.traceFlags || 0 /* NONE */; - const hexFlags = flags.toString(16); - const traceFlags = hexFlags.length === 1 ? `0${hexFlags}` : hexFlags; - // https://www.w3.org/TR/trace-context/#traceparent-header-field-values - return `${VERSION}-${spanContext.traceId}-${spanContext.spanId}-${traceFlags}`; -} - -exports.context = context; -exports.createSpanFunction = createSpanFunction; -exports.extractSpanContextFromTraceParentHeader = extractSpanContextFromTraceParentHeader; -exports.getSpan = getSpan; -exports.getSpanContext = getSpanContext; -exports.getTraceParentHeader = getTraceParentHeader; -exports.getTracer = getTracer; -exports.isSpanContextValid = isSpanContextValid; -exports.setSpan = setSpan; -exports.setSpanContext = setSpanContext; -//# sourceMappingURL=index.js.map - - -/***/ }), - -/***/ 1333: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", ({ value: true })); - -var abortController = __nccwpck_require__(2557); -var crypto = __nccwpck_require__(6113); - -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -var _a; -/** - * A constant that indicates whether the environment the code is running is Node.JS. - */ -const isNode = typeof process !== "undefined" && Boolean(process.version) && Boolean((_a = process.versions) === null || _a === void 0 ? void 0 : _a.node); - -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -/** - * Helper TypeGuard that checks if something is defined or not. - * @param thing - Anything - */ -function isDefined(thing) { - return typeof thing !== "undefined" && thing !== null; -} -/** - * Helper TypeGuard that checks if the input is an object with the specified properties. - * @param thing - Anything. - * @param properties - The name of the properties that should appear in the object. - */ -function isObjectWithProperties(thing, properties) { - if (!isDefined(thing) || typeof thing !== "object") { - return false; - } - for (const property of properties) { - if (!objectHasProperty(thing, property)) { - return false; - } - } - return true; -} -/** - * Helper TypeGuard that checks if the input is an object with the specified property. - * @param thing - Any object. - * @param property - The name of the property that should appear in the object. - */ -function objectHasProperty(thing, property) { - return (isDefined(thing) && typeof thing === "object" && property in thing); -} - -// Copyright (c) Microsoft Corporation. -const StandardAbortMessage = "The operation was aborted."; -/** - * A wrapper for setTimeout that resolves a promise after timeInMs milliseconds. - * @param timeInMs - The number of milliseconds to be delayed. - * @param options - The options for delay - currently abort options - * @returns Promise that is resolved after timeInMs - */ -function delay(timeInMs, options) { - return new Promise((resolve, reject) => { - let timer = undefined; - let onAborted = undefined; - const rejectOnAbort = () => { - var _a; - return reject(new abortController.AbortError((_a = options === null || options === void 0 ? void 0 : options.abortErrorMsg) !== null && _a !== void 0 ? 
_a : StandardAbortMessage)); - }; - const removeListeners = () => { - if ((options === null || options === void 0 ? void 0 : options.abortSignal) && onAborted) { - options.abortSignal.removeEventListener("abort", onAborted); - } - }; - onAborted = () => { - if (isDefined(timer)) { - clearTimeout(timer); - } - removeListeners(); - return rejectOnAbort(); - }; - if ((options === null || options === void 0 ? void 0 : options.abortSignal) && options.abortSignal.aborted) { - return rejectOnAbort(); - } - timer = setTimeout(() => { - removeListeners(); - resolve(); - }, timeInMs); - if (options === null || options === void 0 ? void 0 : options.abortSignal) { - options.abortSignal.addEventListener("abort", onAborted); - } - }); -} - -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -/** - * Returns a random integer value between a lower and upper bound, - * inclusive of both bounds. - * Note that this uses Math.random and isn't secure. If you need to use - * this for any kind of security purpose, find a better source of random. - * @param min - The smallest integer value allowed. - * @param max - The largest integer value allowed. - */ -function getRandomIntegerInclusive(min, max) { - // Make sure inputs are integers. - min = Math.ceil(min); - max = Math.floor(max); - // Pick a random offset from zero to the size of the range. - // Since Math.random() can never return 1, we have to make the range one larger - // in order to be inclusive of the maximum value after we take the floor. - const offset = Math.floor(Math.random() * (max - min + 1)); - return offset + min; -} - -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -/** - * Helper to determine when an input is a generic JS object. - * @returns true when input is an object type that is not null, Array, RegExp, or Date. - */ -function isObject(input) { - return (typeof input === "object" && - input !== null && - !Array.isArray(input) && - !(input instanceof RegExp) && - !(input instanceof Date)); -} - -// Copyright (c) Microsoft Corporation. -/** - * Typeguard for an error object shape (has name and message) - * @param e - Something caught by a catch clause. - */ -function isError(e) { - if (isObject(e)) { - const hasName = typeof e.name === "string"; - const hasMessage = typeof e.message === "string"; - return hasName && hasMessage; - } - return false; -} -/** - * Given what is thought to be an error object, return the message if possible. - * If the message is missing, returns a stringified version of the input. - * @param e - Something thrown from a try block - * @returns The error message or a string of the input - */ -function getErrorMessage(e) { - if (isError(e)) { - return e.message; - } - else { - let stringified; - try { - if (typeof e === "object" && e) { - stringified = JSON.stringify(e); - } - else { - stringified = String(e); - } - } - catch (err) { - stringified = "[unable to stringify input]"; - } - return `Unknown error ${stringified}`; - } -} - -// Copyright (c) Microsoft Corporation. -/** - * Generates a SHA-256 HMAC signature. - * @param key - The HMAC key represented as a base64 string, used to generate the cryptographic HMAC hash. - * @param stringToSign - The data to be signed. - * @param encoding - The textual encoding to use for the returned HMAC digest. 
- */ -async function computeSha256Hmac(key, stringToSign, encoding) { - const decodedKey = Buffer.from(key, "base64"); - return crypto.createHmac("sha256", decodedKey).update(stringToSign).digest(encoding); -} -/** - * Generates a SHA-256 hash. - * @param content - The data to be included in the hash. - * @param encoding - The textual encoding to use for the returned hash. - */ -async function computeSha256Hash(content, encoding) { - return crypto.createHash("sha256").update(content).digest(encoding); -} - -exports.computeSha256Hash = computeSha256Hash; -exports.computeSha256Hmac = computeSha256Hmac; -exports.delay = delay; -exports.getErrorMessage = getErrorMessage; -exports.getRandomIntegerInclusive = getRandomIntegerInclusive; -exports.isDefined = isDefined; -exports.isError = isError; -exports.isNode = isNode; -exports.isObject = isObject; -exports.isObjectWithProperties = isObjectWithProperties; -exports.objectHasProperty = objectHasProperty; -//# sourceMappingURL=index.js.map - - -/***/ }), - -/***/ 3233: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", ({ value: true })); - -function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; } - -var util = _interopDefault(__nccwpck_require__(3837)); -var os = __nccwpck_require__(2037); - -// Copyright (c) Microsoft Corporation. -function log(message, ...args) { - process.stderr.write(`${util.format(message, ...args)}${os.EOL}`); -} - -// Copyright (c) Microsoft Corporation. -const debugEnvVariable = (typeof process !== "undefined" && process.env && process.env.DEBUG) || undefined; -let enabledString; -let enabledNamespaces = []; -let skippedNamespaces = []; -const debuggers = []; -if (debugEnvVariable) { - enable(debugEnvVariable); -} -const debugObj = Object.assign((namespace) => { - return createDebugger(namespace); -}, { - enable, - enabled, - disable, - log -}); -function enable(namespaces) { - enabledString = namespaces; - enabledNamespaces = []; - skippedNamespaces = []; - const wildcard = /\*/g; - const namespaceList = namespaces.split(",").map((ns) => ns.trim().replace(wildcard, ".*?")); - for (const ns of namespaceList) { - if (ns.startsWith("-")) { - skippedNamespaces.push(new RegExp(`^${ns.substr(1)}$`)); - } - else { - enabledNamespaces.push(new RegExp(`^${ns}$`)); - } - } - for (const instance of debuggers) { - instance.enabled = enabled(instance.namespace); - } -} -function enabled(namespace) { - if (namespace.endsWith("*")) { - return true; - } - for (const skipped of skippedNamespaces) { - if (skipped.test(namespace)) { - return false; - } - } - for (const enabledNamespace of enabledNamespaces) { - if (enabledNamespace.test(namespace)) { - return true; - } - } - return false; -} -function disable() { - const result = enabledString || ""; - enable(""); - return result; -} -function createDebugger(namespace) { - const newDebugger = Object.assign(debug, { - enabled: enabled(namespace), - destroy, - log: debugObj.log, - namespace, - extend - }); - function debug(...args) { - if (!newDebugger.enabled) { - return; - } - if (args.length > 0) { - args[0] = `${namespace} ${args[0]}`; - } - newDebugger.log(...args); - } - debuggers.push(newDebugger); - return newDebugger; -} -function destroy() { - const index = debuggers.indexOf(this); - if (index >= 0) { - debuggers.splice(index, 1); - return true; - } - return false; -} -function extend(namespace) { - const newDebugger = 
createDebugger(`${this.namespace}:${namespace}`); - newDebugger.log = this.log; - return newDebugger; -} - -// Copyright (c) Microsoft Corporation. -const registeredLoggers = new Set(); -const logLevelFromEnv = (typeof process !== "undefined" && process.env && process.env.AZURE_LOG_LEVEL) || undefined; -let azureLogLevel; -/** - * The AzureLogger provides a mechanism for overriding where logs are output to. - * By default, logs are sent to stderr. - * Override the `log` method to redirect logs to another location. - */ -const AzureLogger = debugObj("azure"); -AzureLogger.log = (...args) => { - debugObj.log(...args); -}; -const AZURE_LOG_LEVELS = ["verbose", "info", "warning", "error"]; -if (logLevelFromEnv) { - // avoid calling setLogLevel because we don't want a mis-set environment variable to crash - if (isAzureLogLevel(logLevelFromEnv)) { - setLogLevel(logLevelFromEnv); - } - else { - console.error(`AZURE_LOG_LEVEL set to unknown log level '${logLevelFromEnv}'; logging is not enabled. Acceptable values: ${AZURE_LOG_LEVELS.join(", ")}.`); - } -} -/** - * Immediately enables logging at the specified log level. - * @param level - The log level to enable for logging. - * Options from most verbose to least verbose are: - * - verbose - * - info - * - warning - * - error - */ -function setLogLevel(level) { - if (level && !isAzureLogLevel(level)) { - throw new Error(`Unknown log level '${level}'. Acceptable values: ${AZURE_LOG_LEVELS.join(",")}`); - } - azureLogLevel = level; - const enabledNamespaces = []; - for (const logger of registeredLoggers) { - if (shouldEnable(logger)) { - enabledNamespaces.push(logger.namespace); - } - } - debugObj.enable(enabledNamespaces.join(",")); -} -/** - * Retrieves the currently specified log level. - */ -function getLogLevel() { - return azureLogLevel; -} -const levelMap = { - verbose: 400, - info: 300, - warning: 200, - error: 100 -}; -/** - * Creates a logger for use by the Azure SDKs that inherits from `AzureLogger`. - * @param namespace - The name of the SDK package. 
- * @hidden - */ -function createClientLogger(namespace) { - const clientRootLogger = AzureLogger.extend(namespace); - patchLogMethod(AzureLogger, clientRootLogger); - return { - error: createLogger(clientRootLogger, "error"), - warning: createLogger(clientRootLogger, "warning"), - info: createLogger(clientRootLogger, "info"), - verbose: createLogger(clientRootLogger, "verbose") - }; -} -function patchLogMethod(parent, child) { - child.log = (...args) => { - parent.log(...args); - }; -} -function createLogger(parent, level) { - const logger = Object.assign(parent.extend(level), { - level - }); - patchLogMethod(parent, logger); - if (shouldEnable(logger)) { - const enabledNamespaces = debugObj.disable(); - debugObj.enable(enabledNamespaces + "," + logger.namespace); - } - registeredLoggers.add(logger); - return logger; -} -function shouldEnable(logger) { - if (azureLogLevel && levelMap[logger.level] <= levelMap[azureLogLevel]) { - return true; - } - else { - return false; - } -} -function isAzureLogLevel(logLevel) { - return AZURE_LOG_LEVELS.includes(logLevel); -} - -exports.AzureLogger = AzureLogger; -exports.createClientLogger = createClientLogger; -exports.getLogLevel = getLogLevel; -exports.setLogLevel = setLogLevel; -//# sourceMappingURL=index.js.map - - -/***/ }), - -/***/ 4100: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", ({ value: true })); - -var coreHttp = __nccwpck_require__(4607); -var tslib = __nccwpck_require__(4351); -var coreTracing = __nccwpck_require__(4175); -var logger$1 = __nccwpck_require__(3233); -var abortController = __nccwpck_require__(2557); -var os = __nccwpck_require__(2037); -var crypto = __nccwpck_require__(6113); -var stream = __nccwpck_require__(2781); -__nccwpck_require__(4559); -var coreLro = __nccwpck_require__(7094); -var events = __nccwpck_require__(2361); -var fs = __nccwpck_require__(7147); -var util = __nccwpck_require__(3837); +var coreHttp = __nccwpck_require__(4607); +var tslib = __nccwpck_require__(4351); +var coreTracing = __nccwpck_require__(4175); +var logger$1 = __nccwpck_require__(3233); +var abortController = __nccwpck_require__(2557); +var os = __nccwpck_require__(2037); +var crypto = __nccwpck_require__(6113); +var stream = __nccwpck_require__(2781); +__nccwpck_require__(4559); +var coreLro = __nccwpck_require__(7094); +var events = __nccwpck_require__(2361); +var fs = __nccwpck_require__(7147); +var util = __nccwpck_require__(3837); function _interopNamespace(e) { if (e && e.__esModule) return e; @@ -32189,220 +29709,1045 @@ const listType = { allowedValues: ["committed", "uncommitted", "all"] } } -}; - -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ -/** Class representing a Service. */ -class Service { +}; + +/* + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. + */ +/** Class representing a Service. */ +class Service { + /** + * Initialize a new instance of the class Service class. 
+ * @param client Reference to the service client + */ + constructor(client) { + this.client = client; + } + /** + * Sets properties for a storage account's Blob service endpoint, including properties for Storage + * Analytics and CORS (Cross-Origin Resource Sharing) rules + * @param blobServiceProperties The StorageService properties. + * @param options The options parameters. + */ + setProperties(blobServiceProperties, options) { + const operationArguments = { + blobServiceProperties, + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, setPropertiesOperationSpec); + } + /** + * gets the properties of a storage account's Blob service, including properties for Storage Analytics + * and CORS (Cross-Origin Resource Sharing) rules. + * @param options The options parameters. + */ + getProperties(options) { + const operationArguments = { + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, getPropertiesOperationSpec$2); + } + /** + * Retrieves statistics related to replication for the Blob service. It is only available on the + * secondary location endpoint when read-access geo-redundant replication is enabled for the storage + * account. + * @param options The options parameters. + */ + getStatistics(options) { + const operationArguments = { + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, getStatisticsOperationSpec); + } + /** + * The List Containers Segment operation returns a list of the containers under the specified account + * @param options The options parameters. + */ + listContainersSegment(options) { + const operationArguments = { + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, listContainersSegmentOperationSpec); + } + /** + * Retrieves a user delegation key for the Blob service. This is only a valid operation when using + * bearer token authentication. + * @param keyInfo Key information + * @param options The options parameters. + */ + getUserDelegationKey(keyInfo, options) { + const operationArguments = { + keyInfo, + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, getUserDelegationKeyOperationSpec); + } + /** + * Returns the sku name and account kind + * @param options The options parameters. + */ + getAccountInfo(options) { + const operationArguments = { + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, getAccountInfoOperationSpec$2); + } + /** + * The Batch operation allows multiple API calls to be embedded into a single HTTP request. + * @param contentLength The length of the request. + * @param multipartContentType Required. The value of this header must be multipart/mixed with a batch + * boundary. Example header value: multipart/mixed; boundary=batch_ + * @param body Initial data + * @param options The options parameters. 
+ */ + submitBatch(contentLength, multipartContentType, body, options) { + const operationArguments = { + contentLength, + multipartContentType, + body, + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, submitBatchOperationSpec$1); + } + /** + * The Filter Blobs operation enables callers to list blobs across all containers whose tags match a + * given search expression. Filter blobs searches across all containers within a storage account but + * can be scoped within the expression to a single container. + * @param options The options parameters. + */ + filterBlobs(options) { + const operationArguments = { + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, filterBlobsOperationSpec$1); + } +} +// Operation Specifications +const xmlSerializer$5 = new coreHttp__namespace.Serializer(Mappers, /* isXml */ true); +const setPropertiesOperationSpec = { + path: "/", + httpMethod: "PUT", + responses: { + 202: { + headersMapper: ServiceSetPropertiesHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ServiceSetPropertiesExceptionHeaders + } + }, + requestBody: blobServiceProperties, + queryParameters: [ + restype, + comp, + timeoutInSeconds + ], + urlParameters: [url], + headerParameters: [ + contentType, + accept, + version, + requestId + ], + isXML: true, + contentType: "application/xml; charset=utf-8", + mediaType: "xml", + serializer: xmlSerializer$5 +}; +const getPropertiesOperationSpec$2 = { + path: "/", + httpMethod: "GET", + responses: { + 200: { + bodyMapper: BlobServiceProperties, + headersMapper: ServiceGetPropertiesHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ServiceGetPropertiesExceptionHeaders + } + }, + queryParameters: [ + restype, + comp, + timeoutInSeconds + ], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1 + ], + isXML: true, + serializer: xmlSerializer$5 +}; +const getStatisticsOperationSpec = { + path: "/", + httpMethod: "GET", + responses: { + 200: { + bodyMapper: BlobServiceStatistics, + headersMapper: ServiceGetStatisticsHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ServiceGetStatisticsExceptionHeaders + } + }, + queryParameters: [ + restype, + timeoutInSeconds, + comp1 + ], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1 + ], + isXML: true, + serializer: xmlSerializer$5 +}; +const listContainersSegmentOperationSpec = { + path: "/", + httpMethod: "GET", + responses: { + 200: { + bodyMapper: ListContainersSegmentResponse, + headersMapper: ServiceListContainersSegmentHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ServiceListContainersSegmentExceptionHeaders + } + }, + queryParameters: [ + timeoutInSeconds, + comp2, + prefix, + marker, + maxPageSize, + include + ], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1 + ], + isXML: true, + serializer: xmlSerializer$5 +}; +const getUserDelegationKeyOperationSpec = { + path: "/", + httpMethod: "POST", + responses: { + 200: { + bodyMapper: UserDelegationKey, + headersMapper: ServiceGetUserDelegationKeyHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ServiceGetUserDelegationKeyExceptionHeaders + } + }, + requestBody: keyInfo, + queryParameters: [ + restype, + timeoutInSeconds, + comp3 + ], + urlParameters: [url], + headerParameters: [ + 
contentType, + accept, + version, + requestId + ], + isXML: true, + contentType: "application/xml; charset=utf-8", + mediaType: "xml", + serializer: xmlSerializer$5 +}; +const getAccountInfoOperationSpec$2 = { + path: "/", + httpMethod: "GET", + responses: { + 200: { + headersMapper: ServiceGetAccountInfoHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ServiceGetAccountInfoExceptionHeaders + } + }, + queryParameters: [comp, restype1], + urlParameters: [url], + headerParameters: [version, accept1], + isXML: true, + serializer: xmlSerializer$5 +}; +const submitBatchOperationSpec$1 = { + path: "/", + httpMethod: "POST", + responses: { + 202: { + bodyMapper: { + type: { name: "Stream" }, + serializedName: "parsedResponse" + }, + headersMapper: ServiceSubmitBatchHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ServiceSubmitBatchExceptionHeaders + } + }, + requestBody: body, + queryParameters: [timeoutInSeconds, comp4], + urlParameters: [url], + headerParameters: [ + contentType, + accept, + version, + requestId, + contentLength, + multipartContentType + ], + isXML: true, + contentType: "application/xml; charset=utf-8", + mediaType: "xml", + serializer: xmlSerializer$5 +}; +const filterBlobsOperationSpec$1 = { + path: "/", + httpMethod: "GET", + responses: { + 200: { + bodyMapper: FilterBlobSegment, + headersMapper: ServiceFilterBlobsHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ServiceFilterBlobsExceptionHeaders + } + }, + queryParameters: [ + timeoutInSeconds, + marker, + maxPageSize, + comp5, + where + ], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1 + ], + isXML: true, + serializer: xmlSerializer$5 +}; + +/* + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. + */ +/** Class representing a Container. */ +class Container { + /** + * Initialize a new instance of the class Container class. + * @param client Reference to the service client + */ + constructor(client) { + this.client = client; + } + /** + * creates a new container under the specified account. If the container with the same name already + * exists, the operation fails + * @param options The options parameters. + */ + create(options) { + const operationArguments = { + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, createOperationSpec$2); + } + /** + * returns all user-defined metadata and system properties for the specified container. The data + * returned does not include the container's list of blobs + * @param options The options parameters. + */ + getProperties(options) { + const operationArguments = { + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, getPropertiesOperationSpec$1); + } + /** + * operation marks the specified container for deletion. The container and any blobs contained within + * it are later deleted during garbage collection + * @param options The options parameters. 
+ */ + delete(options) { + const operationArguments = { + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, deleteOperationSpec$1); + } + /** + * operation sets one or more user-defined name-value pairs for the specified container. + * @param options The options parameters. + */ + setMetadata(options) { + const operationArguments = { + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, setMetadataOperationSpec$1); + } + /** + * gets the permissions for the specified container. The permissions indicate whether container data + * may be accessed publicly. + * @param options The options parameters. + */ + getAccessPolicy(options) { + const operationArguments = { + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, getAccessPolicyOperationSpec); + } + /** + * sets the permissions for the specified container. The permissions indicate whether blobs in a + * container may be accessed publicly. + * @param options The options parameters. + */ + setAccessPolicy(options) { + const operationArguments = { + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, setAccessPolicyOperationSpec); + } + /** + * Restores a previously-deleted container. + * @param options The options parameters. + */ + restore(options) { + const operationArguments = { + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, restoreOperationSpec); + } + /** + * Renames an existing container. + * @param sourceContainerName Required. Specifies the name of the container to rename. + * @param options The options parameters. + */ + rename(sourceContainerName, options) { + const operationArguments = { + sourceContainerName, + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, renameOperationSpec); + } /** - * Initialize a new instance of the class Service class. - * @param client Reference to the service client + * The Batch operation allows multiple API calls to be embedded into a single HTTP request. + * @param contentLength The length of the request. + * @param multipartContentType Required. The value of this header must be multipart/mixed with a batch + * boundary. Example header value: multipart/mixed; boundary=batch_ + * @param body Initial data + * @param options The options parameters. */ - constructor(client) { - this.client = client; + submitBatch(contentLength, multipartContentType, body, options) { + const operationArguments = { + contentLength, + multipartContentType, + body, + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, submitBatchOperationSpec); } /** - * Sets properties for a storage account's Blob service endpoint, including properties for Storage - * Analytics and CORS (Cross-Origin Resource Sharing) rules - * @param blobServiceProperties The StorageService properties. + * The Filter Blobs operation enables callers to list blobs in a container whose tags match a given + * search expression. Filter blobs searches within the given container. 
* @param options The options parameters. */ - setProperties(blobServiceProperties, options) { + filterBlobs(options) { const operationArguments = { - blobServiceProperties, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, setPropertiesOperationSpec); + return this.client.sendOperationRequest(operationArguments, filterBlobsOperationSpec); } /** - * gets the properties of a storage account's Blob service, including properties for Storage Analytics - * and CORS (Cross-Origin Resource Sharing) rules. + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can + * be 15 to 60 seconds, or can be infinite * @param options The options parameters. */ - getProperties(options) { + acquireLease(options) { const operationArguments = { options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, getPropertiesOperationSpec$2); + return this.client.sendOperationRequest(operationArguments, acquireLeaseOperationSpec$1); } /** - * Retrieves statistics related to replication for the Blob service. It is only available on the - * secondary location endpoint when read-access geo-redundant replication is enabled for the storage - * account. + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can + * be 15 to 60 seconds, or can be infinite + * @param leaseId Specifies the current lease ID on the resource. * @param options The options parameters. */ - getStatistics(options) { + releaseLease(leaseId, options) { const operationArguments = { + leaseId, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, getStatisticsOperationSpec); + return this.client.sendOperationRequest(operationArguments, releaseLeaseOperationSpec$1); } /** - * The List Containers Segment operation returns a list of the containers under the specified account + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can + * be 15 to 60 seconds, or can be infinite + * @param leaseId Specifies the current lease ID on the resource. * @param options The options parameters. */ - listContainersSegment(options) { + renewLease(leaseId, options) { const operationArguments = { + leaseId, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, listContainersSegmentOperationSpec); + return this.client.sendOperationRequest(operationArguments, renewLeaseOperationSpec$1); } /** - * Retrieves a user delegation key for the Blob service. This is only a valid operation when using - * bearer token authentication. - * @param keyInfo Key information + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can + * be 15 to 60 seconds, or can be infinite * @param options The options parameters. 
*/ - getUserDelegationKey(keyInfo, options) { + breakLease(options) { const operationArguments = { - keyInfo, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, getUserDelegationKeyOperationSpec); + return this.client.sendOperationRequest(operationArguments, breakLeaseOperationSpec$1); } /** - * Returns the sku name and account kind + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can + * be 15 to 60 seconds, or can be infinite + * @param leaseId Specifies the current lease ID on the resource. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 + * (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor + * (String) for a list of valid GUID string formats. * @param options The options parameters. */ - getAccountInfo(options) { + changeLease(leaseId, proposedLeaseId, options) { const operationArguments = { + leaseId, + proposedLeaseId, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, getAccountInfoOperationSpec$2); + return this.client.sendOperationRequest(operationArguments, changeLeaseOperationSpec$1); } /** - * The Batch operation allows multiple API calls to be embedded into a single HTTP request. - * @param contentLength The length of the request. - * @param multipartContentType Required. The value of this header must be multipart/mixed with a batch - * boundary. Example header value: multipart/mixed; boundary=batch_ - * @param body Initial data + * [Update] The List Blobs operation returns a list of the blobs under the specified container * @param options The options parameters. */ - submitBatch(contentLength, multipartContentType, body, options) { + listBlobFlatSegment(options) { const operationArguments = { - contentLength, - multipartContentType, - body, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, submitBatchOperationSpec$1); + return this.client.sendOperationRequest(operationArguments, listBlobFlatSegmentOperationSpec); } /** - * The Filter Blobs operation enables callers to list blobs across all containers whose tags match a - * given search expression. Filter blobs searches across all containers within a storage account but - * can be scoped within the expression to a single container. + * [Update] The List Blobs operation returns a list of the blobs under the specified container + * @param delimiter When the request includes this parameter, the operation returns a BlobPrefix + * element in the response body that acts as a placeholder for all blobs whose names begin with the + * same substring up to the appearance of the delimiter character. The delimiter may be a single + * character or a string. * @param options The options parameters. */ - filterBlobs(options) { + listBlobHierarchySegment(delimiter, options) { const operationArguments = { + delimiter, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, filterBlobsOperationSpec$1); + return this.client.sendOperationRequest(operationArguments, listBlobHierarchySegmentOperationSpec); + } + /** + * Returns the sku name and account kind + * @param options The options parameters. 
+ */ + getAccountInfo(options) { + const operationArguments = { + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, getAccountInfoOperationSpec$1); } } // Operation Specifications -const xmlSerializer$5 = new coreHttp__namespace.Serializer(Mappers, /* isXml */ true); -const setPropertiesOperationSpec = { - path: "/", +const xmlSerializer$4 = new coreHttp__namespace.Serializer(Mappers, /* isXml */ true); +const createOperationSpec$2 = { + path: "/{containerName}", httpMethod: "PUT", + responses: { + 201: { + headersMapper: ContainerCreateHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ContainerCreateExceptionHeaders + } + }, + queryParameters: [timeoutInSeconds, restype2], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1, + metadata, + access, + defaultEncryptionScope, + preventEncryptionScopeOverride + ], + isXML: true, + serializer: xmlSerializer$4 +}; +const getPropertiesOperationSpec$1 = { + path: "/{containerName}", + httpMethod: "GET", + responses: { + 200: { + headersMapper: ContainerGetPropertiesHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ContainerGetPropertiesExceptionHeaders + } + }, + queryParameters: [timeoutInSeconds, restype2], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1, + leaseId + ], + isXML: true, + serializer: xmlSerializer$4 +}; +const deleteOperationSpec$1 = { + path: "/{containerName}", + httpMethod: "DELETE", responses: { 202: { - headersMapper: ServiceSetPropertiesHeaders + headersMapper: ContainerDeleteHeaders }, default: { bodyMapper: StorageError, - headersMapper: ServiceSetPropertiesExceptionHeaders + headersMapper: ContainerDeleteExceptionHeaders + } + }, + queryParameters: [timeoutInSeconds, restype2], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1, + leaseId, + ifModifiedSince, + ifUnmodifiedSince + ], + isXML: true, + serializer: xmlSerializer$4 +}; +const setMetadataOperationSpec$1 = { + path: "/{containerName}", + httpMethod: "PUT", + responses: { + 200: { + headersMapper: ContainerSetMetadataHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ContainerSetMetadataExceptionHeaders } }, - requestBody: blobServiceProperties, queryParameters: [ - restype, - comp, - timeoutInSeconds + timeoutInSeconds, + restype2, + comp6 + ], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1, + metadata, + leaseId, + ifModifiedSince + ], + isXML: true, + serializer: xmlSerializer$4 +}; +const getAccessPolicyOperationSpec = { + path: "/{containerName}", + httpMethod: "GET", + responses: { + 200: { + bodyMapper: { + type: { + name: "Sequence", + element: { + type: { name: "Composite", className: "SignedIdentifier" } + } + }, + serializedName: "SignedIdentifiers", + xmlName: "SignedIdentifiers", + xmlIsWrapped: true, + xmlElementName: "SignedIdentifier" + }, + headersMapper: ContainerGetAccessPolicyHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ContainerGetAccessPolicyExceptionHeaders + } + }, + queryParameters: [ + timeoutInSeconds, + restype2, + comp7 + ], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1, + leaseId + ], + isXML: true, + serializer: xmlSerializer$4 +}; +const setAccessPolicyOperationSpec = { + path: "/{containerName}", + httpMethod: "PUT", + responses: { + 200: { + headersMapper: ContainerSetAccessPolicyHeaders + }, + 
default: { + bodyMapper: StorageError, + headersMapper: ContainerSetAccessPolicyExceptionHeaders + } + }, + requestBody: containerAcl, + queryParameters: [ + timeoutInSeconds, + restype2, + comp7 ], urlParameters: [url], headerParameters: [ contentType, accept, version, - requestId + requestId, + access, + leaseId, + ifModifiedSince, + ifUnmodifiedSince ], isXML: true, contentType: "application/xml; charset=utf-8", mediaType: "xml", - serializer: xmlSerializer$5 + serializer: xmlSerializer$4 }; -const getPropertiesOperationSpec$2 = { - path: "/", +const restoreOperationSpec = { + path: "/{containerName}", + httpMethod: "PUT", + responses: { + 201: { + headersMapper: ContainerRestoreHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ContainerRestoreExceptionHeaders + } + }, + queryParameters: [ + timeoutInSeconds, + restype2, + comp8 + ], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1, + deletedContainerName, + deletedContainerVersion + ], + isXML: true, + serializer: xmlSerializer$4 +}; +const renameOperationSpec = { + path: "/{containerName}", + httpMethod: "PUT", + responses: { + 200: { + headersMapper: ContainerRenameHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ContainerRenameExceptionHeaders + } + }, + queryParameters: [ + timeoutInSeconds, + restype2, + comp9 + ], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1, + sourceContainerName, + sourceLeaseId + ], + isXML: true, + serializer: xmlSerializer$4 +}; +const submitBatchOperationSpec = { + path: "/{containerName}", + httpMethod: "POST", + responses: { + 202: { + bodyMapper: { + type: { name: "Stream" }, + serializedName: "parsedResponse" + }, + headersMapper: ContainerSubmitBatchHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ContainerSubmitBatchExceptionHeaders + } + }, + requestBody: body, + queryParameters: [ + timeoutInSeconds, + comp4, + restype2 + ], + urlParameters: [url], + headerParameters: [ + contentType, + accept, + version, + requestId, + contentLength, + multipartContentType + ], + isXML: true, + contentType: "application/xml; charset=utf-8", + mediaType: "xml", + serializer: xmlSerializer$4 +}; +const filterBlobsOperationSpec = { + path: "/{containerName}", httpMethod: "GET", responses: { 200: { - bodyMapper: BlobServiceProperties, - headersMapper: ServiceGetPropertiesHeaders + bodyMapper: FilterBlobSegment, + headersMapper: ContainerFilterBlobsHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ContainerFilterBlobsExceptionHeaders + } + }, + queryParameters: [ + timeoutInSeconds, + marker, + maxPageSize, + comp5, + where, + restype2 + ], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1 + ], + isXML: true, + serializer: xmlSerializer$4 +}; +const acquireLeaseOperationSpec$1 = { + path: "/{containerName}", + httpMethod: "PUT", + responses: { + 201: { + headersMapper: ContainerAcquireLeaseHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ContainerAcquireLeaseExceptionHeaders + } + }, + queryParameters: [ + timeoutInSeconds, + restype2, + comp10 + ], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1, + ifModifiedSince, + ifUnmodifiedSince, + action, + duration, + proposedLeaseId + ], + isXML: true, + serializer: xmlSerializer$4 +}; +const releaseLeaseOperationSpec$1 = { + path: "/{containerName}", + httpMethod: "PUT", + responses: { + 200: { + headersMapper: ContainerReleaseLeaseHeaders + }, 
+ default: { + bodyMapper: StorageError, + headersMapper: ContainerReleaseLeaseExceptionHeaders + } + }, + queryParameters: [ + timeoutInSeconds, + restype2, + comp10 + ], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1, + ifModifiedSince, + ifUnmodifiedSince, + action1, + leaseId1 + ], + isXML: true, + serializer: xmlSerializer$4 +}; +const renewLeaseOperationSpec$1 = { + path: "/{containerName}", + httpMethod: "PUT", + responses: { + 200: { + headersMapper: ContainerRenewLeaseHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: ContainerRenewLeaseExceptionHeaders + } + }, + queryParameters: [ + timeoutInSeconds, + restype2, + comp10 + ], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1, + ifModifiedSince, + ifUnmodifiedSince, + leaseId1, + action2 + ], + isXML: true, + serializer: xmlSerializer$4 +}; +const breakLeaseOperationSpec$1 = { + path: "/{containerName}", + httpMethod: "PUT", + responses: { + 202: { + headersMapper: ContainerBreakLeaseHeaders }, default: { bodyMapper: StorageError, - headersMapper: ServiceGetPropertiesExceptionHeaders + headersMapper: ContainerBreakLeaseExceptionHeaders } }, queryParameters: [ - restype, - comp, - timeoutInSeconds + timeoutInSeconds, + restype2, + comp10 ], urlParameters: [url], headerParameters: [ version, requestId, - accept1 + accept1, + ifModifiedSince, + ifUnmodifiedSince, + action3, + breakPeriod ], isXML: true, - serializer: xmlSerializer$5 + serializer: xmlSerializer$4 }; -const getStatisticsOperationSpec = { - path: "/", - httpMethod: "GET", +const changeLeaseOperationSpec$1 = { + path: "/{containerName}", + httpMethod: "PUT", responses: { 200: { - bodyMapper: BlobServiceStatistics, - headersMapper: ServiceGetStatisticsHeaders + headersMapper: ContainerChangeLeaseHeaders }, default: { bodyMapper: StorageError, - headersMapper: ServiceGetStatisticsExceptionHeaders + headersMapper: ContainerChangeLeaseExceptionHeaders } }, queryParameters: [ - restype, timeoutInSeconds, - comp1 + restype2, + comp10 ], urlParameters: [url], headerParameters: [ version, requestId, - accept1 + accept1, + ifModifiedSince, + ifUnmodifiedSince, + leaseId1, + action4, + proposedLeaseId1 ], isXML: true, - serializer: xmlSerializer$5 + serializer: xmlSerializer$4 }; -const listContainersSegmentOperationSpec = { - path: "/", +const listBlobFlatSegmentOperationSpec = { + path: "/{containerName}", httpMethod: "GET", responses: { 200: { - bodyMapper: ListContainersSegmentResponse, - headersMapper: ServiceListContainersSegmentHeaders + bodyMapper: ListBlobsFlatSegmentResponse, + headersMapper: ContainerListBlobFlatSegmentHeaders }, default: { bodyMapper: StorageError, - headersMapper: ServiceListContainersSegmentExceptionHeaders + headersMapper: ContainerListBlobFlatSegmentExceptionHeaders } }, queryParameters: [ @@ -32411,7 +30756,8 @@ const listContainersSegmentOperationSpec = { prefix, marker, maxPageSize, - include + restype2, + include1 ], urlParameters: [url], headerParameters: [ @@ -32420,117 +30766,57 @@ const listContainersSegmentOperationSpec = { accept1 ], isXML: true, - serializer: xmlSerializer$5 + serializer: xmlSerializer$4 }; -const getUserDelegationKeyOperationSpec = { - path: "/", - httpMethod: "POST", +const listBlobHierarchySegmentOperationSpec = { + path: "/{containerName}", + httpMethod: "GET", responses: { 200: { - bodyMapper: UserDelegationKey, - headersMapper: ServiceGetUserDelegationKeyHeaders + bodyMapper: ListBlobsHierarchySegmentResponse, + headersMapper: 
ContainerListBlobHierarchySegmentHeaders }, default: { bodyMapper: StorageError, - headersMapper: ServiceGetUserDelegationKeyExceptionHeaders + headersMapper: ContainerListBlobHierarchySegmentExceptionHeaders } }, - requestBody: keyInfo, queryParameters: [ - restype, timeoutInSeconds, - comp3 + comp2, + prefix, + marker, + maxPageSize, + restype2, + include1, + delimiter ], urlParameters: [url], headerParameters: [ - contentType, - accept, version, - requestId + requestId, + accept1 ], isXML: true, - contentType: "application/xml; charset=utf-8", - mediaType: "xml", - serializer: xmlSerializer$5 + serializer: xmlSerializer$4 }; -const getAccountInfoOperationSpec$2 = { - path: "/", +const getAccountInfoOperationSpec$1 = { + path: "/{containerName}", httpMethod: "GET", responses: { 200: { - headersMapper: ServiceGetAccountInfoHeaders + headersMapper: ContainerGetAccountInfoHeaders }, default: { bodyMapper: StorageError, - headersMapper: ServiceGetAccountInfoExceptionHeaders + headersMapper: ContainerGetAccountInfoExceptionHeaders } }, queryParameters: [comp, restype1], urlParameters: [url], headerParameters: [version, accept1], isXML: true, - serializer: xmlSerializer$5 -}; -const submitBatchOperationSpec$1 = { - path: "/", - httpMethod: "POST", - responses: { - 202: { - bodyMapper: { - type: { name: "Stream" }, - serializedName: "parsedResponse" - }, - headersMapper: ServiceSubmitBatchHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: ServiceSubmitBatchExceptionHeaders - } - }, - requestBody: body, - queryParameters: [timeoutInSeconds, comp4], - urlParameters: [url], - headerParameters: [ - contentType, - accept, - version, - requestId, - contentLength, - multipartContentType - ], - isXML: true, - contentType: "application/xml; charset=utf-8", - mediaType: "xml", - serializer: xmlSerializer$5 -}; -const filterBlobsOperationSpec$1 = { - path: "/", - httpMethod: "GET", - responses: { - 200: { - bodyMapper: FilterBlobSegment, - headersMapper: ServiceFilterBlobsHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: ServiceFilterBlobsExceptionHeaders - } - }, - queryParameters: [ - timeoutInSeconds, - marker, - maxPageSize, - comp5, - where - ], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1 - ], - isXML: true, - serializer: xmlSerializer$5 + serializer: xmlSerializer$4 }; /* @@ -32540,144 +30826,147 @@ const filterBlobsOperationSpec$1 = { * Code generated by Microsoft (R) AutoRest Code Generator. * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -/** Class representing a Container. */ -class Container { +/** Class representing a Blob. */ +class Blob$1 { /** - * Initialize a new instance of the class Container class. + * Initialize a new instance of the class Blob class. * @param client Reference to the service client */ constructor(client) { this.client = client; } /** - * creates a new container under the specified account. If the container with the same name already - * exists, the operation fails + * The Download operation reads or downloads a blob from the system, including its metadata and + * properties. You can also call Download to read a snapshot. * @param options The options parameters. 
*/ - create(options) { + download(options) { const operationArguments = { options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, createOperationSpec$2); + return this.client.sendOperationRequest(operationArguments, downloadOperationSpec); } /** - * returns all user-defined metadata and system properties for the specified container. The data - * returned does not include the container's list of blobs + * The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system + * properties for the blob. It does not return the content of the blob. * @param options The options parameters. */ getProperties(options) { const operationArguments = { options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, getPropertiesOperationSpec$1); + return this.client.sendOperationRequest(operationArguments, getPropertiesOperationSpec); } /** - * operation marks the specified container for deletion. The container and any blobs contained within - * it are later deleted during garbage collection + * If the storage account's soft delete feature is disabled then, when a blob is deleted, it is + * permanently removed from the storage account. If the storage account's soft delete feature is + * enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible + * immediately. However, the blob service retains the blob or snapshot for the number of days specified + * by the DeleteRetentionPolicy section of [Storage service properties] + * (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's data is + * permanently removed from the storage account. Note that you continue to be charged for the + * soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify the + * "include=deleted" query parameter to discover which blobs and snapshots have been soft deleted. You + * can then use the Undelete Blob API to restore a soft-deleted blob. All other operations on a + * soft-deleted blob or snapshot causes the service to return an HTTP status code of 404 + * (ResourceNotFound). * @param options The options parameters. */ delete(options) { const operationArguments = { options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, deleteOperationSpec$1); + return this.client.sendOperationRequest(operationArguments, deleteOperationSpec); } /** - * operation sets one or more user-defined name-value pairs for the specified container. + * Undelete a blob that was previously soft deleted * @param options The options parameters. */ - setMetadata(options) { + undelete(options) { const operationArguments = { options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, setMetadataOperationSpec$1); + return this.client.sendOperationRequest(operationArguments, undeleteOperationSpec); } /** - * gets the permissions for the specified container. The permissions indicate whether container data - * may be accessed publicly. + * Sets the time a blob will expire and be deleted. + * @param expiryOptions Required. Indicates mode of the expiry time * @param options The options parameters. 
*/ - getAccessPolicy(options) { + setExpiry(expiryOptions, options) { const operationArguments = { + expiryOptions, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, getAccessPolicyOperationSpec); + return this.client.sendOperationRequest(operationArguments, setExpiryOperationSpec); } /** - * sets the permissions for the specified container. The permissions indicate whether blobs in a - * container may be accessed publicly. + * The Set HTTP Headers operation sets system properties on the blob * @param options The options parameters. */ - setAccessPolicy(options) { + setHttpHeaders(options) { const operationArguments = { options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, setAccessPolicyOperationSpec); + return this.client.sendOperationRequest(operationArguments, setHttpHeadersOperationSpec); } /** - * Restores a previously-deleted container. + * The Set Immutability Policy operation sets the immutability policy on the blob * @param options The options parameters. */ - restore(options) { + setImmutabilityPolicy(options) { const operationArguments = { options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, restoreOperationSpec); + return this.client.sendOperationRequest(operationArguments, setImmutabilityPolicyOperationSpec); } /** - * Renames an existing container. - * @param sourceContainerName Required. Specifies the name of the container to rename. + * The Delete Immutability Policy operation deletes the immutability policy on the blob * @param options The options parameters. */ - rename(sourceContainerName, options) { + deleteImmutabilityPolicy(options) { const operationArguments = { - sourceContainerName, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, renameOperationSpec); + return this.client.sendOperationRequest(operationArguments, deleteImmutabilityPolicyOperationSpec); } /** - * The Batch operation allows multiple API calls to be embedded into a single HTTP request. - * @param contentLength The length of the request. - * @param multipartContentType Required. The value of this header must be multipart/mixed with a batch - * boundary. Example header value: multipart/mixed; boundary=batch_ - * @param body Initial data + * The Set Legal Hold operation sets a legal hold on the blob. + * @param legalHold Specified if a legal hold should be set on the blob. * @param options The options parameters. */ - submitBatch(contentLength, multipartContentType, body, options) { + setLegalHold(legalHold, options) { const operationArguments = { - contentLength, - multipartContentType, - body, + legalHold, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, submitBatchOperationSpec); + return this.client.sendOperationRequest(operationArguments, setLegalHoldOperationSpec); } /** - * The Filter Blobs operation enables callers to list blobs in a container whose tags match a given - * search expression. Filter blobs searches within the given container. + * The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more + * name-value pairs * @param options The options parameters. 
*/ - filterBlobs(options) { + setMetadata(options) { const operationArguments = { options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, filterBlobsOperationSpec); + return this.client.sendOperationRequest(operationArguments, setMetadataOperationSpec); } /** - * [Update] establishes and manages a lock on a container for delete operations. The lock duration can - * be 15 to 60 seconds, or can be infinite + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + * operations * @param options The options parameters. */ acquireLease(options) { const operationArguments = { options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, acquireLeaseOperationSpec$1); + return this.client.sendOperationRequest(operationArguments, acquireLeaseOperationSpec); } /** - * [Update] establishes and manages a lock on a container for delete operations. The lock duration can - * be 15 to 60 seconds, or can be infinite + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + * operations * @param leaseId Specifies the current lease ID on the resource. * @param options The options parameters. */ @@ -32686,11 +30975,11 @@ class Container { leaseId, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, releaseLeaseOperationSpec$1); + return this.client.sendOperationRequest(operationArguments, releaseLeaseOperationSpec); } /** - * [Update] establishes and manages a lock on a container for delete operations. The lock duration can - * be 15 to 60 seconds, or can be infinite + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + * operations * @param leaseId Specifies the current lease ID on the resource. * @param options The options parameters. */ @@ -32699,60 +30988,106 @@ class Container { leaseId, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, renewLeaseOperationSpec$1); + return this.client.sendOperationRequest(operationArguments, renewLeaseOperationSpec); } /** - * [Update] establishes and manages a lock on a container for delete operations. The lock duration can - * be 15 to 60 seconds, or can be infinite + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + * operations + * @param leaseId Specifies the current lease ID on the resource. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 + * (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor + * (String) for a list of valid GUID string formats. + * @param options The options parameters. + */ + changeLease(leaseId, proposedLeaseId, options) { + const operationArguments = { + leaseId, + proposedLeaseId, + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, changeLeaseOperationSpec); + } + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + * operations * @param options The options parameters. 
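The lease lifecycle the acquire/release/renew/change/break methods above implement can be driven through BlobLeaseClient; a minimal sketch with placeholder names (the 30-second duration is an example; the service accepts 15-60 seconds or -1 for infinite):

import { BlobServiceClient } from "@azure/storage-blob";

// Sketch: lock a blob for write/delete, keep the lock alive, then release it.
async function leaseExample(): Promise<void> {
  const blob = BlobServiceClient.fromConnectionString("UseDevelopmentStorage=true")
    .getContainerClient("my-container")
    .getBlobClient("locked.bin");

  const lease = blob.getBlobLeaseClient();
  await lease.acquireLease(30);   // PUT ?comp=lease, x-ms-lease-action: acquire
  await lease.renewLease();       // x-ms-lease-action: renew
  await lease.releaseLease();     // x-ms-lease-action: release
}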
*/ breakLease(options) { const operationArguments = { options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, breakLeaseOperationSpec$1); + return this.client.sendOperationRequest(operationArguments, breakLeaseOperationSpec); } /** - * [Update] establishes and manages a lock on a container for delete operations. The lock duration can - * be 15 to 60 seconds, or can be infinite - * @param leaseId Specifies the current lease ID on the resource. - * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 - * (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor - * (String) for a list of valid GUID string formats. + * The Create Snapshot operation creates a read-only snapshot of a blob * @param options The options parameters. */ - changeLease(leaseId, proposedLeaseId, options) { + createSnapshot(options) { const operationArguments = { - leaseId, - proposedLeaseId, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, changeLeaseOperationSpec$1); + return this.client.sendOperationRequest(operationArguments, createSnapshotOperationSpec); } /** - * [Update] The List Blobs operation returns a list of the blobs under the specified container + * The Start Copy From URL operation copies a blob or an internet resource to a new blob. + * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to + * 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would + * appear in a request URI. The source blob must either be public or must be authenticated via a shared + * access signature. * @param options The options parameters. */ - listBlobFlatSegment(options) { + startCopyFromURL(copySource, options) { const operationArguments = { + copySource, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, listBlobFlatSegmentOperationSpec); + return this.client.sendOperationRequest(operationArguments, startCopyFromURLOperationSpec); } /** - * [Update] The List Blobs operation returns a list of the blobs under the specified container - * @param delimiter When the request includes this parameter, the operation returns a BlobPrefix - * element in the response body that acts as a placeholder for all blobs whose names begin with the - * same substring up to the appearance of the delimiter character. The delimiter may be a single - * character or a string. + * The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return + * a response until the copy is complete. + * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to + * 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would + * appear in a request URI. The source blob must either be public or must be authenticated via a shared + * access signature. * @param options The options parameters. 
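The asynchronous Start Copy From URL and the synchronous Copy From URL described above correspond to a poller versus a blocking call in the public API; a sketch under the assumption of a publicly readable source URL (all URLs and names here are placeholders):

import { BlobServiceClient } from "@azure/storage-blob";

// Sketch: server-side copies into a destination blob.
async function copyExample(): Promise<void> {
  const dest = BlobServiceClient.fromConnectionString("UseDevelopmentStorage=true")
    .getContainerClient("my-container")
    .getBlobClient("copy-target.bin");

  // Asynchronous copy: the returned poller tracks the x-ms-copy-id operation.
  const poller = await dest.beginCopyFromURL("https://example.com/source.bin");
  await poller.pollUntilDone();

  // Synchronous variant: does not return until the copy is complete.
  await dest.syncCopyFromURL("https://example.com/source.bin");
}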
*/ - listBlobHierarchySegment(delimiter, options) { + copyFromURL(copySource, options) { const operationArguments = { - delimiter, + copySource, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, listBlobHierarchySegmentOperationSpec); + return this.client.sendOperationRequest(operationArguments, copyFromURLOperationSpec); + } + /** + * The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination + * blob with zero length and full metadata. + * @param copyId The copy identifier provided in the x-ms-copy-id header of the original Copy Blob + * operation. + * @param options The options parameters. + */ + abortCopyFromURL(copyId, options) { + const operationArguments = { + copyId, + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, abortCopyFromURLOperationSpec); + } + /** + * The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium + * storage account and on a block blob in a blob storage account (locally redundant storage only). A + * premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block + * blob's tier determines Hot/Cool/Archive storage type. This operation does not update the blob's + * ETag. + * @param tier Indicates the tier to be set on the blob. + * @param options The options parameters. + */ + setTier(tier, options) { + const operationArguments = { + tier, + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, setTierOperationSpec); } /** * Returns the sku name and account kind @@ -32762,73 +31097,219 @@ class Container { const operationArguments = { options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, getAccountInfoOperationSpec$1); + return this.client.sendOperationRequest(operationArguments, getAccountInfoOperationSpec); + } + /** + * The Query operation enables users to select/project on blob data by providing simple query + * expressions. + * @param options The options parameters. + */ + query(options) { + const operationArguments = { + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, queryOperationSpec); + } + /** + * The Get Tags operation enables users to get the tags associated with a blob. + * @param options The options parameters. + */ + getTags(options) { + const operationArguments = { + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, getTagsOperationSpec); + } + /** + * The Set Tags operation enables users to set tags on a blob. + * @param options The options parameters. 
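Set Tier and the tag operations added above are exposed directly on BlobClient; a short sketch with placeholder names (note the docs' point that changing the tier does not update the blob's ETag):

import { BlobServiceClient } from "@azure/storage-blob";

// Sketch: move a block blob between access tiers and manage its tags.
async function tierAndTagsExample(): Promise<void> {
  const blob = BlobServiceClient.fromConnectionString("UseDevelopmentStorage=true")
    .getContainerClient("my-container")
    .getBlobClient("archive-me.log");

  await blob.setAccessTier("Cool");               // PUT ?comp=tier
  await blob.setTags({ project: "get-cmake" });   // PUT ?comp=tags
  const { tags } = await blob.getTags();          // GET ?comp=tags
  console.log(tags);
}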
+ */ + setTags(options) { + const operationArguments = { + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, setTagsOperationSpec); } } // Operation Specifications -const xmlSerializer$4 = new coreHttp__namespace.Serializer(Mappers, /* isXml */ true); -const createOperationSpec$2 = { - path: "/{containerName}", +const xmlSerializer$3 = new coreHttp__namespace.Serializer(Mappers, /* isXml */ true); +const downloadOperationSpec = { + path: "/{containerName}/{blob}", + httpMethod: "GET", + responses: { + 200: { + bodyMapper: { + type: { name: "Stream" }, + serializedName: "parsedResponse" + }, + headersMapper: BlobDownloadHeaders + }, + 206: { + bodyMapper: { + type: { name: "Stream" }, + serializedName: "parsedResponse" + }, + headersMapper: BlobDownloadHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: BlobDownloadExceptionHeaders + } + }, + queryParameters: [ + timeoutInSeconds, + snapshot, + versionId + ], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1, + leaseId, + ifModifiedSince, + ifUnmodifiedSince, + range, + rangeGetContentMD5, + rangeGetContentCRC64, + encryptionKey, + encryptionKeySha256, + encryptionAlgorithm, + ifMatch, + ifNoneMatch, + ifTags + ], + isXML: true, + serializer: xmlSerializer$3 +}; +const getPropertiesOperationSpec = { + path: "/{containerName}/{blob}", + httpMethod: "HEAD", + responses: { + 200: { + headersMapper: BlobGetPropertiesHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: BlobGetPropertiesExceptionHeaders + } + }, + queryParameters: [ + timeoutInSeconds, + snapshot, + versionId + ], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1, + leaseId, + ifModifiedSince, + ifUnmodifiedSince, + encryptionKey, + encryptionKeySha256, + encryptionAlgorithm, + ifMatch, + ifNoneMatch, + ifTags + ], + isXML: true, + serializer: xmlSerializer$3 +}; +const deleteOperationSpec = { + path: "/{containerName}/{blob}", + httpMethod: "DELETE", + responses: { + 202: { + headersMapper: BlobDeleteHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: BlobDeleteExceptionHeaders + } + }, + queryParameters: [ + timeoutInSeconds, + snapshot, + versionId, + blobDeleteType + ], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1, + leaseId, + ifModifiedSince, + ifUnmodifiedSince, + ifMatch, + ifNoneMatch, + ifTags, + deleteSnapshots + ], + isXML: true, + serializer: xmlSerializer$3 +}; +const undeleteOperationSpec = { + path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { - 201: { - headersMapper: ContainerCreateHeaders + 200: { + headersMapper: BlobUndeleteHeaders }, default: { bodyMapper: StorageError, - headersMapper: ContainerCreateExceptionHeaders + headersMapper: BlobUndeleteExceptionHeaders } }, - queryParameters: [timeoutInSeconds, restype2], + queryParameters: [timeoutInSeconds, comp8], urlParameters: [url], headerParameters: [ version, requestId, - accept1, - metadata, - access, - defaultEncryptionScope, - preventEncryptionScopeOverride + accept1 ], isXML: true, - serializer: xmlSerializer$4 + serializer: xmlSerializer$3 }; -const getPropertiesOperationSpec$1 = { - path: "/{containerName}", - httpMethod: "GET", +const setExpiryOperationSpec = { + path: "/{containerName}/{blob}", + httpMethod: "PUT", responses: { 200: { - headersMapper: ContainerGetPropertiesHeaders + headersMapper: BlobSetExpiryHeaders }, default: { 
bodyMapper: StorageError, - headersMapper: ContainerGetPropertiesExceptionHeaders + headersMapper: BlobSetExpiryExceptionHeaders } }, - queryParameters: [timeoutInSeconds, restype2], + queryParameters: [timeoutInSeconds, comp11], urlParameters: [url], headerParameters: [ version, requestId, accept1, - leaseId + expiryOptions, + expiresOn ], isXML: true, - serializer: xmlSerializer$4 + serializer: xmlSerializer$3 }; -const deleteOperationSpec$1 = { - path: "/{containerName}", - httpMethod: "DELETE", +const setHttpHeadersOperationSpec = { + path: "/{containerName}/{blob}", + httpMethod: "PUT", responses: { - 202: { - headersMapper: ContainerDeleteHeaders + 200: { + headersMapper: BlobSetHttpHeadersHeaders }, default: { bodyMapper: StorageError, - headersMapper: ContainerDeleteExceptionHeaders + headersMapper: BlobSetHttpHeadersExceptionHeaders } }, - queryParameters: [timeoutInSeconds, restype2], + queryParameters: [comp, timeoutInSeconds], urlParameters: [url], headerParameters: [ version, @@ -32836,467 +31317,582 @@ const deleteOperationSpec$1 = { accept1, leaseId, ifModifiedSince, - ifUnmodifiedSince + ifUnmodifiedSince, + ifMatch, + ifNoneMatch, + ifTags, + blobCacheControl, + blobContentType, + blobContentMD5, + blobContentEncoding, + blobContentLanguage, + blobContentDisposition ], isXML: true, - serializer: xmlSerializer$4 + serializer: xmlSerializer$3 }; -const setMetadataOperationSpec$1 = { - path: "/{containerName}", +const setImmutabilityPolicyOperationSpec = { + path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { 200: { - headersMapper: ContainerSetMetadataHeaders + headersMapper: BlobSetImmutabilityPolicyHeaders }, default: { bodyMapper: StorageError, - headersMapper: ContainerSetMetadataExceptionHeaders + headersMapper: BlobSetImmutabilityPolicyExceptionHeaders } }, - queryParameters: [ - timeoutInSeconds, - restype2, - comp6 - ], + queryParameters: [timeoutInSeconds, comp12], urlParameters: [url], headerParameters: [ version, requestId, accept1, - metadata, - leaseId, - ifModifiedSince + ifUnmodifiedSince, + immutabilityPolicyExpiry, + immutabilityPolicyMode ], isXML: true, - serializer: xmlSerializer$4 + serializer: xmlSerializer$3 }; -const getAccessPolicyOperationSpec = { - path: "/{containerName}", - httpMethod: "GET", +const deleteImmutabilityPolicyOperationSpec = { + path: "/{containerName}/{blob}", + httpMethod: "DELETE", responses: { 200: { - bodyMapper: { - type: { - name: "Sequence", - element: { - type: { name: "Composite", className: "SignedIdentifier" } - } - }, - serializedName: "SignedIdentifiers", - xmlName: "SignedIdentifiers", - xmlIsWrapped: true, - xmlElementName: "SignedIdentifier" - }, - headersMapper: ContainerGetAccessPolicyHeaders + headersMapper: BlobDeleteImmutabilityPolicyHeaders }, default: { bodyMapper: StorageError, - headersMapper: ContainerGetAccessPolicyExceptionHeaders + headersMapper: BlobDeleteImmutabilityPolicyExceptionHeaders } }, - queryParameters: [ - timeoutInSeconds, - restype2, - comp7 + queryParameters: [timeoutInSeconds, comp12], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1 ], + isXML: true, + serializer: xmlSerializer$3 +}; +const setLegalHoldOperationSpec = { + path: "/{containerName}/{blob}", + httpMethod: "PUT", + responses: { + 200: { + headersMapper: BlobSetLegalHoldHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: BlobSetLegalHoldExceptionHeaders + } + }, + queryParameters: [timeoutInSeconds, comp13], urlParameters: [url], headerParameters: [ version, 
requestId, accept1, - leaseId + legalHold ], isXML: true, - serializer: xmlSerializer$4 + serializer: xmlSerializer$3 }; -const setAccessPolicyOperationSpec = { - path: "/{containerName}", +const setMetadataOperationSpec = { + path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { 200: { - headersMapper: ContainerSetAccessPolicyHeaders + headersMapper: BlobSetMetadataHeaders }, default: { bodyMapper: StorageError, - headersMapper: ContainerSetAccessPolicyExceptionHeaders + headersMapper: BlobSetMetadataExceptionHeaders } }, - requestBody: containerAcl, - queryParameters: [ - timeoutInSeconds, - restype2, - comp7 - ], + queryParameters: [timeoutInSeconds, comp6], urlParameters: [url], headerParameters: [ - contentType, - accept, version, requestId, - access, + accept1, + metadata, leaseId, ifModifiedSince, - ifUnmodifiedSince + ifUnmodifiedSince, + encryptionKey, + encryptionKeySha256, + encryptionAlgorithm, + ifMatch, + ifNoneMatch, + ifTags, + encryptionScope ], isXML: true, - contentType: "application/xml; charset=utf-8", - mediaType: "xml", - serializer: xmlSerializer$4 + serializer: xmlSerializer$3 }; -const restoreOperationSpec = { - path: "/{containerName}", +const acquireLeaseOperationSpec = { + path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { 201: { - headersMapper: ContainerRestoreHeaders + headersMapper: BlobAcquireLeaseHeaders }, default: { bodyMapper: StorageError, - headersMapper: ContainerRestoreExceptionHeaders + headersMapper: BlobAcquireLeaseExceptionHeaders } }, - queryParameters: [ - timeoutInSeconds, - restype2, - comp8 - ], + queryParameters: [timeoutInSeconds, comp10], urlParameters: [url], headerParameters: [ version, requestId, accept1, - deletedContainerName, - deletedContainerVersion + ifModifiedSince, + ifUnmodifiedSince, + action, + duration, + proposedLeaseId, + ifMatch, + ifNoneMatch, + ifTags ], isXML: true, - serializer: xmlSerializer$4 + serializer: xmlSerializer$3 }; -const renameOperationSpec = { - path: "/{containerName}", +const releaseLeaseOperationSpec = { + path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { 200: { - headersMapper: ContainerRenameHeaders + headersMapper: BlobReleaseLeaseHeaders }, default: { bodyMapper: StorageError, - headersMapper: ContainerRenameExceptionHeaders + headersMapper: BlobReleaseLeaseExceptionHeaders } }, - queryParameters: [ - timeoutInSeconds, - restype2, - comp9 - ], + queryParameters: [timeoutInSeconds, comp10], urlParameters: [url], headerParameters: [ version, requestId, accept1, - sourceContainerName, - sourceLeaseId + ifModifiedSince, + ifUnmodifiedSince, + action1, + leaseId1, + ifMatch, + ifNoneMatch, + ifTags ], isXML: true, - serializer: xmlSerializer$4 + serializer: xmlSerializer$3 }; -const submitBatchOperationSpec = { - path: "/{containerName}", - httpMethod: "POST", +const renewLeaseOperationSpec = { + path: "/{containerName}/{blob}", + httpMethod: "PUT", responses: { - 202: { - bodyMapper: { - type: { name: "Stream" }, - serializedName: "parsedResponse" - }, - headersMapper: ContainerSubmitBatchHeaders + 200: { + headersMapper: BlobRenewLeaseHeaders }, default: { bodyMapper: StorageError, - headersMapper: ContainerSubmitBatchExceptionHeaders + headersMapper: BlobRenewLeaseExceptionHeaders } }, - requestBody: body, - queryParameters: [ - timeoutInSeconds, - comp4, - restype2 - ], + queryParameters: [timeoutInSeconds, comp10], urlParameters: [url], headerParameters: [ - contentType, - accept, version, requestId, - contentLength, - multipartContentType + accept1, + 
ifModifiedSince, + ifUnmodifiedSince, + leaseId1, + action2, + ifMatch, + ifNoneMatch, + ifTags ], isXML: true, - contentType: "application/xml; charset=utf-8", - mediaType: "xml", - serializer: xmlSerializer$4 + serializer: xmlSerializer$3 }; -const filterBlobsOperationSpec = { - path: "/{containerName}", - httpMethod: "GET", +const changeLeaseOperationSpec = { + path: "/{containerName}/{blob}", + httpMethod: "PUT", responses: { 200: { - bodyMapper: FilterBlobSegment, - headersMapper: ContainerFilterBlobsHeaders + headersMapper: BlobChangeLeaseHeaders }, default: { bodyMapper: StorageError, - headersMapper: ContainerFilterBlobsExceptionHeaders + headersMapper: BlobChangeLeaseExceptionHeaders } }, - queryParameters: [ - timeoutInSeconds, - marker, - maxPageSize, - comp5, - where, - restype2 + queryParameters: [timeoutInSeconds, comp10], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1, + ifModifiedSince, + ifUnmodifiedSince, + leaseId1, + action4, + proposedLeaseId1, + ifMatch, + ifNoneMatch, + ifTags ], + isXML: true, + serializer: xmlSerializer$3 +}; +const breakLeaseOperationSpec = { + path: "/{containerName}/{blob}", + httpMethod: "PUT", + responses: { + 202: { + headersMapper: BlobBreakLeaseHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: BlobBreakLeaseExceptionHeaders + } + }, + queryParameters: [timeoutInSeconds, comp10], urlParameters: [url], headerParameters: [ version, requestId, - accept1 + accept1, + ifModifiedSince, + ifUnmodifiedSince, + action3, + breakPeriod, + ifMatch, + ifNoneMatch, + ifTags ], isXML: true, - serializer: xmlSerializer$4 + serializer: xmlSerializer$3 }; -const acquireLeaseOperationSpec$1 = { - path: "/{containerName}", +const createSnapshotOperationSpec = { + path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { 201: { - headersMapper: ContainerAcquireLeaseHeaders + headersMapper: BlobCreateSnapshotHeaders }, default: { bodyMapper: StorageError, - headersMapper: ContainerAcquireLeaseExceptionHeaders + headersMapper: BlobCreateSnapshotExceptionHeaders } }, - queryParameters: [ - timeoutInSeconds, - restype2, - comp10 - ], + queryParameters: [timeoutInSeconds, comp14], urlParameters: [url], headerParameters: [ version, requestId, accept1, + metadata, + leaseId, ifModifiedSince, ifUnmodifiedSince, - action, - duration, - proposedLeaseId + encryptionKey, + encryptionKeySha256, + encryptionAlgorithm, + ifMatch, + ifNoneMatch, + ifTags, + encryptionScope ], isXML: true, - serializer: xmlSerializer$4 + serializer: xmlSerializer$3 }; -const releaseLeaseOperationSpec$1 = { - path: "/{containerName}", +const startCopyFromURLOperationSpec = { + path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { - 200: { - headersMapper: ContainerReleaseLeaseHeaders + 202: { + headersMapper: BlobStartCopyFromURLHeaders }, default: { bodyMapper: StorageError, - headersMapper: ContainerReleaseLeaseExceptionHeaders + headersMapper: BlobStartCopyFromURLExceptionHeaders } }, - queryParameters: [ - timeoutInSeconds, - restype2, - comp10 + queryParameters: [timeoutInSeconds], + urlParameters: [url], + headerParameters: [ + version, + requestId, + accept1, + metadata, + leaseId, + ifModifiedSince, + ifUnmodifiedSince, + ifMatch, + ifNoneMatch, + ifTags, + immutabilityPolicyExpiry, + immutabilityPolicyMode, + tier, + rehydratePriority, + sourceIfModifiedSince, + sourceIfUnmodifiedSince, + sourceIfMatch, + sourceIfNoneMatch, + sourceIfTags, + copySource, + blobTagsString, + sealBlob, + legalHold1 ], + isXML: true, + 
serializer: xmlSerializer$3 +}; +const copyFromURLOperationSpec = { + path: "/{containerName}/{blob}", + httpMethod: "PUT", + responses: { + 202: { + headersMapper: BlobCopyFromURLHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: BlobCopyFromURLExceptionHeaders + } + }, + queryParameters: [timeoutInSeconds], urlParameters: [url], headerParameters: [ version, requestId, accept1, + metadata, + leaseId, ifModifiedSince, ifUnmodifiedSince, - action1, - leaseId1 + ifMatch, + ifNoneMatch, + ifTags, + immutabilityPolicyExpiry, + immutabilityPolicyMode, + encryptionScope, + tier, + sourceIfModifiedSince, + sourceIfUnmodifiedSince, + sourceIfMatch, + sourceIfNoneMatch, + copySource, + blobTagsString, + legalHold1, + xMsRequiresSync, + sourceContentMD5, + copySourceAuthorization, + copySourceTags ], isXML: true, - serializer: xmlSerializer$4 + serializer: xmlSerializer$3 }; -const renewLeaseOperationSpec$1 = { - path: "/{containerName}", +const abortCopyFromURLOperationSpec = { + path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { - 200: { - headersMapper: ContainerRenewLeaseHeaders + 204: { + headersMapper: BlobAbortCopyFromURLHeaders }, default: { bodyMapper: StorageError, - headersMapper: ContainerRenewLeaseExceptionHeaders + headersMapper: BlobAbortCopyFromURLExceptionHeaders } }, queryParameters: [ timeoutInSeconds, - restype2, - comp10 + comp15, + copyId ], urlParameters: [url], headerParameters: [ version, requestId, accept1, - ifModifiedSince, - ifUnmodifiedSince, - leaseId1, - action2 + leaseId, + copyActionAbortConstant ], isXML: true, - serializer: xmlSerializer$4 + serializer: xmlSerializer$3 }; -const breakLeaseOperationSpec$1 = { - path: "/{containerName}", +const setTierOperationSpec = { + path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { + 200: { + headersMapper: BlobSetTierHeaders + }, 202: { - headersMapper: ContainerBreakLeaseHeaders + headersMapper: BlobSetTierHeaders }, default: { bodyMapper: StorageError, - headersMapper: ContainerBreakLeaseExceptionHeaders + headersMapper: BlobSetTierExceptionHeaders } }, queryParameters: [ timeoutInSeconds, - restype2, - comp10 + snapshot, + versionId, + comp16 ], urlParameters: [url], headerParameters: [ version, requestId, accept1, - ifModifiedSince, - ifUnmodifiedSince, - action3, - breakPeriod + leaseId, + ifTags, + rehydratePriority, + tier1 ], isXML: true, - serializer: xmlSerializer$4 + serializer: xmlSerializer$3 }; -const changeLeaseOperationSpec$1 = { - path: "/{containerName}", - httpMethod: "PUT", +const getAccountInfoOperationSpec = { + path: "/{containerName}/{blob}", + httpMethod: "GET", responses: { 200: { - headersMapper: ContainerChangeLeaseHeaders + headersMapper: BlobGetAccountInfoHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: BlobGetAccountInfoExceptionHeaders + } + }, + queryParameters: [comp, restype1], + urlParameters: [url], + headerParameters: [version, accept1], + isXML: true, + serializer: xmlSerializer$3 +}; +const queryOperationSpec = { + path: "/{containerName}/{blob}", + httpMethod: "POST", + responses: { + 200: { + bodyMapper: { + type: { name: "Stream" }, + serializedName: "parsedResponse" + }, + headersMapper: BlobQueryHeaders + }, + 206: { + bodyMapper: { + type: { name: "Stream" }, + serializedName: "parsedResponse" + }, + headersMapper: BlobQueryHeaders }, default: { bodyMapper: StorageError, - headersMapper: ContainerChangeLeaseExceptionHeaders + headersMapper: BlobQueryExceptionHeaders } }, + requestBody: queryRequest, 
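  // Shape shared by every generated spec in this bundle: `path` is a URL
  // template filled from urlParameters, `queryParameters`/`headerParameters`
  // reference shared parameter definitions (comp*, leaseId, if* conditions),
  // and `responses` maps each expected status code to header/body mappers,
  // with `default` deserializing failures as StorageError. The class methods
  // above simply hand one of these specs to client.sendOperationRequest.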
queryParameters: [ timeoutInSeconds, - restype2, - comp10 + snapshot, + comp17 ], urlParameters: [url], headerParameters: [ + contentType, + accept, version, requestId, - accept1, + leaseId, ifModifiedSince, ifUnmodifiedSince, - leaseId1, - action4, - proposedLeaseId1 + encryptionKey, + encryptionKeySha256, + encryptionAlgorithm, + ifMatch, + ifNoneMatch, + ifTags ], isXML: true, - serializer: xmlSerializer$4 + contentType: "application/xml; charset=utf-8", + mediaType: "xml", + serializer: xmlSerializer$3 }; -const listBlobFlatSegmentOperationSpec = { - path: "/{containerName}", +const getTagsOperationSpec = { + path: "/{containerName}/{blob}", httpMethod: "GET", responses: { 200: { - bodyMapper: ListBlobsFlatSegmentResponse, - headersMapper: ContainerListBlobFlatSegmentHeaders + bodyMapper: BlobTags, + headersMapper: BlobGetTagsHeaders }, default: { bodyMapper: StorageError, - headersMapper: ContainerListBlobFlatSegmentExceptionHeaders + headersMapper: BlobGetTagsExceptionHeaders } }, queryParameters: [ timeoutInSeconds, - comp2, - prefix, - marker, - maxPageSize, - restype2, - include1 + snapshot, + versionId, + comp18 ], urlParameters: [url], headerParameters: [ version, requestId, - accept1 + accept1, + leaseId, + ifTags ], isXML: true, - serializer: xmlSerializer$4 + serializer: xmlSerializer$3 }; -const listBlobHierarchySegmentOperationSpec = { - path: "/{containerName}", - httpMethod: "GET", +const setTagsOperationSpec = { + path: "/{containerName}/{blob}", + httpMethod: "PUT", responses: { - 200: { - bodyMapper: ListBlobsHierarchySegmentResponse, - headersMapper: ContainerListBlobHierarchySegmentHeaders + 204: { + headersMapper: BlobSetTagsHeaders }, default: { bodyMapper: StorageError, - headersMapper: ContainerListBlobHierarchySegmentExceptionHeaders + headersMapper: BlobSetTagsExceptionHeaders } }, + requestBody: tags, queryParameters: [ timeoutInSeconds, - comp2, - prefix, - marker, - maxPageSize, - restype2, - include1, - delimiter + versionId, + comp18 ], urlParameters: [url], headerParameters: [ + contentType, + accept, version, requestId, - accept1 + leaseId, + ifTags, + transactionalContentMD5, + transactionalContentCrc64 ], isXML: true, - serializer: xmlSerializer$4 -}; -const getAccountInfoOperationSpec$1 = { - path: "/{containerName}", - httpMethod: "GET", - responses: { - 200: { - headersMapper: ContainerGetAccountInfoHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: ContainerGetAccountInfoExceptionHeaders - } - }, - queryParameters: [comp, restype1], - urlParameters: [url], - headerParameters: [version, accept1], - isXML: true, - serializer: xmlSerializer$4 + contentType: "application/xml; charset=utf-8", + mediaType: "xml", + serializer: xmlSerializer$3 }; /* @@ -33306,487 +31902,408 @@ const getAccountInfoOperationSpec$1 = { * Code generated by Microsoft (R) AutoRest Code Generator. * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -/** Class representing a Blob. */ -class Blob$1 { +/** Class representing a PageBlob. */ +class PageBlob { /** - * Initialize a new instance of the class Blob class. + * Initialize a new instance of the class PageBlob class. * @param client Reference to the service client */ constructor(client) { this.client = client; } /** - * The Download operation reads or downloads a blob from the system, including its metadata and - * properties. You can also call Download to read a snapshot. - * @param options The options parameters. 
- */ - download(options) { - const operationArguments = { - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, downloadOperationSpec); - } - /** - * The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system - * properties for the blob. It does not return the content of the blob. - * @param options The options parameters. - */ - getProperties(options) { - const operationArguments = { - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, getPropertiesOperationSpec); - } - /** - * If the storage account's soft delete feature is disabled then, when a blob is deleted, it is - * permanently removed from the storage account. If the storage account's soft delete feature is - * enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible - * immediately. However, the blob service retains the blob or snapshot for the number of days specified - * by the DeleteRetentionPolicy section of [Storage service properties] - * (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's data is - * permanently removed from the storage account. Note that you continue to be charged for the - * soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify the - * "include=deleted" query parameter to discover which blobs and snapshots have been soft deleted. You - * can then use the Undelete Blob API to restore a soft-deleted blob. All other operations on a - * soft-deleted blob or snapshot causes the service to return an HTTP status code of 404 - * (ResourceNotFound). - * @param options The options parameters. - */ - delete(options) { - const operationArguments = { - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, deleteOperationSpec); - } - /** - * Undelete a blob that was previously soft deleted - * @param options The options parameters. - */ - undelete(options) { - const operationArguments = { - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, undeleteOperationSpec); - } - /** - * Sets the time a blob will expire and be deleted. - * @param expiryOptions Required. Indicates mode of the expiry time - * @param options The options parameters. - */ - setExpiry(expiryOptions, options) { - const operationArguments = { - expiryOptions, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, setExpiryOperationSpec); - } - /** - * The Set HTTP Headers operation sets system properties on the blob - * @param options The options parameters. - */ - setHttpHeaders(options) { - const operationArguments = { - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, setHttpHeadersOperationSpec); - } - /** - * The Set Immutability Policy operation sets the immutability policy on the blob - * @param options The options parameters. 
- */ - setImmutabilityPolicy(options) { - const operationArguments = { - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, setImmutabilityPolicyOperationSpec); - } - /** - * The Delete Immutability Policy operation deletes the immutability policy on the blob - * @param options The options parameters. - */ - deleteImmutabilityPolicy(options) { - const operationArguments = { - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, deleteImmutabilityPolicyOperationSpec); - } - /** - * The Set Legal Hold operation sets a legal hold on the blob. - * @param legalHold Specified if a legal hold should be set on the blob. - * @param options The options parameters. - */ - setLegalHold(legalHold, options) { - const operationArguments = { - legalHold, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, setLegalHoldOperationSpec); - } - /** - * The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more - * name-value pairs + * The Create operation creates a new page blob. + * @param contentLength The length of the request. + * @param blobContentLength This header specifies the maximum size for the page blob, up to 1 TB. The + * page blob size must be aligned to a 512-byte boundary. * @param options The options parameters. */ - setMetadata(options) { + create(contentLength, blobContentLength, options) { const operationArguments = { + contentLength, + blobContentLength, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, setMetadataOperationSpec); + return this.client.sendOperationRequest(operationArguments, createOperationSpec$1); } /** - * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - * operations + * The Upload Pages operation writes a range of pages to a page blob + * @param contentLength The length of the request. + * @param body Initial data * @param options The options parameters. */ - acquireLease(options) { + uploadPages(contentLength, body, options) { const operationArguments = { + contentLength, + body, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, acquireLeaseOperationSpec); + return this.client.sendOperationRequest(operationArguments, uploadPagesOperationSpec); } /** - * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - * operations - * @param leaseId Specifies the current lease ID on the resource. + * The Clear Pages operation clears a set of pages from a page blob + * @param contentLength The length of the request. * @param options The options parameters. 
*/ - releaseLease(leaseId, options) { + clearPages(contentLength, options) { const operationArguments = { - leaseId, + contentLength, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, releaseLeaseOperationSpec); + return this.client.sendOperationRequest(operationArguments, clearPagesOperationSpec); } /** - * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - * operations - * @param leaseId Specifies the current lease ID on the resource. + * The Upload Pages operation writes a range of pages to a page blob where the contents are read from a + * URL + * @param sourceUrl Specify a URL to the copy source. + * @param sourceRange Bytes of source data in the specified range. The length of this range should + * match the ContentLength header and x-ms-range/Range destination range header. + * @param contentLength The length of the request. + * @param range The range of bytes to which the source range would be written. The range should be 512 + * aligned and range-end is required. * @param options The options parameters. */ - renewLease(leaseId, options) { + uploadPagesFromURL(sourceUrl, sourceRange, contentLength, range, options) { const operationArguments = { - leaseId, + sourceUrl, + sourceRange, + contentLength, + range, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, renewLeaseOperationSpec); + return this.client.sendOperationRequest(operationArguments, uploadPagesFromURLOperationSpec); } /** - * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - * operations - * @param leaseId Specifies the current lease ID on the resource. - * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 - * (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor - * (String) for a list of valid GUID string formats. + * The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a + * page blob * @param options The options parameters. */ - changeLease(leaseId, proposedLeaseId, options) { + getPageRanges(options) { const operationArguments = { - leaseId, - proposedLeaseId, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, changeLeaseOperationSpec); + return this.client.sendOperationRequest(operationArguments, getPageRangesOperationSpec); } /** - * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - * operations + * The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were + * changed between target blob and previous snapshot. * @param options The options parameters. */ - breakLease(options) { + getPageRangesDiff(options) { const operationArguments = { options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, breakLeaseOperationSpec); + return this.client.sendOperationRequest(operationArguments, getPageRangesDiffOperationSpec); } /** - * The Create Snapshot operation creates a read-only snapshot of a blob + * Resize the Blob + * @param blobContentLength This header specifies the maximum size for the page blob, up to 1 TB. 
The + * page blob size must be aligned to a 512-byte boundary. * @param options The options parameters. */ - createSnapshot(options) { + resize(blobContentLength, options) { const operationArguments = { + blobContentLength, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, createSnapshotOperationSpec); + return this.client.sendOperationRequest(operationArguments, resizeOperationSpec); } /** - * The Start Copy From URL operation copies a blob or an internet resource to a new blob. - * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to - * 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would - * appear in a request URI. The source blob must either be public or must be authenticated via a shared - * access signature. + * Update the sequence number of the blob + * @param sequenceNumberAction Required if the x-ms-blob-sequence-number header is set for the request. + * This property applies to page blobs only. This property indicates how the service should modify the + * blob's sequence number * @param options The options parameters. */ - startCopyFromURL(copySource, options) { + updateSequenceNumber(sequenceNumberAction, options) { const operationArguments = { - copySource, + sequenceNumberAction, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, startCopyFromURLOperationSpec); + return this.client.sendOperationRequest(operationArguments, updateSequenceNumberOperationSpec); } /** - * The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return - * a response until the copy is complete. + * The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. + * The snapshot is copied such that only the differential changes between the previously copied + * snapshot are transferred to the destination. The copied snapshots are complete copies of the + * original snapshot and can be read or copied from as usual. This API is supported since REST version + * 2016-05-31. * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to * 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would * appear in a request URI. The source blob must either be public or must be authenticated via a shared * access signature. * @param options The options parameters. */ - copyFromURL(copySource, options) { + copyIncremental(copySource, options) { const operationArguments = { copySource, options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) }; - return this.client.sendOperationRequest(operationArguments, copyFromURLOperationSpec); - } - /** - * The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination - * blob with zero length and full metadata. - * @param copyId The copy identifier provided in the x-ms-copy-id header of the original Copy Blob - * operation. - * @param options The options parameters. - */ - abortCopyFromURL(copyId, options) { - const operationArguments = { - copyId, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, abortCopyFromURLOperationSpec); - } - /** - * The Set Tier operation sets the tier on a blob. 
The operation is allowed on a page blob in a premium - * storage account and on a block blob in a blob storage account (locally redundant storage only). A - * premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block - * blob's tier determines Hot/Cool/Archive storage type. This operation does not update the blob's - * ETag. - * @param tier Indicates the tier to be set on the blob. - * @param options The options parameters. - */ - setTier(tier, options) { - const operationArguments = { - tier, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, setTierOperationSpec); - } - /** - * Returns the sku name and account kind - * @param options The options parameters. - */ - getAccountInfo(options) { - const operationArguments = { - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, getAccountInfoOperationSpec); - } - /** - * The Query operation enables users to select/project on blob data by providing simple query - * expressions. - * @param options The options parameters. - */ - query(options) { - const operationArguments = { - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, queryOperationSpec); - } - /** - * The Get Tags operation enables users to get the tags associated with a blob. - * @param options The options parameters. - */ - getTags(options) { - const operationArguments = { - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, getTagsOperationSpec); - } - /** - * The Set Tags operation enables users to set tags on a blob. - * @param options The options parameters. 
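The PageBlob create/uploadPages/resize operations above require 512-byte alignment, as their doc comments note; a minimal sketch via PageBlobClient (names and sizes are placeholders; Buffer assumes a Node.js runtime, which this bundle targets):

import { BlobServiceClient } from "@azure/storage-blob";

// Sketch: create a fixed-size page blob, write one aligned page, resize it.
async function pageBlobExample(): Promise<void> {
  const page = BlobServiceClient.fromConnectionString("UseDevelopmentStorage=true")
    .getContainerClient("my-container")
    .getPageBlobClient("disk.vhd");

  await page.create(1024);                                   // x-ms-blob-type: PageBlob
  await page.uploadPages(Buffer.alloc(512, 0xff), 0, 512);   // PUT ?comp=page
  await page.resize(2048);                                   // PUT ?comp=properties
}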
- */ - setTags(options) { - const operationArguments = { - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, setTagsOperationSpec); + return this.client.sendOperationRequest(operationArguments, copyIncrementalOperationSpec); } } // Operation Specifications -const xmlSerializer$3 = new coreHttp__namespace.Serializer(Mappers, /* isXml */ true); -const downloadOperationSpec = { +const xmlSerializer$2 = new coreHttp__namespace.Serializer(Mappers, /* isXml */ true); +const serializer$2 = new coreHttp__namespace.Serializer(Mappers, /* isXml */ false); +const createOperationSpec$1 = { path: "/{containerName}/{blob}", - httpMethod: "GET", + httpMethod: "PUT", responses: { - 200: { - bodyMapper: { - type: { name: "Stream" }, - serializedName: "parsedResponse" - }, - headersMapper: BlobDownloadHeaders - }, - 206: { - bodyMapper: { - type: { name: "Stream" }, - serializedName: "parsedResponse" - }, - headersMapper: BlobDownloadHeaders + 201: { + headersMapper: PageBlobCreateHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobDownloadExceptionHeaders + headersMapper: PageBlobCreateExceptionHeaders } }, - queryParameters: [ - timeoutInSeconds, - snapshot, - versionId - ], + queryParameters: [timeoutInSeconds], urlParameters: [url], headerParameters: [ version, requestId, accept1, + contentLength, + metadata, leaseId, ifModifiedSince, ifUnmodifiedSince, - range, - rangeGetContentMD5, - rangeGetContentCRC64, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifMatch, ifNoneMatch, - ifTags + ifTags, + blobCacheControl, + blobContentType, + blobContentMD5, + blobContentEncoding, + blobContentLanguage, + blobContentDisposition, + immutabilityPolicyExpiry, + immutabilityPolicyMode, + encryptionScope, + tier, + blobTagsString, + legalHold1, + blobType, + blobContentLength, + blobSequenceNumber ], isXML: true, - serializer: xmlSerializer$3 + serializer: xmlSerializer$2 }; -const getPropertiesOperationSpec = { +const uploadPagesOperationSpec = { path: "/{containerName}/{blob}", - httpMethod: "HEAD", + httpMethod: "PUT", responses: { - 200: { - headersMapper: BlobGetPropertiesHeaders + 201: { + headersMapper: PageBlobUploadPagesHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobGetPropertiesExceptionHeaders + headersMapper: PageBlobUploadPagesExceptionHeaders } }, - queryParameters: [ - timeoutInSeconds, - snapshot, - versionId + requestBody: body1, + queryParameters: [timeoutInSeconds, comp19], + urlParameters: [url], + headerParameters: [ + version, + requestId, + contentLength, + leaseId, + ifModifiedSince, + ifUnmodifiedSince, + range, + encryptionKey, + encryptionKeySha256, + encryptionAlgorithm, + ifMatch, + ifNoneMatch, + ifTags, + encryptionScope, + transactionalContentMD5, + transactionalContentCrc64, + contentType1, + accept2, + pageWrite, + ifSequenceNumberLessThanOrEqualTo, + ifSequenceNumberLessThan, + ifSequenceNumberEqualTo ], + mediaType: "binary", + serializer: serializer$2 +}; +const clearPagesOperationSpec = { + path: "/{containerName}/{blob}", + httpMethod: "PUT", + responses: { + 201: { + headersMapper: PageBlobClearPagesHeaders + }, + default: { + bodyMapper: StorageError, + headersMapper: PageBlobClearPagesExceptionHeaders + } + }, + queryParameters: [timeoutInSeconds, comp19], urlParameters: [url], headerParameters: [ version, requestId, accept1, + contentLength, leaseId, ifModifiedSince, ifUnmodifiedSince, + range, encryptionKey, encryptionKeySha256, 
encryptionAlgorithm, ifMatch, ifNoneMatch, - ifTags + ifTags, + encryptionScope, + ifSequenceNumberLessThanOrEqualTo, + ifSequenceNumberLessThan, + ifSequenceNumberEqualTo, + pageWrite1 ], isXML: true, - serializer: xmlSerializer$3 + serializer: xmlSerializer$2 }; -const deleteOperationSpec = { +const uploadPagesFromURLOperationSpec = { path: "/{containerName}/{blob}", - httpMethod: "DELETE", + httpMethod: "PUT", responses: { - 202: { - headersMapper: BlobDeleteHeaders + 201: { + headersMapper: PageBlobUploadPagesFromURLHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobDeleteExceptionHeaders + headersMapper: PageBlobUploadPagesFromURLExceptionHeaders } }, - queryParameters: [ - timeoutInSeconds, - snapshot, - versionId, - blobDeleteType - ], + queryParameters: [timeoutInSeconds, comp19], urlParameters: [url], headerParameters: [ version, requestId, accept1, + contentLength, leaseId, ifModifiedSince, ifUnmodifiedSince, + encryptionKey, + encryptionKeySha256, + encryptionAlgorithm, ifMatch, ifNoneMatch, ifTags, - deleteSnapshots + encryptionScope, + sourceIfModifiedSince, + sourceIfUnmodifiedSince, + sourceIfMatch, + sourceIfNoneMatch, + sourceContentMD5, + copySourceAuthorization, + pageWrite, + ifSequenceNumberLessThanOrEqualTo, + ifSequenceNumberLessThan, + ifSequenceNumberEqualTo, + sourceUrl, + sourceRange, + sourceContentCrc64, + range1 ], isXML: true, - serializer: xmlSerializer$3 + serializer: xmlSerializer$2 }; -const undeleteOperationSpec = { +const getPageRangesOperationSpec = { path: "/{containerName}/{blob}", - httpMethod: "PUT", + httpMethod: "GET", responses: { 200: { - headersMapper: BlobUndeleteHeaders + bodyMapper: PageList, + headersMapper: PageBlobGetPageRangesHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobUndeleteExceptionHeaders + headersMapper: PageBlobGetPageRangesExceptionHeaders } }, - queryParameters: [timeoutInSeconds, comp8], + queryParameters: [ + timeoutInSeconds, + marker, + maxPageSize, + snapshot, + comp20 + ], urlParameters: [url], headerParameters: [ version, requestId, - accept1 + accept1, + leaseId, + ifModifiedSince, + ifUnmodifiedSince, + range, + ifMatch, + ifNoneMatch, + ifTags ], isXML: true, - serializer: xmlSerializer$3 + serializer: xmlSerializer$2 }; -const setExpiryOperationSpec = { +const getPageRangesDiffOperationSpec = { path: "/{containerName}/{blob}", - httpMethod: "PUT", + httpMethod: "GET", responses: { 200: { - headersMapper: BlobSetExpiryHeaders + bodyMapper: PageList, + headersMapper: PageBlobGetPageRangesDiffHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobSetExpiryExceptionHeaders + headersMapper: PageBlobGetPageRangesDiffExceptionHeaders } }, - queryParameters: [timeoutInSeconds, comp11], + queryParameters: [ + timeoutInSeconds, + marker, + maxPageSize, + snapshot, + comp20, + prevsnapshot + ], urlParameters: [url], headerParameters: [ version, requestId, accept1, - expiryOptions, - expiresOn + leaseId, + ifModifiedSince, + ifUnmodifiedSince, + range, + ifMatch, + ifNoneMatch, + ifTags, + prevSnapshotUrl ], isXML: true, - serializer: xmlSerializer$3 + serializer: xmlSerializer$2 }; -const setHttpHeadersOperationSpec = { +const resizeOperationSpec = { path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { 200: { - headersMapper: BlobSetHttpHeadersHeaders + headersMapper: PageBlobResizeHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobSetHttpHeadersExceptionHeaders + headersMapper: PageBlobResizeExceptionHeaders } }, queryParameters: [comp, 
timeoutInSeconds], @@ -33798,107 +32315,172 @@ const setHttpHeadersOperationSpec = { leaseId, ifModifiedSince, ifUnmodifiedSince, + encryptionKey, + encryptionKeySha256, + encryptionAlgorithm, ifMatch, ifNoneMatch, ifTags, - blobCacheControl, - blobContentType, - blobContentMD5, - blobContentEncoding, - blobContentLanguage, - blobContentDisposition + encryptionScope, + blobContentLength ], isXML: true, - serializer: xmlSerializer$3 + serializer: xmlSerializer$2 }; -const setImmutabilityPolicyOperationSpec = { +const updateSequenceNumberOperationSpec = { path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { 200: { - headersMapper: BlobSetImmutabilityPolicyHeaders + headersMapper: PageBlobUpdateSequenceNumberHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobSetImmutabilityPolicyExceptionHeaders + headersMapper: PageBlobUpdateSequenceNumberExceptionHeaders } }, - queryParameters: [timeoutInSeconds, comp12], + queryParameters: [comp, timeoutInSeconds], urlParameters: [url], headerParameters: [ version, requestId, accept1, + leaseId, + ifModifiedSince, ifUnmodifiedSince, - immutabilityPolicyExpiry, - immutabilityPolicyMode - ], - isXML: true, - serializer: xmlSerializer$3 -}; -const deleteImmutabilityPolicyOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "DELETE", - responses: { - 200: { - headersMapper: BlobDeleteImmutabilityPolicyHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: BlobDeleteImmutabilityPolicyExceptionHeaders - } - }, - queryParameters: [timeoutInSeconds, comp12], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1 + ifMatch, + ifNoneMatch, + ifTags, + blobSequenceNumber, + sequenceNumberAction ], isXML: true, - serializer: xmlSerializer$3 + serializer: xmlSerializer$2 }; -const setLegalHoldOperationSpec = { +const copyIncrementalOperationSpec = { path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { - 200: { - headersMapper: BlobSetLegalHoldHeaders + 202: { + headersMapper: PageBlobCopyIncrementalHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobSetLegalHoldExceptionHeaders + headersMapper: PageBlobCopyIncrementalExceptionHeaders } }, - queryParameters: [timeoutInSeconds, comp13], + queryParameters: [timeoutInSeconds, comp21], urlParameters: [url], headerParameters: [ version, requestId, accept1, - legalHold + ifModifiedSince, + ifUnmodifiedSince, + ifMatch, + ifNoneMatch, + ifTags, + copySource ], isXML: true, - serializer: xmlSerializer$3 + serializer: xmlSerializer$2 }; -const setMetadataOperationSpec = { + +/* + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. + */ +/** Class representing a AppendBlob. */ +class AppendBlob { + /** + * Initialize a new instance of the class AppendBlob class. + * @param client Reference to the service client + */ + constructor(client) { + this.client = client; + } + /** + * The Create Append Blob operation creates a new append blob. + * @param contentLength The length of the request. + * @param options The options parameters. 
+ */ + create(contentLength, options) { + const operationArguments = { + contentLength, + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, createOperationSpec); + } + /** + * The Append Block operation commits a new block of data to the end of an existing append blob. The + * Append Block operation is permitted only if the blob was created with x-ms-blob-type set to + * AppendBlob. Append Block is supported only on version 2015-02-21 version or later. + * @param contentLength The length of the request. + * @param body Initial data + * @param options The options parameters. + */ + appendBlock(contentLength, body, options) { + const operationArguments = { + contentLength, + body, + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, appendBlockOperationSpec); + } + /** + * The Append Block operation commits a new block of data to the end of an existing append blob where + * the contents are read from a source url. The Append Block operation is permitted only if the blob + * was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version + * 2015-02-21 version or later. + * @param sourceUrl Specify a URL to the copy source. + * @param contentLength The length of the request. + * @param options The options parameters. + */ + appendBlockFromUrl(sourceUrl, contentLength, options) { + const operationArguments = { + sourceUrl, + contentLength, + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, appendBlockFromUrlOperationSpec); + } + /** + * The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version + * 2019-12-12 version or later. + * @param options The options parameters. 
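The AppendBlob flow generated above (create, append, seal) maps onto AppendBlobClient; an illustrative sketch with placeholder names (seal requires service version 2019-12-12 or later, per the doc comment):

import { BlobServiceClient } from "@azure/storage-blob";

// Sketch: append-only log blob, sealed read-only when finished.
async function appendBlobExample(): Promise<void> {
  const log = BlobServiceClient.fromConnectionString("UseDevelopmentStorage=true")
    .getContainerClient("my-container")
    .getAppendBlobClient("build.log");

  await log.create();                         // x-ms-blob-type: AppendBlob
  const line = Buffer.from("cmake v3.27.0 installed\n");
  await log.appendBlock(line, line.length);   // PUT ?comp=appendblock
  await log.seal();                           // PUT ?comp=seal
}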
+ */ + seal(options) { + const operationArguments = { + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, sealOperationSpec); + } +} +// Operation Specifications +const xmlSerializer$1 = new coreHttp__namespace.Serializer(Mappers, /* isXml */ true); +const serializer$1 = new coreHttp__namespace.Serializer(Mappers, /* isXml */ false); +const createOperationSpec = { path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { - 200: { - headersMapper: BlobSetMetadataHeaders + 201: { + headersMapper: AppendBlobCreateHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobSetMetadataExceptionHeaders + headersMapper: AppendBlobCreateExceptionHeaders } }, - queryParameters: [timeoutInSeconds, comp6], + queryParameters: [timeoutInSeconds], urlParameters: [url], headerParameters: [ version, requestId, accept1, + contentLength, metadata, leaseId, ifModifiedSince, @@ -33909,293 +32491,510 @@ const setMetadataOperationSpec = { ifMatch, ifNoneMatch, ifTags, - encryptionScope + blobCacheControl, + blobContentType, + blobContentMD5, + blobContentEncoding, + blobContentLanguage, + blobContentDisposition, + immutabilityPolicyExpiry, + immutabilityPolicyMode, + encryptionScope, + blobTagsString, + legalHold1, + blobType1 ], isXML: true, - serializer: xmlSerializer$3 + serializer: xmlSerializer$1 }; -const acquireLeaseOperationSpec = { +const appendBlockOperationSpec = { path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { 201: { - headersMapper: BlobAcquireLeaseHeaders + headersMapper: AppendBlobAppendBlockHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobAcquireLeaseExceptionHeaders + headersMapper: AppendBlobAppendBlockExceptionHeaders } }, - queryParameters: [timeoutInSeconds, comp10], + requestBody: body1, + queryParameters: [timeoutInSeconds, comp22], urlParameters: [url], headerParameters: [ version, requestId, - accept1, + contentLength, + leaseId, ifModifiedSince, ifUnmodifiedSince, - action, - duration, - proposedLeaseId, + encryptionKey, + encryptionKeySha256, + encryptionAlgorithm, ifMatch, ifNoneMatch, - ifTags + ifTags, + encryptionScope, + transactionalContentMD5, + transactionalContentCrc64, + contentType1, + accept2, + maxSize, + appendPosition ], - isXML: true, - serializer: xmlSerializer$3 + mediaType: "binary", + serializer: serializer$1 }; -const releaseLeaseOperationSpec = { +const appendBlockFromUrlOperationSpec = { path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { - 200: { - headersMapper: BlobReleaseLeaseHeaders + 201: { + headersMapper: AppendBlobAppendBlockFromUrlHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobReleaseLeaseExceptionHeaders + headersMapper: AppendBlobAppendBlockFromUrlExceptionHeaders } }, - queryParameters: [timeoutInSeconds, comp10], + queryParameters: [timeoutInSeconds, comp22], urlParameters: [url], headerParameters: [ version, requestId, accept1, + contentLength, + leaseId, ifModifiedSince, ifUnmodifiedSince, - action1, - leaseId1, + encryptionKey, + encryptionKeySha256, + encryptionAlgorithm, ifMatch, ifNoneMatch, - ifTags + ifTags, + encryptionScope, + sourceIfModifiedSince, + sourceIfUnmodifiedSince, + sourceIfMatch, + sourceIfNoneMatch, + sourceContentMD5, + copySourceAuthorization, + transactionalContentMD5, + sourceUrl, + sourceContentCrc64, + maxSize, + appendPosition, + sourceRange1 ], isXML: true, - serializer: xmlSerializer$3 + serializer: xmlSerializer$1 }; -const 
renewLeaseOperationSpec = { +const sealOperationSpec = { path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { 200: { - headersMapper: BlobRenewLeaseHeaders + headersMapper: AppendBlobSealHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobRenewLeaseExceptionHeaders + headersMapper: AppendBlobSealExceptionHeaders } }, - queryParameters: [timeoutInSeconds, comp10], + queryParameters: [timeoutInSeconds, comp23], urlParameters: [url], headerParameters: [ version, requestId, accept1, + leaseId, ifModifiedSince, ifUnmodifiedSince, - leaseId1, - action2, ifMatch, ifNoneMatch, - ifTags + appendPosition ], isXML: true, - serializer: xmlSerializer$3 + serializer: xmlSerializer$1 }; -const changeLeaseOperationSpec = { + +/* + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. + */ +/** Class representing a BlockBlob. */ +class BlockBlob { + /** + * Initialize a new instance of the class BlockBlob class. + * @param client Reference to the service client + */ + constructor(client) { + this.client = client; + } + /** + * The Upload Block Blob operation updates the content of an existing block blob. Updating an existing + * block blob overwrites any existing metadata on the blob. Partial updates are not supported with Put + * Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a + * partial update of the content of a block blob, use the Put Block List operation. + * @param contentLength The length of the request. + * @param body Initial data + * @param options The options parameters. + */ + upload(contentLength, body, options) { + const operationArguments = { + contentLength, + body, + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, uploadOperationSpec); + } + /** + * The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read + * from a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are + * not supported with Put Blob from URL; the content of an existing blob is overwritten with the + * content of the new blob. To perform partial updates to a block blob’s contents using a source URL, + * use the Put Block from URL API in conjunction with Put Block List. + * @param contentLength The length of the request. + * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to + * 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would + * appear in a request URI. The source blob must either be public or must be authenticated via a shared + * access signature. + * @param options The options parameters. + */ + putBlobFromUrl(contentLength, copySource, options) { + const operationArguments = { + contentLength, + copySource, + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, putBlobFromUrlOperationSpec); + } + /** + * The Stage Block operation creates a new block to be committed as part of a blob + * @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the string + * must be less than or equal to 64 bytes in size. 
For a given blob, the length of the value specified + * for the blockid parameter must be the same size for each block. + * @param contentLength The length of the request. + * @param body Initial data + * @param options The options parameters. + */ + stageBlock(blockId, contentLength, body, options) { + const operationArguments = { + blockId, + contentLength, + body, + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, stageBlockOperationSpec); + } + /** + * The Stage Block operation creates a new block to be committed as part of a blob where the contents + * are read from a URL. + * @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the string + * must be less than or equal to 64 bytes in size. For a given blob, the length of the value specified + * for the blockid parameter must be the same size for each block. + * @param contentLength The length of the request. + * @param sourceUrl Specify a URL to the copy source. + * @param options The options parameters. + */ + stageBlockFromURL(blockId, contentLength, sourceUrl, options) { + const operationArguments = { + blockId, + contentLength, + sourceUrl, + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, stageBlockFromURLOperationSpec); + } + /** + * The Commit Block List operation writes a blob by specifying the list of block IDs that make up the + * blob. In order to be written as part of a blob, a block must have been successfully written to the + * server in a prior Put Block operation. You can call Put Block List to update a blob by uploading + * only those blocks that have changed, then committing the new and existing blocks together. You can + * do this by specifying whether to commit a block from the committed block list or from the + * uncommitted block list, or to commit the most recently uploaded version of the block, whichever list + * it may belong to. + * @param blocks Blob Blocks. + * @param options The options parameters. + */ + commitBlockList(blocks, options) { + const operationArguments = { + blocks, + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, commitBlockListOperationSpec); + } + /** + * The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block + * blob + * @param listType Specifies whether to return the list of committed blocks, the list of uncommitted + * blocks, or both lists together. + * @param options The options parameters. 
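+     *
+     * A minimal stage-and-commit sketch (hypothetical usage; assumes a configured
+     * StorageClientContext as `client` and a Buffer `chunk` of data):
+     *
+     *   const blockBlob = new BlockBlob(client);
+     *   // Block IDs must be base64 strings of equal length; a zero-padded counter works.
+     *   const blockId = Buffer.from("0".repeat(48)).toString("base64");
+     *   await blockBlob.stageBlock(blockId, chunk.length, chunk, {});
+     *   await blockBlob.commitBlockList({ latest: [blockId] }, {});
+     *   const committed = await blockBlob.getBlockList("committed", {});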
+ */ + getBlockList(listType, options) { + const operationArguments = { + listType, + options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + }; + return this.client.sendOperationRequest(operationArguments, getBlockListOperationSpec); + } +} +// Operation Specifications +const xmlSerializer = new coreHttp__namespace.Serializer(Mappers, /* isXml */ true); +const serializer = new coreHttp__namespace.Serializer(Mappers, /* isXml */ false); +const uploadOperationSpec = { path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { - 200: { - headersMapper: BlobChangeLeaseHeaders + 201: { + headersMapper: BlockBlobUploadHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobChangeLeaseExceptionHeaders + headersMapper: BlockBlobUploadExceptionHeaders } }, - queryParameters: [timeoutInSeconds, comp10], + requestBody: body1, + queryParameters: [timeoutInSeconds], urlParameters: [url], headerParameters: [ version, requestId, - accept1, + contentLength, + metadata, + leaseId, ifModifiedSince, ifUnmodifiedSince, - leaseId1, - action4, - proposedLeaseId1, + encryptionKey, + encryptionKeySha256, + encryptionAlgorithm, ifMatch, ifNoneMatch, - ifTags + ifTags, + blobCacheControl, + blobContentType, + blobContentMD5, + blobContentEncoding, + blobContentLanguage, + blobContentDisposition, + immutabilityPolicyExpiry, + immutabilityPolicyMode, + encryptionScope, + tier, + blobTagsString, + legalHold1, + transactionalContentMD5, + contentType1, + accept2, + blobType2 ], - isXML: true, - serializer: xmlSerializer$3 + mediaType: "binary", + serializer }; -const breakLeaseOperationSpec = { +const putBlobFromUrlOperationSpec = { path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { - 202: { - headersMapper: BlobBreakLeaseHeaders + 201: { + headersMapper: BlockBlobPutBlobFromUrlHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobBreakLeaseExceptionHeaders + headersMapper: BlockBlobPutBlobFromUrlExceptionHeaders } }, - queryParameters: [timeoutInSeconds, comp10], + queryParameters: [timeoutInSeconds], urlParameters: [url], headerParameters: [ version, requestId, accept1, + contentLength, + metadata, + leaseId, ifModifiedSince, ifUnmodifiedSince, - action3, - breakPeriod, + encryptionKey, + encryptionKeySha256, + encryptionAlgorithm, ifMatch, ifNoneMatch, - ifTags + ifTags, + blobCacheControl, + blobContentType, + blobContentMD5, + blobContentEncoding, + blobContentLanguage, + blobContentDisposition, + encryptionScope, + tier, + sourceIfModifiedSince, + sourceIfUnmodifiedSince, + sourceIfMatch, + sourceIfNoneMatch, + sourceIfTags, + copySource, + blobTagsString, + sourceContentMD5, + copySourceAuthorization, + copySourceTags, + transactionalContentMD5, + blobType2, + copySourceBlobProperties ], isXML: true, - serializer: xmlSerializer$3 + serializer: xmlSerializer }; -const createSnapshotOperationSpec = { +const stageBlockOperationSpec = { path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { 201: { - headersMapper: BlobCreateSnapshotHeaders + headersMapper: BlockBlobStageBlockHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobCreateSnapshotExceptionHeaders + headersMapper: BlockBlobStageBlockExceptionHeaders } }, - queryParameters: [timeoutInSeconds, comp14], + requestBody: body1, + queryParameters: [ + timeoutInSeconds, + comp24, + blockId + ], urlParameters: [url], headerParameters: [ version, requestId, - accept1, - metadata, + contentLength, leaseId, - ifModifiedSince, - ifUnmodifiedSince, encryptionKey, 
encryptionKeySha256, encryptionAlgorithm, - ifMatch, - ifNoneMatch, - ifTags, - encryptionScope + encryptionScope, + transactionalContentMD5, + transactionalContentCrc64, + contentType1, + accept2 ], - isXML: true, - serializer: xmlSerializer$3 + mediaType: "binary", + serializer }; -const startCopyFromURLOperationSpec = { +const stageBlockFromURLOperationSpec = { path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { - 202: { - headersMapper: BlobStartCopyFromURLHeaders + 201: { + headersMapper: BlockBlobStageBlockFromURLHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobStartCopyFromURLExceptionHeaders + headersMapper: BlockBlobStageBlockFromURLExceptionHeaders } }, - queryParameters: [timeoutInSeconds], + queryParameters: [ + timeoutInSeconds, + comp24, + blockId + ], urlParameters: [url], headerParameters: [ version, requestId, accept1, - metadata, + contentLength, leaseId, - ifModifiedSince, - ifUnmodifiedSince, - ifMatch, - ifNoneMatch, - ifTags, - immutabilityPolicyExpiry, - immutabilityPolicyMode, - tier, - rehydratePriority, + encryptionKey, + encryptionKeySha256, + encryptionAlgorithm, + encryptionScope, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, - sourceIfTags, - copySource, - blobTagsString, - sealBlob, - legalHold1 + sourceContentMD5, + copySourceAuthorization, + sourceUrl, + sourceContentCrc64, + sourceRange1 ], isXML: true, - serializer: xmlSerializer$3 + serializer: xmlSerializer }; -const copyFromURLOperationSpec = { +const commitBlockListOperationSpec = { path: "/{containerName}/{blob}", httpMethod: "PUT", responses: { - 202: { - headersMapper: BlobCopyFromURLHeaders + 201: { + headersMapper: BlockBlobCommitBlockListHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobCopyFromURLExceptionHeaders + headersMapper: BlockBlobCommitBlockListExceptionHeaders } }, - queryParameters: [timeoutInSeconds], + requestBody: blocks, + queryParameters: [timeoutInSeconds, comp25], urlParameters: [url], headerParameters: [ + contentType, + accept, version, requestId, - accept1, metadata, leaseId, ifModifiedSince, ifUnmodifiedSince, + encryptionKey, + encryptionKeySha256, + encryptionAlgorithm, ifMatch, ifNoneMatch, ifTags, + blobCacheControl, + blobContentType, + blobContentMD5, + blobContentEncoding, + blobContentLanguage, + blobContentDisposition, immutabilityPolicyExpiry, immutabilityPolicyMode, encryptionScope, tier, - sourceIfModifiedSince, - sourceIfUnmodifiedSince, - sourceIfMatch, - sourceIfNoneMatch, - copySource, blobTagsString, legalHold1, - xMsRequiresSync, - sourceContentMD5, - copySourceAuthorization, - copySourceTags + transactionalContentMD5, + transactionalContentCrc64 ], isXML: true, - serializer: xmlSerializer$3 + contentType: "application/xml; charset=utf-8", + mediaType: "xml", + serializer: xmlSerializer }; -const abortCopyFromURLOperationSpec = { +const getBlockListOperationSpec = { path: "/{containerName}/{blob}", - httpMethod: "PUT", + httpMethod: "GET", responses: { - 204: { - headersMapper: BlobAbortCopyFromURLHeaders + 200: { + bodyMapper: BlockList, + headersMapper: BlockBlobGetBlockListHeaders }, default: { bodyMapper: StorageError, - headersMapper: BlobAbortCopyFromURLExceptionHeaders + headersMapper: BlockBlobGetBlockListExceptionHeaders } }, queryParameters: [ timeoutInSeconds, - comp15, - copyId + snapshot, + comp25, + listType ], urlParameters: [url], headerParameters: [ @@ -34203,4983 +33002,4785 @@ const abortCopyFromURLOperationSpec = { requestId, accept1, leaseId, - 
copyActionAbortConstant + ifTags ], isXML: true, - serializer: xmlSerializer$3 + serializer: xmlSerializer }; -const setTierOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 200: { - headersMapper: BlobSetTierHeaders - }, - 202: { - headersMapper: BlobSetTierHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: BlobSetTierExceptionHeaders + +// Copyright (c) Microsoft Corporation. +/** + * The `@azure/logger` configuration for this package. + */ +const logger = logger$1.createClientLogger("storage-blob"); + +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +const SDK_VERSION = "12.12.0"; +const SERVICE_VERSION = "2021-10-04"; +const BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES = 256 * 1024 * 1024; // 256MB +const BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES = 4000 * 1024 * 1024; // 4000MB +const BLOCK_BLOB_MAX_BLOCKS = 50000; +const DEFAULT_BLOCK_BUFFER_SIZE_BYTES = 8 * 1024 * 1024; // 8MB +const DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES = 4 * 1024 * 1024; // 4MB +const DEFAULT_MAX_DOWNLOAD_RETRY_REQUESTS = 5; +const REQUEST_TIMEOUT = 100 * 1000; // In ms +/** + * The OAuth scope to use with Azure Storage. + */ +const StorageOAuthScopes = "https://storage.azure.com/.default"; +const URLConstants = { + Parameters: { + FORCE_BROWSER_NO_CACHE: "_", + SIGNATURE: "sig", + SNAPSHOT: "snapshot", + VERSIONID: "versionid", + TIMEOUT: "timeout", + }, +}; +const HTTPURLConnection = { + HTTP_ACCEPTED: 202, + HTTP_CONFLICT: 409, + HTTP_NOT_FOUND: 404, + HTTP_PRECON_FAILED: 412, + HTTP_RANGE_NOT_SATISFIABLE: 416, +}; +const HeaderConstants = { + AUTHORIZATION: "Authorization", + AUTHORIZATION_SCHEME: "Bearer", + CONTENT_ENCODING: "Content-Encoding", + CONTENT_ID: "Content-ID", + CONTENT_LANGUAGE: "Content-Language", + CONTENT_LENGTH: "Content-Length", + CONTENT_MD5: "Content-Md5", + CONTENT_TRANSFER_ENCODING: "Content-Transfer-Encoding", + CONTENT_TYPE: "Content-Type", + COOKIE: "Cookie", + DATE: "date", + IF_MATCH: "if-match", + IF_MODIFIED_SINCE: "if-modified-since", + IF_NONE_MATCH: "if-none-match", + IF_UNMODIFIED_SINCE: "if-unmodified-since", + PREFIX_FOR_STORAGE: "x-ms-", + RANGE: "Range", + USER_AGENT: "User-Agent", + X_MS_CLIENT_REQUEST_ID: "x-ms-client-request-id", + X_MS_COPY_SOURCE: "x-ms-copy-source", + X_MS_DATE: "x-ms-date", + X_MS_ERROR_CODE: "x-ms-error-code", + X_MS_VERSION: "x-ms-version", +}; +const ETagNone = ""; +const ETagAny = "*"; +const SIZE_1_MB = 1 * 1024 * 1024; +const BATCH_MAX_REQUEST = 256; +const BATCH_MAX_PAYLOAD_IN_BYTES = 4 * SIZE_1_MB; +const HTTP_LINE_ENDING = "\r\n"; +const HTTP_VERSION_1_1 = "HTTP/1.1"; +const EncryptionAlgorithmAES25 = "AES256"; +const DevelopmentConnectionString = `DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;`; +const StorageBlobLoggingAllowedHeaderNames = [ + "Access-Control-Allow-Origin", + "Cache-Control", + "Content-Length", + "Content-Type", + "Date", + "Request-Id", + "traceparent", + "Transfer-Encoding", + "User-Agent", + "x-ms-client-request-id", + "x-ms-date", + "x-ms-error-code", + "x-ms-request-id", + "x-ms-return-client-request-id", + "x-ms-version", + "Accept-Ranges", + "Content-Disposition", + "Content-Encoding", + "Content-Language", + "Content-MD5", + "Content-Range", + "ETag", + "Last-Modified", + "Server", + "Vary", + "x-ms-content-crc64", + "x-ms-copy-action", + "x-ms-copy-completion-time", + "x-ms-copy-id", + 
"x-ms-copy-progress", + "x-ms-copy-status", + "x-ms-has-immutability-policy", + "x-ms-has-legal-hold", + "x-ms-lease-state", + "x-ms-lease-status", + "x-ms-range", + "x-ms-request-server-encrypted", + "x-ms-server-encrypted", + "x-ms-snapshot", + "x-ms-source-range", + "If-Match", + "If-Modified-Since", + "If-None-Match", + "If-Unmodified-Since", + "x-ms-access-tier", + "x-ms-access-tier-change-time", + "x-ms-access-tier-inferred", + "x-ms-account-kind", + "x-ms-archive-status", + "x-ms-blob-append-offset", + "x-ms-blob-cache-control", + "x-ms-blob-committed-block-count", + "x-ms-blob-condition-appendpos", + "x-ms-blob-condition-maxsize", + "x-ms-blob-content-disposition", + "x-ms-blob-content-encoding", + "x-ms-blob-content-language", + "x-ms-blob-content-length", + "x-ms-blob-content-md5", + "x-ms-blob-content-type", + "x-ms-blob-public-access", + "x-ms-blob-sequence-number", + "x-ms-blob-type", + "x-ms-copy-destination-snapshot", + "x-ms-creation-time", + "x-ms-default-encryption-scope", + "x-ms-delete-snapshots", + "x-ms-delete-type-permanent", + "x-ms-deny-encryption-scope-override", + "x-ms-encryption-algorithm", + "x-ms-if-sequence-number-eq", + "x-ms-if-sequence-number-le", + "x-ms-if-sequence-number-lt", + "x-ms-incremental-copy", + "x-ms-lease-action", + "x-ms-lease-break-period", + "x-ms-lease-duration", + "x-ms-lease-id", + "x-ms-lease-time", + "x-ms-page-write", + "x-ms-proposed-lease-id", + "x-ms-range-get-content-md5", + "x-ms-rehydrate-priority", + "x-ms-sequence-number-action", + "x-ms-sku-name", + "x-ms-source-content-md5", + "x-ms-source-if-match", + "x-ms-source-if-modified-since", + "x-ms-source-if-none-match", + "x-ms-source-if-unmodified-since", + "x-ms-tag-count", + "x-ms-encryption-key-sha256", + "x-ms-if-tags", + "x-ms-source-if-tags", +]; +const StorageBlobLoggingAllowedQueryParameters = [ + "comp", + "maxresults", + "rscc", + "rscd", + "rsce", + "rscl", + "rsct", + "se", + "si", + "sip", + "sp", + "spr", + "sr", + "srt", + "ss", + "st", + "sv", + "include", + "marker", + "prefix", + "copyid", + "restype", + "blockid", + "blocklisttype", + "delimiter", + "prevsnapshot", + "ske", + "skoid", + "sks", + "skt", + "sktid", + "skv", + "snapshot", +]; +const BlobUsesCustomerSpecifiedEncryptionMsg = "BlobUsesCustomerSpecifiedEncryption"; +const BlobDoesNotUseCustomerSpecifiedEncryption = "BlobDoesNotUseCustomerSpecifiedEncryption"; +/// List of ports used for path style addressing. +/// Path style addressing means that storage account is put in URI's Path segment in instead of in host. +const PathStylePorts = [ + "10000", + "10001", + "10002", + "10003", + "10004", + "10100", + "10101", + "10102", + "10103", + "10104", + "11000", + "11001", + "11002", + "11003", + "11004", + "11100", + "11101", + "11102", + "11103", + "11104", +]; + +// Copyright (c) Microsoft Corporation. +/** + * Reserved URL characters must be properly escaped for Storage services like Blob or File. + * + * ## URL encode and escape strategy for JS SDKs + * + * When customers pass a URL string into XxxClient classes constructor, the URL string may already be URL encoded or not. + * But before sending to Azure Storage server, the URL must be encoded. However, it's hard for a SDK to guess whether the URL + * string has been encoded or not. We have 2 potential strategies, and chose strategy two for the XxxClient constructors. + * + * ### Strategy One: Assume the customer URL string is not encoded, and always encode URL string in SDK. 
+ *
+ * This is what the legacy V2 SDK does; it is simple and works for most cases.
+ * - When the customer URL string is "http://account.blob.core.windows.net/con/b:",
+ *   the SDK will encode it to "http://account.blob.core.windows.net/con/b%3A" and send it to the server. A blob named "b:" will be created.
+ * - When the customer URL string is "http://account.blob.core.windows.net/con/b%3A",
+ *   the SDK will encode it to "http://account.blob.core.windows.net/con/b%253A" and send it to the server. A blob named "b%3A" will be created.
+ *
+ * But this strategy makes it impossible to create a blob with "?" in its name, because when the customer URL string is
+ * "http://account.blob.core.windows.net/con/blob?name", the "?name" part will be treated as a URL parameter instead of the blob name.
+ * If the customer URL string is "http://account.blob.core.windows.net/con/blob%3Fname", a blob named "blob%3Fname" will be created.
+ * The V2 SDK doesn't have this issue because it doesn't allow customers to pass in a full URL; it accepts a separate blob name and calls encodeURIComponent on it.
+ * We cannot accept that the SDK is unable to create a blob with "?" in its name. So we implement strategy two:
+ *
+ * ### Strategy Two: The SDK doesn't assume whether the URL has been encoded or not. It will just escape the special characters.
+ *
+ * This is what the V10 Blob Go SDK does. It accepts a URL type in Go and calls url.EscapedPath() to escape the special characters that are not yet escaped.
+ * - When the customer URL string is "http://account.blob.core.windows.net/con/b:",
+ *   the SDK will escape ":" as in "http://account.blob.core.windows.net/con/b%3A" and send that to the server. A blob named "b:" will be created.
+ * - When the customer URL string is "http://account.blob.core.windows.net/con/b%3A",
+ *   there are no special characters, so "http://account.blob.core.windows.net/con/b%3A" is sent to the server. A blob named "b:" will be created.
+ * - When the customer URL string is "http://account.blob.core.windows.net/con/b%253A",
+ *   there are no special characters, so "http://account.blob.core.windows.net/con/b%253A" is sent to the server. A blob named "b%3A" will be created.
+ *
+ * This strategy gives us the flexibility to create blob names with any special characters. But "%" will be treated as a special character: if the URL string
+ * is not encoded, there shouldn't be a "%" in the URL string, otherwise the URL is not a valid URL.
+ * If a customer needs to create a blob with "%" in its name, use "%25" instead of "%", just like in the 3rd sample above.
+ * The following URL strings are therefore invalid:
+ * - "http://account.blob.core.windows.net/con/b%"
+ * - "http://account.blob.core.windows.net/con/b%2"
+ * - "http://account.blob.core.windows.net/con/b%G"
+ *
+ * Another special character is "?"; use "%3F" to represent a blob name with "?" in a URL string.
+ *
+ * ### Strategy for containerName, blobName or other specific XXXName parameters in methods such as `containerClient.getBlobClient(blobName)`
+ *
+ * We will apply strategy one and call encodeURIComponent on these parameters, like blobName, because what customers pass in is a plain name instead of a URL.
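+ *
+ * Concretely, escapeURLPath() below implements strategy two, so (sketch):
+ *
+ *   escapeURLPath("http://account.blob.core.windows.net/con/b:")
+ *   // -> "http://account.blob.core.windows.net/con/b%3A"
+ *   escapeURLPath("http://account.blob.core.windows.net/con/b%3A")
+ *   // -> "http://account.blob.core.windows.net/con/b%3A" (already escaped, left as-is)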
+ * + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-shares--directories--files--and-metadata + * + * @param url - + */ +function escapeURLPath(url) { + const urlParsed = coreHttp.URLBuilder.parse(url); + let path = urlParsed.getPath(); + path = path || "/"; + path = escape(path); + urlParsed.setPath(path); + return urlParsed.toString(); +} +function getProxyUriFromDevConnString(connectionString) { + // Development Connection String + // https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#connect-to-the-emulator-account-using-the-well-known-account-name-and-key + let proxyUri = ""; + if (connectionString.search("DevelopmentStorageProxyUri=") !== -1) { + // CONNECTION_STRING=UseDevelopmentStorage=true;DevelopmentStorageProxyUri=http://myProxyUri + const matchCredentials = connectionString.split(";"); + for (const element of matchCredentials) { + if (element.trim().startsWith("DevelopmentStorageProxyUri=")) { + proxyUri = element.trim().match("DevelopmentStorageProxyUri=(.*)")[1]; + } } - }, - queryParameters: [ - timeoutInSeconds, - snapshot, - versionId, - comp16 - ], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1, - leaseId, - ifTags, - rehydratePriority, - tier1 - ], - isXML: true, - serializer: xmlSerializer$3 -}; -const getAccountInfoOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "GET", - responses: { - 200: { - headersMapper: BlobGetAccountInfoHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: BlobGetAccountInfoExceptionHeaders + } + return proxyUri; +} +function getValueInConnString(connectionString, argument) { + const elements = connectionString.split(";"); + for (const element of elements) { + if (element.trim().startsWith(argument)) { + return element.trim().match(argument + "=(.*)")[1]; } - }, - queryParameters: [comp, restype1], - urlParameters: [url], - headerParameters: [version, accept1], - isXML: true, - serializer: xmlSerializer$3 -}; -const queryOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "POST", - responses: { - 200: { - bodyMapper: { - type: { name: "Stream" }, - serializedName: "parsedResponse" - }, - headersMapper: BlobQueryHeaders - }, - 206: { - bodyMapper: { - type: { name: "Stream" }, - serializedName: "parsedResponse" - }, - headersMapper: BlobQueryHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: BlobQueryExceptionHeaders + } + return ""; +} +/** + * Extracts the parts of an Azure Storage account connection string. + * + * @param connectionString - Connection string. + * @returns String key value pairs of the storage account's url and credentials. + */ +function extractConnectionStringParts(connectionString) { + let proxyUri = ""; + if (connectionString.startsWith("UseDevelopmentStorage=true")) { + // Development connection string + proxyUri = getProxyUriFromDevConnString(connectionString); + connectionString = DevelopmentConnectionString; + } + // Matching BlobEndpoint in the Account connection string + let blobEndpoint = getValueInConnString(connectionString, "BlobEndpoint"); + // Slicing off '/' at the end if exists + // (The methods that use `extractConnectionStringParts` expect the url to not have `/` at the end) + blobEndpoint = blobEndpoint.endsWith("/") ? 
blobEndpoint.slice(0, -1) : blobEndpoint; + if (connectionString.search("DefaultEndpointsProtocol=") !== -1 && + connectionString.search("AccountKey=") !== -1) { + // Account connection string + let defaultEndpointsProtocol = ""; + let accountName = ""; + let accountKey = Buffer.from("accountKey", "base64"); + let endpointSuffix = ""; + // Get account name and key + accountName = getValueInConnString(connectionString, "AccountName"); + accountKey = Buffer.from(getValueInConnString(connectionString, "AccountKey"), "base64"); + if (!blobEndpoint) { + // BlobEndpoint is not present in the Account connection string + // Can be obtained from `${defaultEndpointsProtocol}://${accountName}.blob.${endpointSuffix}` + defaultEndpointsProtocol = getValueInConnString(connectionString, "DefaultEndpointsProtocol"); + const protocol = defaultEndpointsProtocol.toLowerCase(); + if (protocol !== "https" && protocol !== "http") { + throw new Error("Invalid DefaultEndpointsProtocol in the provided Connection String. Expecting 'https' or 'http'"); + } + endpointSuffix = getValueInConnString(connectionString, "EndpointSuffix"); + if (!endpointSuffix) { + throw new Error("Invalid EndpointSuffix in the provided Connection String"); + } + blobEndpoint = `${defaultEndpointsProtocol}://${accountName}.blob.${endpointSuffix}`; } - }, - requestBody: queryRequest, - queryParameters: [ - timeoutInSeconds, - snapshot, - comp17 - ], - urlParameters: [url], - headerParameters: [ - contentType, - accept, - version, - requestId, - leaseId, - ifModifiedSince, - ifUnmodifiedSince, - encryptionKey, - encryptionKeySha256, - encryptionAlgorithm, - ifMatch, - ifNoneMatch, - ifTags - ], - isXML: true, - contentType: "application/xml; charset=utf-8", - mediaType: "xml", - serializer: xmlSerializer$3 -}; -const getTagsOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "GET", - responses: { - 200: { - bodyMapper: BlobTags, - headersMapper: BlobGetTagsHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: BlobGetTagsExceptionHeaders + if (!accountName) { + throw new Error("Invalid AccountName in the provided Connection String"); } - }, - queryParameters: [ - timeoutInSeconds, - snapshot, - versionId, - comp18 - ], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1, - leaseId, - ifTags - ], - isXML: true, - serializer: xmlSerializer$3 -}; -const setTagsOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 204: { - headersMapper: BlobSetTagsHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: BlobSetTagsExceptionHeaders + else if (accountKey.length === 0) { + throw new Error("Invalid AccountKey in the provided Connection String"); } - }, - requestBody: tags, - queryParameters: [ - timeoutInSeconds, - versionId, - comp18 - ], - urlParameters: [url], - headerParameters: [ - contentType, - accept, - version, - requestId, - leaseId, - ifTags, - transactionalContentMD5, - transactionalContentCrc64 - ], - isXML: true, - contentType: "application/xml; charset=utf-8", - mediaType: "xml", - serializer: xmlSerializer$3 -}; - -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. 
+ return { + kind: "AccountConnString", + url: blobEndpoint, + accountName, + accountKey, + proxyUri, + }; + } + else { + // SAS connection string + const accountSas = getValueInConnString(connectionString, "SharedAccessSignature"); + const accountName = getAccountNameFromUrl(blobEndpoint); + if (!blobEndpoint) { + throw new Error("Invalid BlobEndpoint in the provided SAS Connection String"); + } + else if (!accountSas) { + throw new Error("Invalid SharedAccessSignature in the provided SAS Connection String"); + } + return { kind: "SASConnString", url: blobEndpoint, accountName, accountSas }; + } +} +/** + * Internal escape method implemented Strategy Two mentioned in escapeURL() description. * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. + * @param text - */ -/** Class representing a PageBlob. */ -class PageBlob { - /** - * Initialize a new instance of the class PageBlob class. - * @param client Reference to the service client - */ - constructor(client) { - this.client = client; +function escape(text) { + return encodeURIComponent(text) + .replace(/%2F/g, "/") // Don't escape for "/" + .replace(/'/g, "%27") // Escape for "'" + .replace(/\+/g, "%20") + .replace(/%25/g, "%"); // Revert encoded "%" +} +/** + * Append a string to URL path. Will remove duplicated "/" in front of the string + * when URL path ends with a "/". + * + * @param url - Source URL string + * @param name - String to be appended to URL + * @returns An updated URL string + */ +function appendToURLPath(url, name) { + const urlParsed = coreHttp.URLBuilder.parse(url); + let path = urlParsed.getPath(); + path = path ? (path.endsWith("/") ? `${path}${name}` : `${path}/${name}`) : name; + urlParsed.setPath(path); + return urlParsed.toString(); +} +/** + * Set URL parameter name and value. If name exists in URL parameters, old value + * will be replaced by name key. If not provide value, the parameter will be deleted. + * + * @param url - Source URL string + * @param name - Parameter name + * @param value - Parameter value + * @returns An updated URL string + */ +function setURLParameter(url, name, value) { + const urlParsed = coreHttp.URLBuilder.parse(url); + urlParsed.setQueryParameter(name, value); + return urlParsed.toString(); +} +/** + * Get URL parameter by name. + * + * @param url - + * @param name - + */ +function getURLParameter(url, name) { + const urlParsed = coreHttp.URLBuilder.parse(url); + return urlParsed.getQueryParameterValue(name); +} +/** + * Set URL host. + * + * @param url - Source URL string + * @param host - New host string + * @returns An updated URL string + */ +function setURLHost(url, host) { + const urlParsed = coreHttp.URLBuilder.parse(url); + urlParsed.setHost(host); + return urlParsed.toString(); +} +/** + * Get URL path from an URL string. + * + * @param url - Source URL string + */ +function getURLPath(url) { + const urlParsed = coreHttp.URLBuilder.parse(url); + return urlParsed.getPath(); +} +/** + * Get URL scheme from an URL string. + * + * @param url - Source URL string + */ +function getURLScheme(url) { + const urlParsed = coreHttp.URLBuilder.parse(url); + return urlParsed.getScheme(); +} +/** + * Get URL path and query from an URL string. 
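+ *
+ * For example (a sketch of the behavior below; the query, if any, is kept and
+ * normalized to start with "?"):
+ *
+ *   getURLPathAndQuery("https://account.blob.core.windows.net/con/blob?comp=list")
+ *   // -> "/con/blob?comp=list"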
+ * + * @param url - Source URL string + */ +function getURLPathAndQuery(url) { + const urlParsed = coreHttp.URLBuilder.parse(url); + const pathString = urlParsed.getPath(); + if (!pathString) { + throw new RangeError("Invalid url without valid path."); } - /** - * The Create operation creates a new page blob. - * @param contentLength The length of the request. - * @param blobContentLength This header specifies the maximum size for the page blob, up to 1 TB. The - * page blob size must be aligned to a 512-byte boundary. - * @param options The options parameters. - */ - create(contentLength, blobContentLength, options) { - const operationArguments = { - contentLength, - blobContentLength, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, createOperationSpec$1); + let queryString = urlParsed.getQuery() || ""; + queryString = queryString.trim(); + if (queryString !== "") { + queryString = queryString.startsWith("?") ? queryString : `?${queryString}`; // Ensure query string start with '?' } - /** - * The Upload Pages operation writes a range of pages to a page blob - * @param contentLength The length of the request. - * @param body Initial data - * @param options The options parameters. - */ - uploadPages(contentLength, body, options) { - const operationArguments = { - contentLength, - body, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, uploadPagesOperationSpec); + return `${pathString}${queryString}`; +} +/** + * Get URL query key value pairs from an URL string. + * + * @param url - + */ +function getURLQueries(url) { + let queryString = coreHttp.URLBuilder.parse(url).getQuery(); + if (!queryString) { + return {}; } - /** - * The Clear Pages operation clears a set of pages from a page blob - * @param contentLength The length of the request. - * @param options The options parameters. - */ - clearPages(contentLength, options) { - const operationArguments = { - contentLength, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, clearPagesOperationSpec); + queryString = queryString.trim(); + queryString = queryString.startsWith("?") ? queryString.substr(1) : queryString; + let querySubStrings = queryString.split("&"); + querySubStrings = querySubStrings.filter((value) => { + const indexOfEqual = value.indexOf("="); + const lastIndexOfEqual = value.lastIndexOf("="); + return (indexOfEqual > 0 && indexOfEqual === lastIndexOfEqual && lastIndexOfEqual < value.length - 1); + }); + const queries = {}; + for (const querySubString of querySubStrings) { + const splitResults = querySubString.split("="); + const key = splitResults[0]; + const value = splitResults[1]; + queries[key] = value; } - /** - * The Upload Pages operation writes a range of pages to a page blob where the contents are read from a - * URL - * @param sourceUrl Specify a URL to the copy source. - * @param sourceRange Bytes of source data in the specified range. The length of this range should - * match the ContentLength header and x-ms-range/Range destination range header. - * @param contentLength The length of the request. - * @param range The range of bytes to which the source range would be written. The range should be 512 - * aligned and range-end is required. - * @param options The options parameters. 
- */ - uploadPagesFromURL(sourceUrl, sourceRange, contentLength, range, options) { - const operationArguments = { - sourceUrl, - sourceRange, - contentLength, - range, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, uploadPagesFromURLOperationSpec); + return queries; +} +/** + * Append a string to URL query. + * + * @param url - Source URL string. + * @param queryParts - String to be appended to the URL query. + * @returns An updated URL string. + */ +function appendToURLQuery(url, queryParts) { + const urlParsed = coreHttp.URLBuilder.parse(url); + let query = urlParsed.getQuery(); + if (query) { + query += "&" + queryParts; } - /** - * The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a - * page blob - * @param options The options parameters. - */ - getPageRanges(options) { - const operationArguments = { - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, getPageRangesOperationSpec); + else { + query = queryParts; } - /** - * The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were - * changed between target blob and previous snapshot. - * @param options The options parameters. - */ - getPageRangesDiff(options) { - const operationArguments = { - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + urlParsed.setQuery(query); + return urlParsed.toString(); +} +/** + * Rounds a date off to seconds. + * + * @param date - + * @param withMilliseconds - If true, YYYY-MM-DDThh:mm:ss.fffffffZ will be returned; + * If false, YYYY-MM-DDThh:mm:ssZ will be returned. + * @returns Date string in ISO8061 format, with or without 7 milliseconds component + */ +function truncatedISO8061Date(date, withMilliseconds = true) { + // Date.toISOString() will return like "2018-10-29T06:34:36.139Z" + const dateString = date.toISOString(); + return withMilliseconds + ? dateString.substring(0, dateString.length - 1) + "0000" + "Z" + : dateString.substring(0, dateString.length - 5) + "Z"; +} +/** + * Base64 encode. + * + * @param content - + */ +function base64encode(content) { + return !coreHttp.isNode ? btoa(content) : Buffer.from(content).toString("base64"); +} +/** + * Generate a 64 bytes base64 block ID string. + * + * @param blockIndex - + */ +function generateBlockID(blockIDPrefix, blockIndex) { + // To generate a 64 bytes base64 string, source string should be 48 + const maxSourceStringLength = 48; + // A blob can have a maximum of 100,000 uncommitted blocks at any given time + const maxBlockIndexLength = 6; + const maxAllowedBlockIDPrefixLength = maxSourceStringLength - maxBlockIndexLength; + if (blockIDPrefix.length > maxAllowedBlockIDPrefixLength) { + blockIDPrefix = blockIDPrefix.slice(0, maxAllowedBlockIDPrefixLength); + } + const res = blockIDPrefix + + padStart(blockIndex.toString(), maxSourceStringLength - blockIDPrefix.length, "0"); + return base64encode(res); +} +/** + * Delay specified time interval. 
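+ *
+ * Usage sketch (aborter and abortError are optional; `signal` is a hypothetical
+ * AbortSignal-like emitter):
+ *
+ *   await delay(500); // plain half-second pause
+ *   await delay(5000, signal, new Error("aborted")); // rejects early on "abort"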
+ * + * @param timeInMs - + * @param aborter - + * @param abortError - + */ +async function delay(timeInMs, aborter, abortError) { + return new Promise((resolve, reject) => { + /* eslint-disable-next-line prefer-const */ + let timeout; + const abortHandler = () => { + if (timeout !== undefined) { + clearTimeout(timeout); + } + reject(abortError); }; - return this.client.sendOperationRequest(operationArguments, getPageRangesDiffOperationSpec); - } - /** - * Resize the Blob - * @param blobContentLength This header specifies the maximum size for the page blob, up to 1 TB. The - * page blob size must be aligned to a 512-byte boundary. - * @param options The options parameters. - */ - resize(blobContentLength, options) { - const operationArguments = { - blobContentLength, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + const resolveHandler = () => { + if (aborter !== undefined) { + aborter.removeEventListener("abort", abortHandler); + } + resolve(); }; - return this.client.sendOperationRequest(operationArguments, resizeOperationSpec); + timeout = setTimeout(resolveHandler, timeInMs); + if (aborter !== undefined) { + aborter.addEventListener("abort", abortHandler); + } + }); +} +/** + * String.prototype.padStart() + * + * @param currentString - + * @param targetLength - + * @param padString - + */ +function padStart(currentString, targetLength, padString = " ") { + // @ts-expect-error: TS doesn't know this code needs to run downlevel sometimes + if (String.prototype.padStart) { + return currentString.padStart(targetLength, padString); } - /** - * Update the sequence number of the blob - * @param sequenceNumberAction Required if the x-ms-blob-sequence-number header is set for the request. - * This property applies to page blobs only. This property indicates how the service should modify the - * blob's sequence number - * @param options The options parameters. - */ - updateSequenceNumber(sequenceNumberAction, options) { - const operationArguments = { - sequenceNumberAction, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, updateSequenceNumberOperationSpec); + padString = padString || " "; + if (currentString.length > targetLength) { + return currentString; } - /** - * The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. - * The snapshot is copied such that only the differential changes between the previously copied - * snapshot are transferred to the destination. The copied snapshots are complete copies of the - * original snapshot and can be read or copied from as usual. This API is supported since REST version - * 2016-05-31. - * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to - * 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would - * appear in a request URI. The source blob must either be public or must be authenticated via a shared - * access signature. - * @param options The options parameters. 
- */ - copyIncremental(copySource, options) { - const operationArguments = { - copySource, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, copyIncrementalOperationSpec); + else { + targetLength = targetLength - currentString.length; + if (targetLength > padString.length) { + padString += padString.repeat(targetLength / padString.length); + } + return padString.slice(0, targetLength) + currentString; } } -// Operation Specifications -const xmlSerializer$2 = new coreHttp__namespace.Serializer(Mappers, /* isXml */ true); -const serializer$2 = new coreHttp__namespace.Serializer(Mappers, /* isXml */ false); -const createOperationSpec$1 = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 201: { - headersMapper: PageBlobCreateHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: PageBlobCreateExceptionHeaders - } - }, - queryParameters: [timeoutInSeconds], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1, - contentLength, - metadata, - leaseId, - ifModifiedSince, - ifUnmodifiedSince, - encryptionKey, - encryptionKeySha256, - encryptionAlgorithm, - ifMatch, - ifNoneMatch, - ifTags, - blobCacheControl, - blobContentType, - blobContentMD5, - blobContentEncoding, - blobContentLanguage, - blobContentDisposition, - immutabilityPolicyExpiry, - immutabilityPolicyMode, - encryptionScope, - tier, - blobTagsString, - legalHold1, - blobType, - blobContentLength, - blobSequenceNumber - ], - isXML: true, - serializer: xmlSerializer$2 -}; -const uploadPagesOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 201: { - headersMapper: PageBlobUploadPagesHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: PageBlobUploadPagesExceptionHeaders - } - }, - requestBody: body1, - queryParameters: [timeoutInSeconds, comp19], - urlParameters: [url], - headerParameters: [ - version, - requestId, - contentLength, - leaseId, - ifModifiedSince, - ifUnmodifiedSince, - range, - encryptionKey, - encryptionKeySha256, - encryptionAlgorithm, - ifMatch, - ifNoneMatch, - ifTags, - encryptionScope, - transactionalContentMD5, - transactionalContentCrc64, - contentType1, - accept2, - pageWrite, - ifSequenceNumberLessThanOrEqualTo, - ifSequenceNumberLessThan, - ifSequenceNumberEqualTo - ], - mediaType: "binary", - serializer: serializer$2 -}; -const clearPagesOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 201: { - headersMapper: PageBlobClearPagesHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: PageBlobClearPagesExceptionHeaders +/** + * If two strings are equal when compared case insensitive. 
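+ *
+ * For example (sketch): iEqual("ETag", "etag") returns true.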
+ * + * @param str1 - + * @param str2 - + */ +function iEqual(str1, str2) { + return str1.toLocaleLowerCase() === str2.toLocaleLowerCase(); +} +/** + * Extracts account name from the url + * @param url - url to extract the account name from + * @returns with the account name + */ +function getAccountNameFromUrl(url) { + const parsedUrl = coreHttp.URLBuilder.parse(url); + let accountName; + try { + if (parsedUrl.getHost().split(".")[1] === "blob") { + // `${defaultEndpointsProtocol}://${accountName}.blob.${endpointSuffix}`; + accountName = parsedUrl.getHost().split(".")[0]; } - }, - queryParameters: [timeoutInSeconds, comp19], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1, - contentLength, - leaseId, - ifModifiedSince, - ifUnmodifiedSince, - range, - encryptionKey, - encryptionKeySha256, - encryptionAlgorithm, - ifMatch, - ifNoneMatch, - ifTags, - encryptionScope, - ifSequenceNumberLessThanOrEqualTo, - ifSequenceNumberLessThan, - ifSequenceNumberEqualTo, - pageWrite1 - ], - isXML: true, - serializer: xmlSerializer$2 -}; -const uploadPagesFromURLOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 201: { - headersMapper: PageBlobUploadPagesFromURLHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: PageBlobUploadPagesFromURLExceptionHeaders + else if (isIpEndpointStyle(parsedUrl)) { + // IPv4/IPv6 address hosts... Example - http://192.0.0.10:10001/devstoreaccount1/ + // Single word domain without a [dot] in the endpoint... Example - http://localhost:10001/devstoreaccount1/ + // .getPath() -> /devstoreaccount1/ + accountName = parsedUrl.getPath().split("/")[1]; } - }, - queryParameters: [timeoutInSeconds, comp19], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1, - contentLength, - leaseId, - ifModifiedSince, - ifUnmodifiedSince, - encryptionKey, - encryptionKeySha256, - encryptionAlgorithm, - ifMatch, - ifNoneMatch, - ifTags, - encryptionScope, - sourceIfModifiedSince, - sourceIfUnmodifiedSince, - sourceIfMatch, - sourceIfNoneMatch, - sourceContentMD5, - copySourceAuthorization, - pageWrite, - ifSequenceNumberLessThanOrEqualTo, - ifSequenceNumberLessThan, - ifSequenceNumberEqualTo, - sourceUrl, - sourceRange, - sourceContentCrc64, - range1 - ], - isXML: true, - serializer: xmlSerializer$2 -}; -const getPageRangesOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "GET", - responses: { - 200: { - bodyMapper: PageList, - headersMapper: PageBlobGetPageRangesHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: PageBlobGetPageRangesExceptionHeaders + else { + // Custom domain case: "https://customdomain.com/containername/blob". 
+ accountName = ""; } - }, - queryParameters: [ - timeoutInSeconds, - marker, - maxPageSize, - snapshot, - comp20 - ], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1, - leaseId, - ifModifiedSince, - ifUnmodifiedSince, - range, - ifMatch, - ifNoneMatch, - ifTags - ], - isXML: true, - serializer: xmlSerializer$2 -}; -const getPageRangesDiffOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "GET", - responses: { - 200: { - bodyMapper: PageList, - headersMapper: PageBlobGetPageRangesDiffHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: PageBlobGetPageRangesDiffExceptionHeaders + return accountName; + } + catch (error) { + throw new Error("Unable to extract accountName with provided information."); + } +} +function isIpEndpointStyle(parsedUrl) { + if (parsedUrl.getHost() === undefined) { + return false; + } + const host = parsedUrl.getHost() + (parsedUrl.getPort() === undefined ? "" : ":" + parsedUrl.getPort()); + // Case 1: Ipv6, use a broad regex to find out candidates whose host contains two ':'. + // Case 2: localhost(:port), use broad regex to match port part. + // Case 3: Ipv4, use broad regex which just check if host contains Ipv4. + // For valid host please refer to https://man7.org/linux/man-pages/man7/hostname.7.html. + return (/^.*:.*:.*$|^localhost(:[0-9]+)?$|^(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])(\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])){3}(:[0-9]+)?$/.test(host) || + (parsedUrl.getPort() !== undefined && PathStylePorts.includes(parsedUrl.getPort()))); +} +/** + * Convert Tags to encoded string. + * + * @param tags - + */ +function toBlobTagsString(tags) { + if (tags === undefined) { + return undefined; + } + const tagPairs = []; + for (const key in tags) { + if (Object.prototype.hasOwnProperty.call(tags, key)) { + const value = tags[key]; + tagPairs.push(`${encodeURIComponent(key)}=${encodeURIComponent(value)}`); } - }, - queryParameters: [ - timeoutInSeconds, - marker, - maxPageSize, - snapshot, - comp20, - prevsnapshot - ], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1, - leaseId, - ifModifiedSince, - ifUnmodifiedSince, - range, - ifMatch, - ifNoneMatch, - ifTags, - prevSnapshotUrl - ], - isXML: true, - serializer: xmlSerializer$2 -}; -const resizeOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 200: { - headersMapper: PageBlobResizeHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: PageBlobResizeExceptionHeaders + } + return tagPairs.join("&"); +} +/** + * Convert Tags type to BlobTags. 
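+ *
+ * For example (sketch): toBlobTags({ project: "alpha" }) returns
+ * { blobTagSet: [{ key: "project", value: "alpha" }] }.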
+ * + * @param tags - + */ +function toBlobTags(tags) { + if (tags === undefined) { + return undefined; + } + const res = { + blobTagSet: [], + }; + for (const key in tags) { + if (Object.prototype.hasOwnProperty.call(tags, key)) { + const value = tags[key]; + res.blobTagSet.push({ + key, + value, + }); } - }, - queryParameters: [comp, timeoutInSeconds], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1, - leaseId, - ifModifiedSince, - ifUnmodifiedSince, - encryptionKey, - encryptionKeySha256, - encryptionAlgorithm, - ifMatch, - ifNoneMatch, - ifTags, - encryptionScope, - blobContentLength - ], - isXML: true, - serializer: xmlSerializer$2 -}; -const updateSequenceNumberOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 200: { - headersMapper: PageBlobUpdateSequenceNumberHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: PageBlobUpdateSequenceNumberExceptionHeaders + } + return res; +} +/** + * Covert BlobTags to Tags type. + * + * @param tags - + */ +function toTags(tags) { + if (tags === undefined) { + return undefined; + } + const res = {}; + for (const blobTag of tags.blobTagSet) { + res[blobTag.key] = blobTag.value; + } + return res; +} +/** + * Convert BlobQueryTextConfiguration to QuerySerialization type. + * + * @param textConfiguration - + */ +function toQuerySerialization(textConfiguration) { + if (textConfiguration === undefined) { + return undefined; + } + switch (textConfiguration.kind) { + case "csv": + return { + format: { + type: "delimited", + delimitedTextConfiguration: { + columnSeparator: textConfiguration.columnSeparator || ",", + fieldQuote: textConfiguration.fieldQuote || "", + recordSeparator: textConfiguration.recordSeparator, + escapeChar: textConfiguration.escapeCharacter || "", + headersPresent: textConfiguration.hasHeaders || false, + }, + }, + }; + case "json": + return { + format: { + type: "json", + jsonTextConfiguration: { + recordSeparator: textConfiguration.recordSeparator, + }, + }, + }; + case "arrow": + return { + format: { + type: "arrow", + arrowConfiguration: { + schema: textConfiguration.schema, + }, + }, + }; + case "parquet": + return { + format: { + type: "parquet", + }, + }; + default: + throw Error("Invalid BlobQueryTextConfiguration."); + } +} +function parseObjectReplicationRecord(objectReplicationRecord) { + if (!objectReplicationRecord) { + return undefined; + } + if ("policy-id" in objectReplicationRecord) { + // If the dictionary contains a key with policy id, we are not required to do any parsing since + // the policy id should already be stored in the ObjectReplicationDestinationPolicyId. 
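+        // (All other keys follow the "or-<policyId>_<ruleId>" pattern parsed below.)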
+ return undefined; + } + const orProperties = []; + for (const key in objectReplicationRecord) { + const ids = key.split("_"); + const policyPrefix = "or-"; + if (ids[0].startsWith(policyPrefix)) { + ids[0] = ids[0].substring(policyPrefix.length); } - }, - queryParameters: [comp, timeoutInSeconds], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1, - leaseId, - ifModifiedSince, - ifUnmodifiedSince, - ifMatch, - ifNoneMatch, - ifTags, - blobSequenceNumber, - sequenceNumberAction - ], - isXML: true, - serializer: xmlSerializer$2 -}; -const copyIncrementalOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 202: { - headersMapper: PageBlobCopyIncrementalHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: PageBlobCopyIncrementalExceptionHeaders + const rule = { + ruleId: ids[1], + replicationStatus: objectReplicationRecord[key], + }; + const policyIndex = orProperties.findIndex((policy) => policy.policyId === ids[0]); + if (policyIndex > -1) { + orProperties[policyIndex].rules.push(rule); } - }, - queryParameters: [timeoutInSeconds, comp21], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1, - ifModifiedSince, - ifUnmodifiedSince, - ifMatch, - ifNoneMatch, - ifTags, - copySource - ], - isXML: true, - serializer: xmlSerializer$2 -}; - -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. + else { + orProperties.push({ + policyId: ids[0], + rules: [rule], + }); + } + } + return orProperties; +} +/** + * Attach a TokenCredential to an object. * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. + * @param thing - + * @param credential - */ -/** Class representing a AppendBlob. */ -class AppendBlob { - /** - * Initialize a new instance of the class AppendBlob class. - * @param client Reference to the service client - */ - constructor(client) { - this.client = client; +function attachCredential(thing, credential) { + thing.credential = credential; + return thing; +} +function httpAuthorizationToString(httpAuthorization) { + return httpAuthorization ? httpAuthorization.scheme + " " + httpAuthorization.value : undefined; +} +function BlobNameToString(name) { + if (name.encoded) { + return decodeURIComponent(name.content); } - /** - * The Create Append Blob operation creates a new append blob. - * @param contentLength The length of the request. - * @param options The options parameters. - */ - create(contentLength, options) { - const operationArguments = { - contentLength, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, createOperationSpec); + else { + return name.content; } - /** - * The Append Block operation commits a new block of data to the end of an existing append blob. The - * Append Block operation is permitted only if the blob was created with x-ms-blob-type set to - * AppendBlob. Append Block is supported only on version 2015-02-21 version or later. - * @param contentLength The length of the request. - * @param body Initial data - * @param options The options parameters. 
- */ - appendBlock(contentLength, body, options) { - const operationArguments = { - contentLength, - body, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, appendBlockOperationSpec); +} +function ConvertInternalResponseOfListBlobFlat(internalResponse) { + return Object.assign(Object.assign({}, internalResponse), { segment: { + blobItems: internalResponse.segment.blobItems.map((blobItemInteral) => { + const blobItem = Object.assign(Object.assign({}, blobItemInteral), { name: BlobNameToString(blobItemInteral.name) }); + return blobItem; + }), + } }); +} +function ConvertInternalResponseOfListBlobHierarchy(internalResponse) { + var _a; + return Object.assign(Object.assign({}, internalResponse), { segment: { + blobPrefixes: (_a = internalResponse.segment.blobPrefixes) === null || _a === void 0 ? void 0 : _a.map((blobPrefixInternal) => { + const blobPrefix = { + name: BlobNameToString(blobPrefixInternal.name), + }; + return blobPrefix; + }), + blobItems: internalResponse.segment.blobItems.map((blobItemInteral) => { + const blobItem = Object.assign(Object.assign({}, blobItemInteral), { name: BlobNameToString(blobItemInteral.name) }); + return blobItem; + }), + } }); +} +function decodeBase64String(value) { + if (coreHttp.isNode) { + return Buffer.from(value, "base64"); } - /** - * The Append Block operation commits a new block of data to the end of an existing append blob where - * the contents are read from a source url. The Append Block operation is permitted only if the blob - * was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version - * 2015-02-21 version or later. - * @param sourceUrl Specify a URL to the copy source. - * @param contentLength The length of the request. - * @param options The options parameters. - */ - appendBlockFromUrl(sourceUrl, contentLength, options) { - const operationArguments = { - sourceUrl, - contentLength, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + else { + const byteString = atob(value); + const arr = new Uint8Array(byteString.length); + for (let i = 0; i < byteString.length; i++) { + arr[i] = byteString.charCodeAt(i); + } + return arr; + } +} +function ParseBoolean(content) { + if (content === undefined) + return undefined; + if (content === "true") + return true; + if (content === "false") + return false; + return undefined; +} +function ParseBlobName(blobNameInXML) { + if (blobNameInXML["$"] !== undefined && blobNameInXML["#"] !== undefined) { + return { + encoded: ParseBoolean(blobNameInXML["$"]["Encoded"]), + content: blobNameInXML["#"], }; - return this.client.sendOperationRequest(operationArguments, appendBlockFromUrlOperationSpec); } - /** - * The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version - * 2019-12-12 version or later. - * @param options The options parameters. 
- */ - seal(options) { - const operationArguments = { - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) + else { + return { + encoded: false, + content: blobNameInXML, }; - return this.client.sendOperationRequest(operationArguments, sealOperationSpec); } } -// Operation Specifications -const xmlSerializer$1 = new coreHttp__namespace.Serializer(Mappers, /* isXml */ true); -const serializer$1 = new coreHttp__namespace.Serializer(Mappers, /* isXml */ false); -const createOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 201: { - headersMapper: AppendBlobCreateHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: AppendBlobCreateExceptionHeaders - } - }, - queryParameters: [timeoutInSeconds], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1, - contentLength, - metadata, - leaseId, - ifModifiedSince, - ifUnmodifiedSince, - encryptionKey, - encryptionKeySha256, - encryptionAlgorithm, - ifMatch, - ifNoneMatch, - ifTags, - blobCacheControl, - blobContentType, - blobContentMD5, - blobContentEncoding, - blobContentLanguage, - blobContentDisposition, - immutabilityPolicyExpiry, - immutabilityPolicyMode, - encryptionScope, - blobTagsString, - legalHold1, - blobType1 - ], - isXML: true, - serializer: xmlSerializer$1 -}; -const appendBlockOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 201: { - headersMapper: AppendBlobAppendBlockHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: AppendBlobAppendBlockExceptionHeaders - } - }, - requestBody: body1, - queryParameters: [timeoutInSeconds, comp22], - urlParameters: [url], - headerParameters: [ - version, - requestId, - contentLength, - leaseId, - ifModifiedSince, - ifUnmodifiedSince, - encryptionKey, - encryptionKeySha256, - encryptionAlgorithm, - ifMatch, - ifNoneMatch, - ifTags, - encryptionScope, - transactionalContentMD5, - transactionalContentCrc64, - contentType1, - accept2, - maxSize, - appendPosition - ], - mediaType: "binary", - serializer: serializer$1 -}; -const appendBlockFromUrlOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 201: { - headersMapper: AppendBlobAppendBlockFromUrlHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: AppendBlobAppendBlockFromUrlExceptionHeaders - } - }, - queryParameters: [timeoutInSeconds, comp22], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1, - contentLength, - leaseId, - ifModifiedSince, - ifUnmodifiedSince, - encryptionKey, - encryptionKeySha256, - encryptionAlgorithm, - ifMatch, - ifNoneMatch, - ifTags, - encryptionScope, - sourceIfModifiedSince, - sourceIfUnmodifiedSince, - sourceIfMatch, - sourceIfNoneMatch, - sourceContentMD5, - copySourceAuthorization, - transactionalContentMD5, - sourceUrl, - sourceContentCrc64, - maxSize, - appendPosition, - sourceRange1 - ], - isXML: true, - serializer: xmlSerializer$1 -}; -const sealOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 200: { - headersMapper: AppendBlobSealHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: AppendBlobSealExceptionHeaders - } - }, - queryParameters: [timeoutInSeconds, comp23], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1, - leaseId, - ifModifiedSince, - ifUnmodifiedSince, - ifMatch, - ifNoneMatch, - appendPosition - ], - isXML: true, - serializer: xmlSerializer$1 -}; - -/* - 
* Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ -/** Class representing a BlockBlob. */ -class BlockBlob { - /** - * Initialize a new instance of the class BlockBlob class. - * @param client Reference to the service client - */ - constructor(client) { - this.client = client; +function ParseBlobProperties(blobPropertiesInXML) { + const blobProperties = blobPropertiesInXML; + if (blobPropertiesInXML["Creation-Time"]) { + blobProperties.createdOn = new Date(blobPropertiesInXML["Creation-Time"]); + delete blobProperties["Creation-Time"]; + } + if (blobPropertiesInXML["Last-Modified"]) { + blobProperties.lastModified = new Date(blobPropertiesInXML["Last-Modified"]); + delete blobProperties["Last-Modified"]; + } + if (blobPropertiesInXML["Etag"]) { + blobProperties.etag = blobPropertiesInXML["Etag"]; + delete blobProperties["Etag"]; + } + if (blobPropertiesInXML["Content-Length"]) { + blobProperties.contentLength = parseFloat(blobPropertiesInXML["Content-Length"]); + delete blobProperties["Content-Length"]; + } + if (blobPropertiesInXML["Content-Type"]) { + blobProperties.contentType = blobPropertiesInXML["Content-Type"]; + delete blobProperties["Content-Type"]; + } + if (blobPropertiesInXML["Content-Encoding"]) { + blobProperties.contentEncoding = blobPropertiesInXML["Content-Encoding"]; + delete blobProperties["Content-Encoding"]; + } + if (blobPropertiesInXML["Content-Language"]) { + blobProperties.contentLanguage = blobPropertiesInXML["Content-Language"]; + delete blobProperties["Content-Language"]; + } + if (blobPropertiesInXML["Content-MD5"]) { + blobProperties.contentMD5 = decodeBase64String(blobPropertiesInXML["Content-MD5"]); + delete blobProperties["Content-MD5"]; + } + if (blobPropertiesInXML["Content-Disposition"]) { + blobProperties.contentDisposition = blobPropertiesInXML["Content-Disposition"]; + delete blobProperties["Content-Disposition"]; + } + if (blobPropertiesInXML["Cache-Control"]) { + blobProperties.cacheControl = blobPropertiesInXML["Cache-Control"]; + delete blobProperties["Cache-Control"]; + } + if (blobPropertiesInXML["x-ms-blob-sequence-number"]) { + blobProperties.blobSequenceNumber = parseFloat(blobPropertiesInXML["x-ms-blob-sequence-number"]); + delete blobProperties["x-ms-blob-sequence-number"]; + } + if (blobPropertiesInXML["BlobType"]) { + blobProperties.blobType = blobPropertiesInXML["BlobType"]; + delete blobProperties["BlobType"]; + } + if (blobPropertiesInXML["LeaseStatus"]) { + blobProperties.leaseStatus = blobPropertiesInXML["LeaseStatus"]; + delete blobProperties["LeaseStatus"]; + } + if (blobPropertiesInXML["LeaseState"]) { + blobProperties.leaseState = blobPropertiesInXML["LeaseState"]; + delete blobProperties["LeaseState"]; + } + if (blobPropertiesInXML["LeaseDuration"]) { + blobProperties.leaseDuration = blobPropertiesInXML["LeaseDuration"]; + delete blobProperties["LeaseDuration"]; + } + if (blobPropertiesInXML["CopyId"]) { + blobProperties.copyId = blobPropertiesInXML["CopyId"]; + delete blobProperties["CopyId"]; + } + if (blobPropertiesInXML["CopyStatus"]) { + blobProperties.copyStatus = blobPropertiesInXML["CopyStatus"]; + delete blobProperties["CopyStatus"]; + } + if (blobPropertiesInXML["CopySource"]) { + blobProperties.copySource = blobPropertiesInXML["CopySource"]; + delete blobProperties["CopySource"]; + } + if (blobPropertiesInXML["CopyProgress"]) { + 
blobProperties.copyProgress = blobPropertiesInXML["CopyProgress"]; + delete blobProperties["CopyProgress"]; + } + if (blobPropertiesInXML["CopyCompletionTime"]) { + blobProperties.copyCompletedOn = new Date(blobPropertiesInXML["CopyCompletionTime"]); + delete blobProperties["CopyCompletionTime"]; + } + if (blobPropertiesInXML["CopyStatusDescription"]) { + blobProperties.copyStatusDescription = blobPropertiesInXML["CopyStatusDescription"]; + delete blobProperties["CopyStatusDescription"]; + } + if (blobPropertiesInXML["ServerEncrypted"]) { + blobProperties.serverEncrypted = ParseBoolean(blobPropertiesInXML["ServerEncrypted"]); + delete blobProperties["ServerEncrypted"]; + } + if (blobPropertiesInXML["IncrementalCopy"]) { + blobProperties.incrementalCopy = ParseBoolean(blobPropertiesInXML["IncrementalCopy"]); + delete blobProperties["IncrementalCopy"]; + } + if (blobPropertiesInXML["DestinationSnapshot"]) { + blobProperties.destinationSnapshot = blobPropertiesInXML["DestinationSnapshot"]; + delete blobProperties["DestinationSnapshot"]; + } + if (blobPropertiesInXML["DeletedTime"]) { + blobProperties.deletedOn = new Date(blobPropertiesInXML["DeletedTime"]); + delete blobProperties["DeletedTime"]; + } + if (blobPropertiesInXML["RemainingRetentionDays"]) { + blobProperties.remainingRetentionDays = parseFloat(blobPropertiesInXML["RemainingRetentionDays"]); + delete blobProperties["RemainingRetentionDays"]; + } + if (blobPropertiesInXML["AccessTier"]) { + blobProperties.accessTier = blobPropertiesInXML["AccessTier"]; + delete blobProperties["AccessTier"]; + } + if (blobPropertiesInXML["AccessTierInferred"]) { + blobProperties.accessTierInferred = ParseBoolean(blobPropertiesInXML["AccessTierInferred"]); + delete blobProperties["AccessTierInferred"]; + } + if (blobPropertiesInXML["ArchiveStatus"]) { + blobProperties.archiveStatus = blobPropertiesInXML["ArchiveStatus"]; + delete blobProperties["ArchiveStatus"]; } - /** - * The Upload Block Blob operation updates the content of an existing block blob. Updating an existing - * block blob overwrites any existing metadata on the blob. Partial updates are not supported with Put - * Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a - * partial update of the content of a block blob, use the Put Block List operation. - * @param contentLength The length of the request. - * @param body Initial data - * @param options The options parameters. - */ - upload(contentLength, body, options) { - const operationArguments = { - contentLength, - body, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, uploadOperationSpec); + if (blobPropertiesInXML["CustomerProvidedKeySha256"]) { + blobProperties.customerProvidedKeySha256 = blobPropertiesInXML["CustomerProvidedKeySha256"]; + delete blobProperties["CustomerProvidedKeySha256"]; } - /** - * The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read - * from a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are - * not supported with Put Blob from URL; the content of an existing blob is overwritten with the - * content of the new blob. To perform partial updates to a block blob’s contents using a source URL, - * use the Put Block from URL API in conjunction with Put Block List. - * @param contentLength The length of the request. 
- * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to - * 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would - * appear in a request URI. The source blob must either be public or must be authenticated via a shared - * access signature. - * @param options The options parameters. - */ - putBlobFromUrl(contentLength, copySource, options) { - const operationArguments = { - contentLength, - copySource, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, putBlobFromUrlOperationSpec); + if (blobPropertiesInXML["EncryptionScope"]) { + blobProperties.encryptionScope = blobPropertiesInXML["EncryptionScope"]; + delete blobProperties["EncryptionScope"]; } - /** - * The Stage Block operation creates a new block to be committed as part of a blob - * @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the string - * must be less than or equal to 64 bytes in size. For a given blob, the length of the value specified - * for the blockid parameter must be the same size for each block. - * @param contentLength The length of the request. - * @param body Initial data - * @param options The options parameters. - */ - stageBlock(blockId, contentLength, body, options) { - const operationArguments = { - blockId, - contentLength, - body, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, stageBlockOperationSpec); + if (blobPropertiesInXML["AccessTierChangeTime"]) { + blobProperties.accessTierChangedOn = new Date(blobPropertiesInXML["AccessTierChangeTime"]); + delete blobProperties["AccessTierChangeTime"]; } - /** - * The Stage Block operation creates a new block to be committed as part of a blob where the contents - * are read from a URL. - * @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the string - * must be less than or equal to 64 bytes in size. For a given blob, the length of the value specified - * for the blockid parameter must be the same size for each block. - * @param contentLength The length of the request. - * @param sourceUrl Specify a URL to the copy source. - * @param options The options parameters. - */ - stageBlockFromURL(blockId, contentLength, sourceUrl, options) { - const operationArguments = { - blockId, - contentLength, - sourceUrl, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, stageBlockFromURLOperationSpec); + if (blobPropertiesInXML["TagCount"]) { + blobProperties.tagCount = parseFloat(blobPropertiesInXML["TagCount"]); + delete blobProperties["TagCount"]; } - /** - * The Commit Block List operation writes a blob by specifying the list of block IDs that make up the - * blob. In order to be written as part of a blob, a block must have been successfully written to the - * server in a prior Put Block operation. You can call Put Block List to update a blob by uploading - * only those blocks that have changed, then committing the new and existing blocks together. You can - * do this by specifying whether to commit a block from the committed block list or from the - * uncommitted block list, or to commit the most recently uploaded version of the block, whichever list - * it may belong to. - * @param blocks Blob Blocks. 
- * @param options The options parameters. - */ - commitBlockList(blocks, options) { - const operationArguments = { - blocks, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, commitBlockListOperationSpec); + if (blobPropertiesInXML["Expiry-Time"]) { + blobProperties.expiresOn = new Date(blobPropertiesInXML["Expiry-Time"]); + delete blobProperties["Expiry-Time"]; } - /** - * The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block - * blob - * @param listType Specifies whether to return the list of committed blocks, the list of uncommitted - * blocks, or both lists together. - * @param options The options parameters. - */ - getBlockList(listType, options) { - const operationArguments = { - listType, - options: coreHttp__namespace.operationOptionsToRequestOptionsBase(options || {}) - }; - return this.client.sendOperationRequest(operationArguments, getBlockListOperationSpec); + if (blobPropertiesInXML["Sealed"]) { + blobProperties.isSealed = ParseBoolean(blobPropertiesInXML["Sealed"]); + delete blobProperties["Sealed"]; + } + if (blobPropertiesInXML["RehydratePriority"]) { + blobProperties.rehydratePriority = blobPropertiesInXML["RehydratePriority"]; + delete blobProperties["RehydratePriority"]; + } + if (blobPropertiesInXML["LastAccessTime"]) { + blobProperties.lastAccessedOn = new Date(blobPropertiesInXML["LastAccessTime"]); + delete blobProperties["LastAccessTime"]; } + if (blobPropertiesInXML["ImmutabilityPolicyUntilDate"]) { + blobProperties.immutabilityPolicyExpiresOn = new Date(blobPropertiesInXML["ImmutabilityPolicyUntilDate"]); + delete blobProperties["ImmutabilityPolicyUntilDate"]; + } + if (blobPropertiesInXML["ImmutabilityPolicyMode"]) { + blobProperties.immutabilityPolicyMode = blobPropertiesInXML["ImmutabilityPolicyMode"]; + delete blobProperties["ImmutabilityPolicyMode"]; + } + if (blobPropertiesInXML["LegalHold"]) { + blobProperties.legalHold = ParseBoolean(blobPropertiesInXML["LegalHold"]); + delete blobProperties["LegalHold"]; + } + return blobProperties; } -// Operation Specifications -const xmlSerializer = new coreHttp__namespace.Serializer(Mappers, /* isXml */ true); -const serializer = new coreHttp__namespace.Serializer(Mappers, /* isXml */ false); -const uploadOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 201: { - headersMapper: BlockBlobUploadHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: BlockBlobUploadExceptionHeaders - } - }, - requestBody: body1, - queryParameters: [timeoutInSeconds], - urlParameters: [url], - headerParameters: [ - version, - requestId, - contentLength, - metadata, - leaseId, - ifModifiedSince, - ifUnmodifiedSince, - encryptionKey, - encryptionKeySha256, - encryptionAlgorithm, - ifMatch, - ifNoneMatch, - ifTags, - blobCacheControl, - blobContentType, - blobContentMD5, - blobContentEncoding, - blobContentLanguage, - blobContentDisposition, - immutabilityPolicyExpiry, - immutabilityPolicyMode, - encryptionScope, - tier, - blobTagsString, - legalHold1, - transactionalContentMD5, - contentType1, - accept2, - blobType2 - ], - mediaType: "binary", - serializer -}; -const putBlobFromUrlOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 201: { - headersMapper: BlockBlobPutBlobFromUrlHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: BlockBlobPutBlobFromUrlExceptionHeaders - } - }, - 
queryParameters: [timeoutInSeconds], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1, - contentLength, - metadata, - leaseId, - ifModifiedSince, - ifUnmodifiedSince, - encryptionKey, - encryptionKeySha256, - encryptionAlgorithm, - ifMatch, - ifNoneMatch, - ifTags, - blobCacheControl, - blobContentType, - blobContentMD5, - blobContentEncoding, - blobContentLanguage, - blobContentDisposition, - encryptionScope, - tier, - sourceIfModifiedSince, - sourceIfUnmodifiedSince, - sourceIfMatch, - sourceIfNoneMatch, - sourceIfTags, - copySource, - blobTagsString, - sourceContentMD5, - copySourceAuthorization, - copySourceTags, - transactionalContentMD5, - blobType2, - copySourceBlobProperties - ], - isXML: true, - serializer: xmlSerializer -}; -const stageBlockOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 201: { - headersMapper: BlockBlobStageBlockHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: BlockBlobStageBlockExceptionHeaders - } - }, - requestBody: body1, - queryParameters: [ - timeoutInSeconds, - comp24, - blockId - ], - urlParameters: [url], - headerParameters: [ - version, - requestId, - contentLength, - leaseId, - encryptionKey, - encryptionKeySha256, - encryptionAlgorithm, - encryptionScope, - transactionalContentMD5, - transactionalContentCrc64, - contentType1, - accept2 - ], - mediaType: "binary", - serializer -}; -const stageBlockFromURLOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 201: { - headersMapper: BlockBlobStageBlockFromURLHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: BlockBlobStageBlockFromURLExceptionHeaders - } - }, - queryParameters: [ - timeoutInSeconds, - comp24, - blockId - ], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1, - contentLength, - leaseId, - encryptionKey, - encryptionKeySha256, - encryptionAlgorithm, - encryptionScope, - sourceIfModifiedSince, - sourceIfUnmodifiedSince, - sourceIfMatch, - sourceIfNoneMatch, - sourceContentMD5, - copySourceAuthorization, - sourceUrl, - sourceContentCrc64, - sourceRange1 - ], - isXML: true, - serializer: xmlSerializer -}; -const commitBlockListOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "PUT", - responses: { - 201: { - headersMapper: BlockBlobCommitBlockListHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: BlockBlobCommitBlockListExceptionHeaders - } - }, - requestBody: blocks, - queryParameters: [timeoutInSeconds, comp25], - urlParameters: [url], - headerParameters: [ - contentType, - accept, - version, - requestId, - metadata, - leaseId, - ifModifiedSince, - ifUnmodifiedSince, - encryptionKey, - encryptionKeySha256, - encryptionAlgorithm, - ifMatch, - ifNoneMatch, - ifTags, - blobCacheControl, - blobContentType, - blobContentMD5, - blobContentEncoding, - blobContentLanguage, - blobContentDisposition, - immutabilityPolicyExpiry, - immutabilityPolicyMode, - encryptionScope, - tier, - blobTagsString, - legalHold1, - transactionalContentMD5, - transactionalContentCrc64 - ], - isXML: true, - contentType: "application/xml; charset=utf-8", - mediaType: "xml", - serializer: xmlSerializer -}; -const getBlockListOperationSpec = { - path: "/{containerName}/{blob}", - httpMethod: "GET", - responses: { - 200: { - bodyMapper: BlockList, - headersMapper: BlockBlobGetBlockListHeaders - }, - default: { - bodyMapper: StorageError, - headersMapper: BlockBlobGetBlockListExceptionHeaders +function 
ParseBlobItem(blobInXML) { + const blobItem = blobInXML; + blobItem.properties = ParseBlobProperties(blobInXML["Properties"]); + delete blobItem["Properties"]; + blobItem.name = ParseBlobName(blobInXML["Name"]); + delete blobItem["Name"]; + blobItem.deleted = ParseBoolean(blobInXML["Deleted"]); + delete blobItem["Deleted"]; + if (blobInXML["Snapshot"]) { + blobItem.snapshot = blobInXML["Snapshot"]; + delete blobItem["Snapshot"]; + } + if (blobInXML["VersionId"]) { + blobItem.versionId = blobInXML["VersionId"]; + delete blobItem["VersionId"]; + } + if (blobInXML["IsCurrentVersion"]) { + blobItem.isCurrentVersion = ParseBoolean(blobInXML["IsCurrentVersion"]); + delete blobItem["IsCurrentVersion"]; + } + if (blobInXML["Metadata"]) { + blobItem.metadata = blobInXML["Metadata"]; + delete blobItem["Metadata"]; + } + if (blobInXML["Tags"]) { + blobItem.blobTags = ParseBlobTags(blobInXML["Tags"]); + delete blobItem["Tags"]; + } + if (blobInXML["OrMetadata"]) { + blobItem.objectReplicationMetadata = blobInXML["OrMetadata"]; + delete blobItem["OrMetadata"]; + } + if (blobInXML["HasVersionsOnly"]) { + blobItem.hasVersionsOnly = ParseBoolean(blobInXML["HasVersionsOnly"]); + delete blobItem["HasVersionsOnly"]; + } + return blobItem; +} +function ParseBlobPrefix(blobPrefixInXML) { + return { + name: ParseBlobName(blobPrefixInXML["Name"]), + }; +} +function ParseBlobTag(blobTagInXML) { + return { + key: blobTagInXML["Key"], + value: blobTagInXML["Value"], + }; +} +function ParseBlobTags(blobTagsInXML) { + if (blobTagsInXML === undefined || + blobTagsInXML["TagSet"] === undefined || + blobTagsInXML["TagSet"]["Tag"] === undefined) { + return undefined; + } + const blobTagSet = []; + if (blobTagsInXML["TagSet"]["Tag"] instanceof Array) { + blobTagsInXML["TagSet"]["Tag"].forEach((blobTagInXML) => { + blobTagSet.push(ParseBlobTag(blobTagInXML)); + }); + } + else { + blobTagSet.push(ParseBlobTag(blobTagsInXML["TagSet"]["Tag"])); + } + return { blobTagSet: blobTagSet }; +} +function ProcessBlobItems(blobArrayInXML) { + const blobItems = []; + if (blobArrayInXML instanceof Array) { + blobArrayInXML.forEach((blobInXML) => { + blobItems.push(ParseBlobItem(blobInXML)); + }); + } + else { + blobItems.push(ParseBlobItem(blobArrayInXML)); + } + return blobItems; +} +function ProcessBlobPrefixes(blobPrefixesInXML) { + const blobPrefixes = []; + if (blobPrefixesInXML instanceof Array) { + blobPrefixesInXML.forEach((blobPrefixInXML) => { + blobPrefixes.push(ParseBlobPrefix(blobPrefixInXML)); + }); + } + else { + blobPrefixes.push(ParseBlobPrefix(blobPrefixesInXML)); + } + return blobPrefixes; +} +function* ExtractPageRangeInfoItems(getPageRangesSegment) { + let pageRange = []; + let clearRange = []; + if (getPageRangesSegment.pageRange) + pageRange = getPageRangesSegment.pageRange; + if (getPageRangesSegment.clearRange) + clearRange = getPageRangesSegment.clearRange; + let pageRangeIndex = 0; + let clearRangeIndex = 0; + while (pageRangeIndex < pageRange.length && clearRangeIndex < clearRange.length) { + if (pageRange[pageRangeIndex].start < clearRange[clearRangeIndex].start) { + yield { + start: pageRange[pageRangeIndex].start, + end: pageRange[pageRangeIndex].end, + isClear: false, + }; + ++pageRangeIndex; } - }, - queryParameters: [ - timeoutInSeconds, - snapshot, - comp25, - listType - ], - urlParameters: [url], - headerParameters: [ - version, - requestId, - accept1, - leaseId, - ifTags - ], - isXML: true, - serializer: xmlSerializer -}; + else { + yield { + start: clearRange[clearRangeIndex].start, + end: 
clearRange[clearRangeIndex].end,
+ isClear: true,
+ };
+ ++clearRangeIndex;
+ }
+ }
+ for (; pageRangeIndex < pageRange.length; ++pageRangeIndex) {
+ yield {
+ start: pageRange[pageRangeIndex].start,
+ end: pageRange[pageRangeIndex].end,
+ isClear: false,
+ };
+ }
+ for (; clearRangeIndex < clearRange.length; ++clearRangeIndex) {
+ yield {
+ start: clearRange[clearRangeIndex].start,
+ end: clearRange[clearRangeIndex].end,
+ isClear: true,
+ };
+ }
+}
+/**
+ * Escape the blobName but keep path separator ('/').
+ */
+function EscapePath(blobName) {
+ const split = blobName.split("/");
+ for (let i = 0; i < split.length; i++) {
+ split[i] = encodeURIComponent(split[i]);
+ }
+ return split.join("/");
+}

// Copyright (c) Microsoft Corporation.
/**
- * The `@azure/logger` configuration for this package.
+ * StorageBrowserPolicy will handle differences between Node.js and browser runtime, including:
+ *
+ * 1. Browsers cache GET/HEAD requests by adding conditional headers such as 'IF_MODIFIED_SINCE'.
+ * StorageBrowserPolicy is a policy used to add a timestamp query to GET/HEAD request URLs,
+ * thus avoiding the browser cache.
+ *
+ * 2. Remove the cookie header for security.
+ *
+ * 3. Remove the content-length header to avoid browser warnings.
 */
-const logger = logger$1.createClientLogger("storage-blob");
+class StorageBrowserPolicy extends coreHttp.BaseRequestPolicy {
+ /**
+ * Creates an instance of StorageBrowserPolicy.
+ * @param nextPolicy -
+ * @param options -
+ */
+ // The base class has a protected constructor. Adding a public one to enable constructing of this class.
+ /* eslint-disable-next-line @typescript-eslint/no-useless-constructor*/
+ constructor(nextPolicy, options) {
+ super(nextPolicy, options);
+ }
+ /**
+ * Sends out request.
+ *
+ * @param request -
+ */
+ async sendRequest(request) {
+ if (coreHttp.isNode) {
+ return this._nextPolicy.sendRequest(request);
+ }
+ if (request.method.toUpperCase() === "GET" || request.method.toUpperCase() === "HEAD") {
+ request.url = setURLParameter(request.url, URLConstants.Parameters.FORCE_BROWSER_NO_CACHE, new Date().getTime().toString());
+ }
+ request.headers.remove(HeaderConstants.COOKIE);
+ // According to XHR standards, content-length should be fully controlled by browsers
+ request.headers.remove(HeaderConstants.CONTENT_LENGTH);
+ return this._nextPolicy.sendRequest(request);
+ }
+}

// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-const SDK_VERSION = "12.12.0";
-const SERVICE_VERSION = "2021-10-04";
-const BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES = 256 * 1024 * 1024; // 256MB
-const BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES = 4000 * 1024 * 1024; // 4000MB
-const BLOCK_BLOB_MAX_BLOCKS = 50000;
-const DEFAULT_BLOCK_BUFFER_SIZE_BYTES = 8 * 1024 * 1024; // 8MB
-const DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES = 4 * 1024 * 1024; // 4MB
-const DEFAULT_MAX_DOWNLOAD_RETRY_REQUESTS = 5;
-const REQUEST_TIMEOUT = 100 * 1000; // In ms
/**
- * The OAuth scope to use with Azure Storage.
+ * StorageBrowserPolicyFactory is a factory class that helps generate StorageBrowserPolicy objects.
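+ *
+ * A usage sketch (illustrative only, not part of the bundled SDK; `nextPolicy`
+ * and `options` are assumed to be supplied by the coreHttp request pipeline,
+ * matching the `create` signature below):
+ *
+ *   const factory = new StorageBrowserPolicyFactory();
+ *   const policy = factory.create(nextPolicy, options); // returns a StorageBrowserPolicy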
*/ -const StorageOAuthScopes = "https://storage.azure.com/.default"; -const URLConstants = { - Parameters: { - FORCE_BROWSER_NO_CACHE: "_", - SIGNATURE: "sig", - SNAPSHOT: "snapshot", - VERSIONID: "versionid", - TIMEOUT: "timeout", - }, -}; -const HTTPURLConnection = { - HTTP_ACCEPTED: 202, - HTTP_CONFLICT: 409, - HTTP_NOT_FOUND: 404, - HTTP_PRECON_FAILED: 412, - HTTP_RANGE_NOT_SATISFIABLE: 416, -}; -const HeaderConstants = { - AUTHORIZATION: "Authorization", - AUTHORIZATION_SCHEME: "Bearer", - CONTENT_ENCODING: "Content-Encoding", - CONTENT_ID: "Content-ID", - CONTENT_LANGUAGE: "Content-Language", - CONTENT_LENGTH: "Content-Length", - CONTENT_MD5: "Content-Md5", - CONTENT_TRANSFER_ENCODING: "Content-Transfer-Encoding", - CONTENT_TYPE: "Content-Type", - COOKIE: "Cookie", - DATE: "date", - IF_MATCH: "if-match", - IF_MODIFIED_SINCE: "if-modified-since", - IF_NONE_MATCH: "if-none-match", - IF_UNMODIFIED_SINCE: "if-unmodified-since", - PREFIX_FOR_STORAGE: "x-ms-", - RANGE: "Range", - USER_AGENT: "User-Agent", - X_MS_CLIENT_REQUEST_ID: "x-ms-client-request-id", - X_MS_COPY_SOURCE: "x-ms-copy-source", - X_MS_DATE: "x-ms-date", - X_MS_ERROR_CODE: "x-ms-error-code", - X_MS_VERSION: "x-ms-version", -}; -const ETagNone = ""; -const ETagAny = "*"; -const SIZE_1_MB = 1 * 1024 * 1024; -const BATCH_MAX_REQUEST = 256; -const BATCH_MAX_PAYLOAD_IN_BYTES = 4 * SIZE_1_MB; -const HTTP_LINE_ENDING = "\r\n"; -const HTTP_VERSION_1_1 = "HTTP/1.1"; -const EncryptionAlgorithmAES25 = "AES256"; -const DevelopmentConnectionString = `DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;`; -const StorageBlobLoggingAllowedHeaderNames = [ - "Access-Control-Allow-Origin", - "Cache-Control", - "Content-Length", - "Content-Type", - "Date", - "Request-Id", - "traceparent", - "Transfer-Encoding", - "User-Agent", - "x-ms-client-request-id", - "x-ms-date", - "x-ms-error-code", - "x-ms-request-id", - "x-ms-return-client-request-id", - "x-ms-version", - "Accept-Ranges", - "Content-Disposition", - "Content-Encoding", - "Content-Language", - "Content-MD5", - "Content-Range", - "ETag", - "Last-Modified", - "Server", - "Vary", - "x-ms-content-crc64", - "x-ms-copy-action", - "x-ms-copy-completion-time", - "x-ms-copy-id", - "x-ms-copy-progress", - "x-ms-copy-status", - "x-ms-has-immutability-policy", - "x-ms-has-legal-hold", - "x-ms-lease-state", - "x-ms-lease-status", - "x-ms-range", - "x-ms-request-server-encrypted", - "x-ms-server-encrypted", - "x-ms-snapshot", - "x-ms-source-range", - "If-Match", - "If-Modified-Since", - "If-None-Match", - "If-Unmodified-Since", - "x-ms-access-tier", - "x-ms-access-tier-change-time", - "x-ms-access-tier-inferred", - "x-ms-account-kind", - "x-ms-archive-status", - "x-ms-blob-append-offset", - "x-ms-blob-cache-control", - "x-ms-blob-committed-block-count", - "x-ms-blob-condition-appendpos", - "x-ms-blob-condition-maxsize", - "x-ms-blob-content-disposition", - "x-ms-blob-content-encoding", - "x-ms-blob-content-language", - "x-ms-blob-content-length", - "x-ms-blob-content-md5", - "x-ms-blob-content-type", - "x-ms-blob-public-access", - "x-ms-blob-sequence-number", - "x-ms-blob-type", - "x-ms-copy-destination-snapshot", - "x-ms-creation-time", - "x-ms-default-encryption-scope", - "x-ms-delete-snapshots", - "x-ms-delete-type-permanent", - "x-ms-deny-encryption-scope-override", - "x-ms-encryption-algorithm", - "x-ms-if-sequence-number-eq", - 
"x-ms-if-sequence-number-le", - "x-ms-if-sequence-number-lt", - "x-ms-incremental-copy", - "x-ms-lease-action", - "x-ms-lease-break-period", - "x-ms-lease-duration", - "x-ms-lease-id", - "x-ms-lease-time", - "x-ms-page-write", - "x-ms-proposed-lease-id", - "x-ms-range-get-content-md5", - "x-ms-rehydrate-priority", - "x-ms-sequence-number-action", - "x-ms-sku-name", - "x-ms-source-content-md5", - "x-ms-source-if-match", - "x-ms-source-if-modified-since", - "x-ms-source-if-none-match", - "x-ms-source-if-unmodified-since", - "x-ms-tag-count", - "x-ms-encryption-key-sha256", - "x-ms-if-tags", - "x-ms-source-if-tags", -]; -const StorageBlobLoggingAllowedQueryParameters = [ - "comp", - "maxresults", - "rscc", - "rscd", - "rsce", - "rscl", - "rsct", - "se", - "si", - "sip", - "sp", - "spr", - "sr", - "srt", - "ss", - "st", - "sv", - "include", - "marker", - "prefix", - "copyid", - "restype", - "blockid", - "blocklisttype", - "delimiter", - "prevsnapshot", - "ske", - "skoid", - "sks", - "skt", - "sktid", - "skv", - "snapshot", -]; -const BlobUsesCustomerSpecifiedEncryptionMsg = "BlobUsesCustomerSpecifiedEncryption"; -const BlobDoesNotUseCustomerSpecifiedEncryption = "BlobDoesNotUseCustomerSpecifiedEncryption"; -/// List of ports used for path style addressing. -/// Path style addressing means that storage account is put in URI's Path segment in instead of in host. -const PathStylePorts = [ - "10000", - "10001", - "10002", - "10003", - "10004", - "10100", - "10101", - "10102", - "10103", - "10104", - "11000", - "11001", - "11002", - "11003", - "11004", - "11100", - "11101", - "11102", - "11103", - "11104", -]; +class StorageBrowserPolicyFactory { + /** + * Creates a StorageBrowserPolicyFactory object. + * + * @param nextPolicy - + * @param options - + */ + create(nextPolicy, options) { + return new StorageBrowserPolicy(nextPolicy, options); + } +} // Copyright (c) Microsoft Corporation. /** - * Reserved URL characters must be properly escaped for Storage services like Blob or File. - * - * ## URL encode and escape strategy for JS SDKs - * - * When customers pass a URL string into XxxClient classes constructor, the URL string may already be URL encoded or not. - * But before sending to Azure Storage server, the URL must be encoded. However, it's hard for a SDK to guess whether the URL - * string has been encoded or not. We have 2 potential strategies, and chose strategy two for the XxxClient constructors. - * - * ### Strategy One: Assume the customer URL string is not encoded, and always encode URL string in SDK. - * - * This is what legacy V2 SDK does, simple and works for most of the cases. - * - When customer URL string is "http://account.blob.core.windows.net/con/b:", - * SDK will encode it to "http://account.blob.core.windows.net/con/b%3A" and send to server. A blob named "b:" will be created. - * - When customer URL string is "http://account.blob.core.windows.net/con/b%3A", - * SDK will encode it to "http://account.blob.core.windows.net/con/b%253A" and send to server. A blob named "b%3A" will be created. - * - * But this strategy will make it not possible to create a blob with "?" in it's name. Because when customer URL string is - * "http://account.blob.core.windows.net/con/blob?name", the "?name" will be treated as URL paramter instead of blob name. - * If customer URL string is "http://account.blob.core.windows.net/con/blob%3Fname", a blob named "blob%3Fname" will be created. 
- * V2 SDK doesn't have this issue because it doesn't allow customer pass in a full URL, it accepts a separate blob name and encodeURIComponent for it. - * We cannot accept a SDK cannot create a blob name with "?". So we implement strategy two: - * - * ### Strategy Two: SDK doesn't assume the URL has been encoded or not. It will just escape the special characters. - * - * This is what V10 Blob Go SDK does. It accepts a URL type in Go, and call url.EscapedPath() to escape the special chars unescaped. - * - When customer URL string is "http://account.blob.core.windows.net/con/b:", - * SDK will escape ":" like "http://account.blob.core.windows.net/con/b%3A" and send to server. A blob named "b:" will be created. - * - When customer URL string is "http://account.blob.core.windows.net/con/b%3A", - * There is no special characters, so send "http://account.blob.core.windows.net/con/b%3A" to server. A blob named "b:" will be created. - * - When customer URL string is "http://account.blob.core.windows.net/con/b%253A", - * There is no special characters, so send "http://account.blob.core.windows.net/con/b%253A" to server. A blob named "b%3A" will be created. - * - * This strategy gives us flexibility to create with any special characters. But "%" will be treated as a special characters, if the URL string - * is not encoded, there shouldn't a "%" in the URL string, otherwise the URL is not a valid URL. - * If customer needs to create a blob with "%" in it's blob name, use "%25" instead of "%". Just like above 3rd sample. - * And following URL strings are invalid: - * - "http://account.blob.core.windows.net/con/b%" - * - "http://account.blob.core.windows.net/con/b%2" - * - "http://account.blob.core.windows.net/con/b%G" - * - * Another special character is "?", use "%2F" to represent a blob name with "?" in a URL string. - * - * ### Strategy for containerName, blobName or other specific XXXName parameters in methods such as `containerClient.getBlobClient(blobName)` - * - * We will apply strategy one, and call encodeURIComponent for these parameters like blobName. Because what customers passes in is a plain name instead of a URL. - * - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-shares--directories--files--and-metadata - * - * @param url - + * RetryPolicy types. */ -function escapeURLPath(url) { - const urlParsed = coreHttp.URLBuilder.parse(url); - let path = urlParsed.getPath(); - path = path || "/"; - path = escape(path); - urlParsed.setPath(path); - return urlParsed.toString(); -} -function getProxyUriFromDevConnString(connectionString) { - // Development Connection String - // https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#connect-to-the-emulator-account-using-the-well-known-account-name-and-key - let proxyUri = ""; - if (connectionString.search("DevelopmentStorageProxyUri=") !== -1) { - // CONNECTION_STRING=UseDevelopmentStorage=true;DevelopmentStorageProxyUri=http://myProxyUri - const matchCredentials = connectionString.split(";"); - for (const element of matchCredentials) { - if (element.trim().startsWith("DevelopmentStorageProxyUri=")) { - proxyUri = element.trim().match("DevelopmentStorageProxyUri=(.*)")[1]; +exports.StorageRetryPolicyType = void 0; +(function (StorageRetryPolicyType) { + /** + * Exponential retry. Retry time delay grows exponentially. 
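+ *
+ * Concretely, in the delay() implementation further below, the primary-path
+ * wait after the n-th attempt is min((2 ** (n - 1) - 1) * retryDelayInMs,
+ * maxRetryDelayInMs); with the default 4s delay and 120s cap that gives
+ * 0s, 4s, 12s, 28s, ...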
+ */
+ StorageRetryPolicyType[StorageRetryPolicyType["EXPONENTIAL"] = 0] = "EXPONENTIAL";
+ /**
+ * Linear retry. Retry time delay grows linearly.
+ */
+ StorageRetryPolicyType[StorageRetryPolicyType["FIXED"] = 1] = "FIXED";
+})(exports.StorageRetryPolicyType || (exports.StorageRetryPolicyType = {}));
+// Default values of StorageRetryOptions
+const DEFAULT_RETRY_OPTIONS = {
+ maxRetryDelayInMs: 120 * 1000,
+ maxTries: 4,
+ retryDelayInMs: 4 * 1000,
+ retryPolicyType: exports.StorageRetryPolicyType.EXPONENTIAL,
+ secondaryHost: "",
+ tryTimeoutInMs: undefined, // Use server side default timeout strategy
+};
+const RETRY_ABORT_ERROR = new abortController.AbortError("The operation was aborted.");
+/**
+ * Retry policy with exponential retry and linear retry implemented.
+ */
+class StorageRetryPolicy extends coreHttp.BaseRequestPolicy {
+ /**
+ * Creates an instance of RetryPolicy.
+ *
+ * @param nextPolicy -
+ * @param options -
+ * @param retryOptions -
+ */
+ constructor(nextPolicy, options, retryOptions = DEFAULT_RETRY_OPTIONS) {
+ super(nextPolicy, options);
+ // Initialize retry options
+ this.retryOptions = {
+ retryPolicyType: retryOptions.retryPolicyType
+ ? retryOptions.retryPolicyType
+ : DEFAULT_RETRY_OPTIONS.retryPolicyType,
+ maxTries: retryOptions.maxTries && retryOptions.maxTries >= 1
+ ? Math.floor(retryOptions.maxTries)
+ : DEFAULT_RETRY_OPTIONS.maxTries,
+ tryTimeoutInMs: retryOptions.tryTimeoutInMs && retryOptions.tryTimeoutInMs >= 0
+ ? retryOptions.tryTimeoutInMs
+ : DEFAULT_RETRY_OPTIONS.tryTimeoutInMs,
+ retryDelayInMs: retryOptions.retryDelayInMs && retryOptions.retryDelayInMs >= 0
+ ? Math.min(retryOptions.retryDelayInMs, retryOptions.maxRetryDelayInMs
+ ? retryOptions.maxRetryDelayInMs
+ : DEFAULT_RETRY_OPTIONS.maxRetryDelayInMs)
+ : DEFAULT_RETRY_OPTIONS.retryDelayInMs,
+ maxRetryDelayInMs: retryOptions.maxRetryDelayInMs && retryOptions.maxRetryDelayInMs >= 0
+ ? retryOptions.maxRetryDelayInMs
+ : DEFAULT_RETRY_OPTIONS.maxRetryDelayInMs,
+ secondaryHost: retryOptions.secondaryHost
+ ? retryOptions.secondaryHost
+ : DEFAULT_RETRY_OPTIONS.secondaryHost,
+ };
+ }
+ /**
+ * Sends request.
+ *
+ * @param request -
+ */
+ async sendRequest(request) {
+ return this.attemptSendRequest(request, false, 1);
+ }
+ /**
+ * Decide on and perform the next retry. Won't mutate the request parameter.
+ *
+ * @param request -
+ * @param secondaryHas404 - If attempt was against the secondary & it returned a StatusNotFound (404), then
+ *                          the resource was not found. This may be due to replication delay. So, in this
+ *                          case, we'll never try the secondary again for this operation.
+ * @param attempt - How many attempts have been performed so far, starting from 1; this includes
+ *                  the attempt to be performed by this method call.
+ */
+ async attemptSendRequest(request, secondaryHas404, attempt) {
+ const newRequest = request.clone();
+ const isPrimaryRetry = secondaryHas404 ||
+ !this.retryOptions.secondaryHost ||
+ !(request.method === "GET" || request.method === "HEAD" || request.method === "OPTIONS") ||
+ attempt % 2 === 1;
+ if (!isPrimaryRetry) {
+ newRequest.url = setURLHost(newRequest.url, this.retryOptions.secondaryHost);
+ }
+ // Set the server-side timeout query parameter "timeout=[seconds]"
+ if (this.retryOptions.tryTimeoutInMs) {
+ newRequest.url = setURLParameter(newRequest.url, URLConstants.Parameters.TIMEOUT, Math.floor(this.retryOptions.tryTimeoutInMs / 1000).toString());
+ }
+ let response;
+ try {
+ logger.info(`RetryPolicy: =====> Try=${attempt} ${isPrimaryRetry ?
"Primary" : "Secondary"}`); + response = await this._nextPolicy.sendRequest(newRequest); + if (!this.shouldRetry(isPrimaryRetry, attempt, response)) { + return response; + } + secondaryHas404 = secondaryHas404 || (!isPrimaryRetry && response.status === 404); + } + catch (err) { + logger.error(`RetryPolicy: Caught error, message: ${err.message}, code: ${err.code}`); + if (!this.shouldRetry(isPrimaryRetry, attempt, response, err)) { + throw err; + } + } + await this.delay(isPrimaryRetry, attempt, request.abortSignal); + return this.attemptSendRequest(request, secondaryHas404, ++attempt); + } + /** + * Decide whether to retry according to last HTTP response and retry counters. + * + * @param isPrimaryRetry - + * @param attempt - + * @param response - + * @param err - + */ + shouldRetry(isPrimaryRetry, attempt, response, err) { + if (attempt >= this.retryOptions.maxTries) { + logger.info(`RetryPolicy: Attempt(s) ${attempt} >= maxTries ${this.retryOptions + .maxTries}, no further try.`); + return false; + } + // Handle network failures, you may need to customize the list when you implement + // your own http client + const retriableErrors = [ + "ETIMEDOUT", + "ESOCKETTIMEDOUT", + "ECONNREFUSED", + "ECONNRESET", + "ENOENT", + "ENOTFOUND", + "TIMEOUT", + "EPIPE", + "REQUEST_SEND_ERROR", // For default xhr based http client provided in ms-rest-js + ]; + if (err) { + for (const retriableError of retriableErrors) { + if (err.name.toUpperCase().includes(retriableError) || + err.message.toUpperCase().includes(retriableError) || + (err.code && err.code.toString().toUpperCase() === retriableError)) { + logger.info(`RetryPolicy: Network error ${retriableError} found, will retry.`); + return true; + } } } - } - return proxyUri; -} -function getValueInConnString(connectionString, argument) { - const elements = connectionString.split(";"); - for (const element of elements) { - if (element.trim().startsWith(argument)) { - return element.trim().match(argument + "=(.*)")[1]; - } - } - return ""; -} -/** - * Extracts the parts of an Azure Storage account connection string. - * - * @param connectionString - Connection string. - * @returns String key value pairs of the storage account's url and credentials. - */ -function extractConnectionStringParts(connectionString) { - let proxyUri = ""; - if (connectionString.startsWith("UseDevelopmentStorage=true")) { - // Development connection string - proxyUri = getProxyUriFromDevConnString(connectionString); - connectionString = DevelopmentConnectionString; - } - // Matching BlobEndpoint in the Account connection string - let blobEndpoint = getValueInConnString(connectionString, "BlobEndpoint"); - // Slicing off '/' at the end if exists - // (The methods that use `extractConnectionStringParts` expect the url to not have `/` at the end) - blobEndpoint = blobEndpoint.endsWith("/") ? 
blobEndpoint.slice(0, -1) : blobEndpoint; - if (connectionString.search("DefaultEndpointsProtocol=") !== -1 && - connectionString.search("AccountKey=") !== -1) { - // Account connection string - let defaultEndpointsProtocol = ""; - let accountName = ""; - let accountKey = Buffer.from("accountKey", "base64"); - let endpointSuffix = ""; - // Get account name and key - accountName = getValueInConnString(connectionString, "AccountName"); - accountKey = Buffer.from(getValueInConnString(connectionString, "AccountKey"), "base64"); - if (!blobEndpoint) { - // BlobEndpoint is not present in the Account connection string - // Can be obtained from `${defaultEndpointsProtocol}://${accountName}.blob.${endpointSuffix}` - defaultEndpointsProtocol = getValueInConnString(connectionString, "DefaultEndpointsProtocol"); - const protocol = defaultEndpointsProtocol.toLowerCase(); - if (protocol !== "https" && protocol !== "http") { - throw new Error("Invalid DefaultEndpointsProtocol in the provided Connection String. Expecting 'https' or 'http'"); + // If attempt was against the secondary & it returned a StatusNotFound (404), then + // the resource was not found. This may be due to replication delay. So, in this + // case, we'll never try the secondary again for this operation. + if (response || err) { + const statusCode = response ? response.status : err ? err.statusCode : 0; + if (!isPrimaryRetry && statusCode === 404) { + logger.info(`RetryPolicy: Secondary access with 404, will retry.`); + return true; } - endpointSuffix = getValueInConnString(connectionString, "EndpointSuffix"); - if (!endpointSuffix) { - throw new Error("Invalid EndpointSuffix in the provided Connection String"); + // Server internal error or server timeout + if (statusCode === 503 || statusCode === 500) { + logger.info(`RetryPolicy: Will retry for status code ${statusCode}.`); + return true; } - blobEndpoint = `${defaultEndpointsProtocol}://${accountName}.blob.${endpointSuffix}`; - } - if (!accountName) { - throw new Error("Invalid AccountName in the provided Connection String"); } - else if (accountKey.length === 0) { - throw new Error("Invalid AccountKey in the provided Connection String"); + if ((err === null || err === void 0 ? void 0 : err.code) === "PARSE_ERROR" && (err === null || err === void 0 ? void 0 : err.message.startsWith(`Error "Error: Unclosed root tag`))) { + logger.info("RetryPolicy: Incomplete XML response likely due to service timeout, will retry."); + return true; } - return { - kind: "AccountConnString", - url: blobEndpoint, - accountName, - accountKey, - proxyUri, - }; + return false; } - else { - // SAS connection string - const accountSas = getValueInConnString(connectionString, "SharedAccessSignature"); - const accountName = getAccountNameFromUrl(blobEndpoint); - if (!blobEndpoint) { - throw new Error("Invalid BlobEndpoint in the provided SAS Connection String"); + /** + * Delay a calculated time between retries. 
+ *
+ * @param isPrimaryRetry -
+ * @param attempt -
+ * @param abortSignal -
+ */
+ async delay(isPrimaryRetry, attempt, abortSignal) {
+ let delayTimeInMs = 0;
+ if (isPrimaryRetry) {
+ switch (this.retryOptions.retryPolicyType) {
+ case exports.StorageRetryPolicyType.EXPONENTIAL:
+ delayTimeInMs = Math.min((Math.pow(2, attempt - 1) - 1) * this.retryOptions.retryDelayInMs, this.retryOptions.maxRetryDelayInMs);
+ break;
+ case exports.StorageRetryPolicyType.FIXED:
+ delayTimeInMs = this.retryOptions.retryDelayInMs;
+ break;
+ }
 }
+ else {
+ delayTimeInMs = Math.random() * 1000;
 }
+ logger.info(`RetryPolicy: Delay for ${delayTimeInMs}ms`);
+ return delay(delayTimeInMs, abortSignal, RETRY_ABORT_ERROR);
 }
}
+
+// Copyright (c) Microsoft Corporation.
/**
- * Internal escape method implemented Strategy Two mentioned in escapeURL() description.
- *
- * @param text -
+ * StorageRetryPolicyFactory is a factory class that helps generate {@link StorageRetryPolicy} objects.
 */
-function escape(text) {
- return encodeURIComponent(text)
- .replace(/%2F/g, "/") // Don't escape for "/"
- .replace(/'/g, "%27") // Escape for "'"
- .replace(/\+/g, "%20")
- .replace(/%25/g, "%"); // Revert encoded "%"
+class StorageRetryPolicyFactory {
+ /**
+ * Creates an instance of StorageRetryPolicyFactory.
+ * @param retryOptions -
+ */
+ constructor(retryOptions) {
+ this.retryOptions = retryOptions;
+ }
+ /**
+ * Creates a StorageRetryPolicy object.
+ *
+ * @param nextPolicy -
+ * @param options -
+ */
+ create(nextPolicy, options) {
+ return new StorageRetryPolicy(nextPolicy, options, this.retryOptions);
+ }
}
/**
- * Append a string to URL path. Will remove duplicated "/" in front of the string
- * when URL path ends with a "/".
- *
- * @param url - Source URL string
- * @param name - String to be appended to URL
- * @returns An updated URL string
+ * Credential policy used to sign HTTP(S) requests before sending. This is an
+ * abstract class.
 */
-function appendToURLPath(url, name) {
- const urlParsed = coreHttp.URLBuilder.parse(url);
- let path = urlParsed.getPath();
- path = path ? (path.endsWith("/") ? `${path}${name}` : `${path}/${name}`) : name;
- urlParsed.setPath(path);
- return urlParsed.toString();
+class CredentialPolicy extends coreHttp.BaseRequestPolicy {
+ /**
+ * Sends out request.
+ *
+ * @param request -
+ */
+ sendRequest(request) {
+ return this._nextPolicy.sendRequest(this.signRequest(request));
+ }
+ /**
+ * Child classes must implement this method with request signing. This method
+ * will be executed in {@link sendRequest}.
+ *
+ * @param request -
+ */
+ signRequest(request) {
+ // Child classes must override this method with request signing. This method
+ // will be executed in sendRequest().
+ return request;
+ }
}
/**
- * Set URL parameter name and value. If name exists in URL parameters, old value
- * will be replaced by name key. If not provide value, the parameter will be deleted.
- *
- * @param url - Source URL string
- * @param name - Parameter name
- * @param value - Parameter value
- * @returns An updated URL string
+ * AnonymousCredentialPolicy is used with HTTP(S) requests that read public resources
+ * or for use with Shared Access Signatures (SAS).
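+ *
+ * It inherits the pass-through signRequest from {@link CredentialPolicy} above,
+ * so its effective behavior is simply (sketch):
+ *
+ *   sendRequest(request) => this._nextPolicy.sendRequest(request) // no signature added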
*/ -function setURLParameter(url, name, value) { - const urlParsed = coreHttp.URLBuilder.parse(url); - urlParsed.setQueryParameter(name, value); - return urlParsed.toString(); +class AnonymousCredentialPolicy extends CredentialPolicy { + /** + * Creates an instance of AnonymousCredentialPolicy. + * @param nextPolicy - + * @param options - + */ + // The base class has a protected constructor. Adding a public one to enable constructing of this class. + /* eslint-disable-next-line @typescript-eslint/no-useless-constructor*/ + constructor(nextPolicy, options) { + super(nextPolicy, options); + } } + +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. /** - * Get URL parameter by name. - * - * @param url - - * @param name - + * Credential is an abstract class for Azure Storage HTTP requests signing. This + * class will host an credentialPolicyCreator factory which generates CredentialPolicy. */ -function getURLParameter(url, name) { - const urlParsed = coreHttp.URLBuilder.parse(url); - return urlParsed.getQueryParameterValue(name); +class Credential { + /** + * Creates a RequestPolicy object. + * + * @param _nextPolicy - + * @param _options - + */ + create(_nextPolicy, _options) { + throw new Error("Method should be implemented in children classes."); + } } + +// Copyright (c) Microsoft Corporation. /** - * Set URL host. - * - * @param url - Source URL string - * @param host - New host string - * @returns An updated URL string + * AnonymousCredential provides a credentialPolicyCreator member used to create + * AnonymousCredentialPolicy objects. AnonymousCredentialPolicy is used with + * HTTP(S) requests that read public resources or for use with Shared Access + * Signatures (SAS). */ -function setURLHost(url, host) { - const urlParsed = coreHttp.URLBuilder.parse(url); - urlParsed.setHost(host); - return urlParsed.toString(); +class AnonymousCredential extends Credential { + /** + * Creates an {@link AnonymousCredentialPolicy} object. + * + * @param nextPolicy - + * @param options - + */ + create(nextPolicy, options) { + return new AnonymousCredentialPolicy(nextPolicy, options); + } } + +// Copyright (c) Microsoft Corporation. /** - * Get URL path from an URL string. - * - * @param url - Source URL string + * TelemetryPolicy is a policy used to tag user-agent header for every requests. */ -function getURLPath(url) { - const urlParsed = coreHttp.URLBuilder.parse(url); - return urlParsed.getPath(); +class TelemetryPolicy extends coreHttp.BaseRequestPolicy { + /** + * Creates an instance of TelemetryPolicy. + * @param nextPolicy - + * @param options - + * @param telemetry - + */ + constructor(nextPolicy, options, telemetry) { + super(nextPolicy, options); + this.telemetry = telemetry; + } + /** + * Sends out request. + * + * @param request - + */ + async sendRequest(request) { + if (coreHttp.isNode) { + if (!request.headers) { + request.headers = new coreHttp.HttpHeaders(); + } + if (!request.headers.get(HeaderConstants.USER_AGENT)) { + request.headers.set(HeaderConstants.USER_AGENT, this.telemetry); + } + } + return this._nextPolicy.sendRequest(request); + } } + +// Copyright (c) Microsoft Corporation. /** - * Get URL scheme from an URL string. + * TelemetryPolicyFactory is a factory class helping generating {@link TelemetryPolicy} objects. + */ +class TelemetryPolicyFactory { + /** + * Creates an instance of TelemetryPolicyFactory. 
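+ * @example (editorial sketch; the version and OS values shown are placeholders)
+ * const factory = new TelemetryPolicyFactory({ userAgentPrefix: "my-app/1.0" });
+ * // On Node.js, factory.telemetryString joins up to three parts:
+ * //   "my-app/1.0 azsdk-js-storageblob/12.12.0 (NODE-VERSION v18.16.0; Linux 5.15.0)"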
+ * @param telemetry - + */ + constructor(telemetry) { + const userAgentInfo = []; + if (coreHttp.isNode) { + if (telemetry) { + const telemetryString = telemetry.userAgentPrefix || ""; + if (telemetryString.length > 0 && userAgentInfo.indexOf(telemetryString) === -1) { + userAgentInfo.push(telemetryString); + } + } + // e.g. azsdk-js-storageblob/10.0.0 + const libInfo = `azsdk-js-storageblob/${SDK_VERSION}`; + if (userAgentInfo.indexOf(libInfo) === -1) { + userAgentInfo.push(libInfo); + } + // e.g. (NODE-VERSION 4.9.1; Windows_NT 10.0.16299) + let runtimeInfo = `(NODE-VERSION ${process.version})`; + if (os__namespace) { + runtimeInfo = `(NODE-VERSION ${process.version}; ${os__namespace.type()} ${os__namespace.release()})`; + } + if (userAgentInfo.indexOf(runtimeInfo) === -1) { + userAgentInfo.push(runtimeInfo); + } + } + this.telemetryString = userAgentInfo.join(" "); + } + /** + * Creates a TelemetryPolicy object. + * + * @param nextPolicy - + * @param options - + */ + create(nextPolicy, options) { + return new TelemetryPolicy(nextPolicy, options, this.telemetryString); + } +} + +// Copyright (c) Microsoft Corporation. +const _defaultHttpClient = new coreHttp.DefaultHttpClient(); +function getCachedDefaultHttpClient() { + return _defaultHttpClient; +} + +// Copyright (c) Microsoft Corporation. +/** + * A set of constants used internally when processing requests. + */ +const Constants = { + DefaultScope: "/.default", + /** + * Defines constants for use with HTTP headers. + */ + HeaderConstants: { + /** + * The Authorization header. + */ + AUTHORIZATION: "authorization", + }, +}; +// Default options for the cycler if none are provided +const DEFAULT_CYCLER_OPTIONS = { + forcedRefreshWindowInMs: 1000, + retryIntervalInMs: 3000, + refreshWindowInMs: 1000 * 60 * 2, // Start refreshing 2m before expiry +}; +/** + * Converts an an unreliable access token getter (which may resolve with null) + * into an AccessTokenGetter by retrying the unreliable getter in a regular + * interval. * - * @param url - Source URL string + * @param getAccessToken - a function that produces a promise of an access + * token that may fail by returning null + * @param retryIntervalInMs - the time (in milliseconds) to wait between retry + * attempts + * @param timeoutInMs - the timestamp after which the refresh attempt will fail, + * throwing an exception + * @returns - a promise that, if it resolves, will resolve with an access token */ -function getURLScheme(url) { - const urlParsed = coreHttp.URLBuilder.parse(url); - return urlParsed.getScheme(); +async function beginRefresh(getAccessToken, retryIntervalInMs, timeoutInMs) { + // This wrapper handles exceptions gracefully as long as we haven't exceeded + // the timeout. + async function tryGetAccessToken() { + if (Date.now() < timeoutInMs) { + try { + return await getAccessToken(); + } + catch (_a) { + return null; + } + } + else { + const finalToken = await getAccessToken(); + // Timeout is up, so throw if it's still null + if (finalToken === null) { + throw new Error("Failed to refresh access token."); + } + return finalToken; + } + } + let token = await tryGetAccessToken(); + while (token === null) { + await coreHttp.delay(retryIntervalInMs); + token = await tryGetAccessToken(); + } + return token; } /** - * Get URL path and query from an URL string. + * Creates a token cycler from a credential, scopes, and optional settings. * - * @param url - Source URL string + * A token cycler represents a way to reliably retrieve a valid access token + * from a TokenCredential. 
It will handle initializing the token, refreshing it + * when it nears expiration, and synchronizes refresh attempts to avoid + * concurrency hazards. + * + * @param credential - the underlying TokenCredential that provides the access + * token + * @param scopes - the scopes to request authorization for + * @param tokenCyclerOptions - optionally override default settings for the cycler + * + * @returns - a function that reliably produces a valid access token */ -function getURLPathAndQuery(url) { - const urlParsed = coreHttp.URLBuilder.parse(url); - const pathString = urlParsed.getPath(); - if (!pathString) { - throw new RangeError("Invalid url without valid path."); +function createTokenCycler(credential, scopes, tokenCyclerOptions) { + let refreshWorker = null; + let token = null; + const options = Object.assign(Object.assign({}, DEFAULT_CYCLER_OPTIONS), tokenCyclerOptions); + /** + * This little holder defines several predicates that we use to construct + * the rules of refreshing the token. + */ + const cycler = { + /** + * Produces true if a refresh job is currently in progress. + */ + get isRefreshing() { + return refreshWorker !== null; + }, + /** + * Produces true if the cycler SHOULD refresh (we are within the refresh + * window and not already refreshing) + */ + get shouldRefresh() { + var _a; + return (!cycler.isRefreshing && + ((_a = token === null || token === void 0 ? void 0 : token.expiresOnTimestamp) !== null && _a !== void 0 ? _a : 0) - options.refreshWindowInMs < Date.now()); + }, + /** + * Produces true if the cycler MUST refresh (null or nearly-expired + * token). + */ + get mustRefresh() { + return (token === null || token.expiresOnTimestamp - options.forcedRefreshWindowInMs < Date.now()); + }, + }; + /** + * Starts a refresh job or returns the existing job if one is already + * running. + */ + function refresh(getTokenOptions) { + var _a; + if (!cycler.isRefreshing) { + // We bind `scopes` here to avoid passing it around a lot + const tryGetAccessToken = () => credential.getToken(scopes, getTokenOptions); + // Take advantage of promise chaining to insert an assignment to `token` + // before the refresh can be considered done. + refreshWorker = beginRefresh(tryGetAccessToken, options.retryIntervalInMs, + // If we don't have a token, then we should timeout immediately + (_a = token === null || token === void 0 ? void 0 : token.expiresOnTimestamp) !== null && _a !== void 0 ? _a : Date.now()) + .then((_token) => { + refreshWorker = null; + token = _token; + return token; + }) + .catch((reason) => { + // We also should reset the refresher if we enter a failed state. All + // existing awaiters will throw, but subsequent requests will start a + // new retry chain. + refreshWorker = null; + token = null; + throw reason; + }); + } + return refreshWorker; } - let queryString = urlParsed.getQuery() || ""; - queryString = queryString.trim(); - if (queryString !== "") { - queryString = queryString.startsWith("?") ? queryString : `?${queryString}`; // Ensure query string start with '?' + return async (tokenOptions) => { + // + // Simple rules: + // - If we MUST refresh, then return the refresh task, blocking + // the pipeline until a token is available. + // - If we SHOULD refresh, then run refresh but don't return it + // (we can still use the cached token). + // - Return the token, since it's fine if we didn't return in + // step 1. 
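+ // (Editorial sketch, not upstream code: the same rules as a standalone
+ // classifier, using the DEFAULT_CYCLER_OPTIONS windows defined above.)
+ //   const classify = (token, now) =>
+ //     token === null || token.expiresOnTimestamp - 1000 < now ? "mustRefresh"
+ //       : token.expiresOnTimestamp - 2 * 60 * 1000 < now ? "shouldRefresh"
+ //       : "fresh";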
+ // + if (cycler.mustRefresh) + return refresh(tokenOptions); + if (cycler.shouldRefresh) { + refresh(tokenOptions); + } + return token; + }; +} +/** + * We will retrieve the challenge only if the response status code was 401, + * and if the response contained the header "WWW-Authenticate" with a non-empty value. + */ +function getChallenge(response) { + const challenge = response.headers.get("WWW-Authenticate"); + if (response.status === 401 && challenge) { + return challenge; } - return `${pathString}${queryString}`; + return; } /** - * Get URL query key value pairs from an URL string. + * Converts: `Bearer a="b" c="d"`. + * Into: `[ { a: 'b', c: 'd' }]`. * - * @param url - + * @internal */ -function getURLQueries(url) { - let queryString = coreHttp.URLBuilder.parse(url).getQuery(); - if (!queryString) { - return {}; - } - queryString = queryString.trim(); - queryString = queryString.startsWith("?") ? queryString.substr(1) : queryString; - let querySubStrings = queryString.split("&"); - querySubStrings = querySubStrings.filter((value) => { - const indexOfEqual = value.indexOf("="); - const lastIndexOfEqual = value.lastIndexOf("="); - return (indexOfEqual > 0 && indexOfEqual === lastIndexOfEqual && lastIndexOfEqual < value.length - 1); - }); - const queries = {}; - for (const querySubString of querySubStrings) { - const splitResults = querySubString.split("="); - const key = splitResults[0]; - const value = splitResults[1]; - queries[key] = value; - } - return queries; +function parseChallenge(challenge) { + const bearerChallenge = challenge.slice("Bearer ".length); + const challengeParts = `${bearerChallenge.trim()} `.split(" ").filter((x) => x); + const keyValuePairs = challengeParts.map((keyValue) => (([key, value]) => ({ [key]: value }))(keyValue.trim().split("="))); + // Key-value pairs to plain object: + return keyValuePairs.reduce((a, b) => (Object.assign(Object.assign({}, a), b)), {}); } +// #endregion /** - * Append a string to URL query. + * Creates a new factory for a RequestPolicy that applies a bearer token to + * the requests' `Authorization` headers. * - * @param url - Source URL string. - * @param queryParts - String to be appended to the URL query. - * @returns An updated URL string. + * @param credential - The TokenCredential implementation that can supply the bearer token. + * @param scopes - The scopes for which the bearer token applies. 
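+ * @example (editorial sketch; the tenant value is made up)
+ * // A 401 from the service carries a challenge header such as
+ * //   WWW-Authenticate: Bearer authorization_uri=https://login.microsoftonline.com/<tenant>/oauth2/authorize resource_id=https://storage.azure.com
+ * // parseChallenge() above turns it into { authorization_uri, resource_id }; the policy
+ * // then acquires a token for resource_id + "/.default" in the tenant taken from the
+ * // first path segment of authorization_uri, and resends the request once.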
*/ -function appendToURLQuery(url, queryParts) { - const urlParsed = coreHttp.URLBuilder.parse(url); - let query = urlParsed.getQuery(); - if (query) { - query += "&" + queryParts; - } - else { - query = queryParts; +function storageBearerTokenChallengeAuthenticationPolicy(credential, scopes) { + // This simple function encapsulates the entire process of reliably retrieving the token + let getToken = createTokenCycler(credential, scopes); + class StorageBearerTokenChallengeAuthenticationPolicy extends coreHttp.BaseRequestPolicy { + constructor(nextPolicy, options) { + super(nextPolicy, options); + } + async sendRequest(webResource) { + if (!webResource.url.toLowerCase().startsWith("https://")) { + throw new Error("Bearer token authentication is not permitted for non-TLS protected (non-https) URLs."); + } + const getTokenInternal = getToken; + const token = (await getTokenInternal({ + abortSignal: webResource.abortSignal, + tracingOptions: { + tracingContext: webResource.tracingContext, + }, + })).token; + webResource.headers.set(Constants.HeaderConstants.AUTHORIZATION, `Bearer ${token}`); + const response = await this._nextPolicy.sendRequest(webResource); + if ((response === null || response === void 0 ? void 0 : response.status) === 401) { + const challenge = getChallenge(response); + if (challenge) { + const challengeInfo = parseChallenge(challenge); + const challengeScopes = challengeInfo.resource_id + Constants.DefaultScope; + const parsedAuthUri = coreHttp.URLBuilder.parse(challengeInfo.authorization_uri); + const pathSegments = parsedAuthUri.getPath().split("/"); + const tenantId = pathSegments[1]; + const getTokenForChallenge = createTokenCycler(credential, challengeScopes); + const tokenForChallenge = (await getTokenForChallenge({ + abortSignal: webResource.abortSignal, + tracingOptions: { + tracingContext: webResource.tracingContext, + }, + tenantId: tenantId, + })).token; + getToken = getTokenForChallenge; + webResource.headers.set(Constants.HeaderConstants.AUTHORIZATION, `Bearer ${tokenForChallenge}`); + return this._nextPolicy.sendRequest(webResource); + } + } + return response; + } } - urlParsed.setQuery(query); - return urlParsed.toString(); + return { + create: (nextPolicy, options) => { + return new StorageBearerTokenChallengeAuthenticationPolicy(nextPolicy, options); + }, + }; } + +// Copyright (c) Microsoft Corporation. /** - * Rounds a date off to seconds. - * - * @param date - - * @param withMilliseconds - If true, YYYY-MM-DDThh:mm:ss.fffffffZ will be returned; - * If false, YYYY-MM-DDThh:mm:ssZ will be returned. - * @returns Date string in ISO8061 format, with or without 7 milliseconds component + * A helper to decide if a given argument satisfies the Pipeline contract + * @param pipeline - An argument that may be a Pipeline + * @returns true when the argument satisfies the Pipeline contract */ -function truncatedISO8061Date(date, withMilliseconds = true) { - // Date.toISOString() will return like "2018-10-29T06:34:36.139Z" - const dateString = date.toISOString(); - return withMilliseconds - ? dateString.substring(0, dateString.length - 1) + "0000" + "Z" - : dateString.substring(0, dateString.length - 5) + "Z"; +function isPipelineLike(pipeline) { + if (!pipeline || typeof pipeline !== "object") { + return false; + } + const castPipeline = pipeline; + return (Array.isArray(castPipeline.factories) && + typeof castPipeline.options === "object" && + typeof castPipeline.toServiceClientOptions === "function"); } /** - * Base64 encode. 
+ * A Pipeline class containing HTTP request policies. + * You can create a default Pipeline by calling {@link newPipeline}. + * Or you can create a Pipeline with your own policies by the constructor of Pipeline. * - * @param content - + * Refer to {@link newPipeline} and provided policies before implementing your + * customized Pipeline. */ -function base64encode(content) { - return !coreHttp.isNode ? btoa(content) : Buffer.from(content).toString("base64"); +class Pipeline { + /** + * Creates an instance of Pipeline. Customize HTTPClient by implementing IHttpClient interface. + * + * @param factories - + * @param options - + */ + constructor(factories, options = {}) { + this.factories = factories; + // when options.httpClient is not specified, passing in a DefaultHttpClient instance to + // avoid each client creating its own http client. + this.options = Object.assign(Object.assign({}, options), { httpClient: options.httpClient || getCachedDefaultHttpClient() }); + } + /** + * Transfer Pipeline object to ServiceClientOptions object which is required by + * ServiceClient constructor. + * + * @returns The ServiceClientOptions object from this Pipeline. + */ + toServiceClientOptions() { + return { + httpClient: this.options.httpClient, + requestPolicyFactories: this.factories, + }; + } } /** - * Generate a 64 bytes base64 block ID string. + * Creates a new Pipeline object with Credential provided. * - * @param blockIndex - + * @param credential - Such as AnonymousCredential, StorageSharedKeyCredential or any credential from the `@azure/identity` package to authenticate requests to the service. You can also provide an object that implements the TokenCredential interface. If not specified, AnonymousCredential is used. + * @param pipelineOptions - Optional. Options. + * @returns A new Pipeline object. */ -function generateBlockID(blockIDPrefix, blockIndex) { - // To generate a 64 bytes base64 string, source string should be 48 - const maxSourceStringLength = 48; - // A blob can have a maximum of 100,000 uncommitted blocks at any given time - const maxBlockIndexLength = 6; - const maxAllowedBlockIDPrefixLength = maxSourceStringLength - maxBlockIndexLength; - if (blockIDPrefix.length > maxAllowedBlockIDPrefixLength) { - blockIDPrefix = blockIDPrefix.slice(0, maxAllowedBlockIDPrefixLength); +function newPipeline(credential, pipelineOptions = {}) { + var _a; + if (credential === undefined) { + credential = new AnonymousCredential(); } - const res = blockIDPrefix + - padStart(blockIndex.toString(), maxSourceStringLength - blockIDPrefix.length, "0"); - return base64encode(res); + // Order is important. Closer to the API at the top & closer to the network at the bottom. 
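+ // (Editorial note, not upstream code: a typical call of newPipeline() through the
+ // public package looks like
+ //   const { newPipeline, AnonymousCredential, BlobServiceClient } = require("@azure/storage-blob");
+ //   const pipeline = newPipeline(new AnonymousCredential(), { retryOptions: { maxTries: 4 } });
+ //   const client = new BlobServiceClient("https://myaccount.blob.core.windows.net", pipeline);
+ // where the account name is a placeholder.)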
+ // The credential's policy factory must appear close to the wire so it can sign any + // changes made by other factories (like UniqueRequestIDPolicyFactory) + const telemetryPolicy = new TelemetryPolicyFactory(pipelineOptions.userAgentOptions); + const factories = [ + coreHttp.tracingPolicy({ userAgent: telemetryPolicy.telemetryString }), + coreHttp.keepAlivePolicy(pipelineOptions.keepAliveOptions), + telemetryPolicy, + coreHttp.generateClientRequestIdPolicy(), + new StorageBrowserPolicyFactory(), + new StorageRetryPolicyFactory(pipelineOptions.retryOptions), + // Default deserializationPolicy is provided by protocol layer + // Use customized XML char key of "#" so we could deserialize metadata + // with "_" key + coreHttp.deserializationPolicy(undefined, { xmlCharKey: "#" }), + coreHttp.logPolicy({ + logger: logger.info, + allowedHeaderNames: StorageBlobLoggingAllowedHeaderNames, + allowedQueryParameters: StorageBlobLoggingAllowedQueryParameters, + }), + ]; + if (coreHttp.isNode) { + // policies only available in Node.js runtime, not in browsers + factories.push(coreHttp.proxyPolicy(pipelineOptions.proxyOptions)); + factories.push(coreHttp.disableResponseDecompressionPolicy()); + } + factories.push(coreHttp.isTokenCredential(credential) + ? attachCredential(storageBearerTokenChallengeAuthenticationPolicy(credential, (_a = pipelineOptions.audience) !== null && _a !== void 0 ? _a : StorageOAuthScopes), credential) + : credential); + return new Pipeline(factories, pipelineOptions); } + +// Copyright (c) Microsoft Corporation. /** - * Delay specified time interval. - * - * @param timeInMs - - * @param aborter - - * @param abortError - + * StorageSharedKeyCredentialPolicy is a policy used to sign HTTP request with a shared key. */ -async function delay(timeInMs, aborter, abortError) { - return new Promise((resolve, reject) => { - /* eslint-disable-next-line prefer-const */ - let timeout; - const abortHandler = () => { - if (timeout !== undefined) { - clearTimeout(timeout); +class StorageSharedKeyCredentialPolicy extends CredentialPolicy { + /** + * Creates an instance of StorageSharedKeyCredentialPolicy. + * @param nextPolicy - + * @param options - + * @param factory - + */ + constructor(nextPolicy, options, factory) { + super(nextPolicy, options); + this.factory = factory; + } + /** + * Signs request. 
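+ * @example (editorial sketch; names are placeholders)
+ * // A minimal GET's string-to-sign: the verb, eleven standard-header slots (mostly
+ * // empty here), then the canonicalized x-ms-* headers and resource:
+ * const stringToSign = ["GET", "", "", "", "", "", "", "", "", "", "", ""].join("\n") +
+ *   "\n" + "x-ms-date:Tue, 18 Jul 2023 01:02:03 GMT\nx-ms-version:2021-10-04\n" +
+ *   "/myaccount/mycontainer";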
+ * + * @param request - + */ + signRequest(request) { + request.headers.set(HeaderConstants.X_MS_DATE, new Date().toUTCString()); + if (request.body && + (typeof request.body === "string" || request.body !== undefined) && + request.body.length > 0) { + request.headers.set(HeaderConstants.CONTENT_LENGTH, Buffer.byteLength(request.body)); + } + const stringToSign = [ + request.method.toUpperCase(), + this.getHeaderValueToSign(request, HeaderConstants.CONTENT_LANGUAGE), + this.getHeaderValueToSign(request, HeaderConstants.CONTENT_ENCODING), + this.getHeaderValueToSign(request, HeaderConstants.CONTENT_LENGTH), + this.getHeaderValueToSign(request, HeaderConstants.CONTENT_MD5), + this.getHeaderValueToSign(request, HeaderConstants.CONTENT_TYPE), + this.getHeaderValueToSign(request, HeaderConstants.DATE), + this.getHeaderValueToSign(request, HeaderConstants.IF_MODIFIED_SINCE), + this.getHeaderValueToSign(request, HeaderConstants.IF_MATCH), + this.getHeaderValueToSign(request, HeaderConstants.IF_NONE_MATCH), + this.getHeaderValueToSign(request, HeaderConstants.IF_UNMODIFIED_SINCE), + this.getHeaderValueToSign(request, HeaderConstants.RANGE), + ].join("\n") + + "\n" + + this.getCanonicalizedHeadersString(request) + + this.getCanonicalizedResourceString(request); + const signature = this.factory.computeHMACSHA256(stringToSign); + request.headers.set(HeaderConstants.AUTHORIZATION, `SharedKey ${this.factory.accountName}:${signature}`); + // console.log(`[URL]:${request.url}`); + // console.log(`[HEADERS]:${request.headers.toString()}`); + // console.log(`[STRING TO SIGN]:${JSON.stringify(stringToSign)}`); + // console.log(`[KEY]: ${request.headers.get(HeaderConstants.AUTHORIZATION)}`); + return request; + } + /** + * Retrieve header value according to shared key sign rules. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/authenticate-with-shared-key + * + * @param request - + * @param headerName - + */ + getHeaderValueToSign(request, headerName) { + const value = request.headers.get(headerName); + if (!value) { + return ""; + } + // When using version 2015-02-21 or later, if Content-Length is zero, then + // set the Content-Length part of the StringToSign to an empty string. + // https://docs.microsoft.com/en-us/rest/api/storageservices/authenticate-with-shared-key + if (headerName === HeaderConstants.CONTENT_LENGTH && value === "0") { + return ""; + } + return value; + } + /** + * To construct the CanonicalizedHeaders portion of the signature string, follow these steps: + * 1. Retrieve all headers for the resource that begin with x-ms-, including the x-ms-date header. + * 2. Convert each HTTP header name to lowercase. + * 3. Sort the headers lexicographically by header name, in ascending order. + * Each header may appear only once in the string. + * 4. Replace any linear whitespace in the header value with a single space. + * 5. Trim any whitespace around the colon in the header. + * 6. Finally, append a new-line character to each canonicalized header in the resulting list. + * Construct the CanonicalizedHeaders string by concatenating all headers in this list into a single string. 
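+ * @example (editorial sketch)
+ * // { "X-Ms-Version": "2021-10-04", "x-ms-date": "Tue, 18 Jul 2023 01:02:03 GMT" }
+ * // canonicalizes to
+ * //   "x-ms-date:Tue, 18 Jul 2023 01:02:03 GMT\nx-ms-version:2021-10-04\n"
+ * // (lowercased, sorted, deduplicated, one "name:value\n" entry per header)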
+ * + * @param request - + */ + getCanonicalizedHeadersString(request) { + let headersArray = request.headers.headersArray().filter((value) => { + return value.name.toLowerCase().startsWith(HeaderConstants.PREFIX_FOR_STORAGE); + }); + headersArray.sort((a, b) => { + return a.name.toLowerCase().localeCompare(b.name.toLowerCase()); + }); + // Remove duplicate headers + headersArray = headersArray.filter((value, index, array) => { + if (index > 0 && value.name.toLowerCase() === array[index - 1].name.toLowerCase()) { + return false; } - reject(abortError); - }; - const resolveHandler = () => { - if (aborter !== undefined) { - aborter.removeEventListener("abort", abortHandler); + return true; + }); + let canonicalizedHeadersStringToSign = ""; + headersArray.forEach((header) => { + canonicalizedHeadersStringToSign += `${header.name + .toLowerCase() + .trimRight()}:${header.value.trimLeft()}\n`; + }); + return canonicalizedHeadersStringToSign; + } + /** + * Retrieves the webResource canonicalized resource string. + * + * @param request - + */ + getCanonicalizedResourceString(request) { + const path = getURLPath(request.url) || "/"; + let canonicalizedResourceString = ""; + canonicalizedResourceString += `/${this.factory.accountName}${path}`; + const queries = getURLQueries(request.url); + const lowercaseQueries = {}; + if (queries) { + const queryKeys = []; + for (const key in queries) { + if (Object.prototype.hasOwnProperty.call(queries, key)) { + const lowercaseKey = key.toLowerCase(); + lowercaseQueries[lowercaseKey] = queries[key]; + queryKeys.push(lowercaseKey); + } + } + queryKeys.sort(); + for (const key of queryKeys) { + canonicalizedResourceString += `\n${key}:${decodeURIComponent(lowercaseQueries[key])}`; } - resolve(); - }; - timeout = setTimeout(resolveHandler, timeInMs); - if (aborter !== undefined) { - aborter.addEventListener("abort", abortHandler); } - }); + return canonicalizedResourceString; + } } + +// Copyright (c) Microsoft Corporation. /** - * String.prototype.padStart() + * ONLY AVAILABLE IN NODE.JS RUNTIME. * - * @param currentString - - * @param targetLength - - * @param padString - + * StorageSharedKeyCredential for account key authorization of Azure Storage service. */ -function padStart(currentString, targetLength, padString = " ") { - // @ts-expect-error: TS doesn't know this code needs to run downlevel sometimes - if (String.prototype.padStart) { - return currentString.padStart(targetLength, padString); +class StorageSharedKeyCredential extends Credential { + /** + * Creates an instance of StorageSharedKeyCredential. + * @param accountName - + * @param accountKey - + */ + constructor(accountName, accountKey) { + super(); + this.accountName = accountName; + this.accountKey = Buffer.from(accountKey, "base64"); } - padString = padString || " "; - if (currentString.length > targetLength) { - return currentString; + /** + * Creates a StorageSharedKeyCredentialPolicy object. + * + * @param nextPolicy - + * @param options - + */ + create(nextPolicy, options) { + return new StorageSharedKeyCredentialPolicy(nextPolicy, options, this); } - else { - targetLength = targetLength - currentString.length; - if (targetLength > padString.length) { - padString += padString.repeat(targetLength / padString.length); - } - return padString.slice(0, targetLength) + currentString; + /** + * Generates a hash signature for an HTTP request or for a SAS. 
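+ * @example (editorial sketch, equivalent to the implementation below; the key is a placeholder)
+ * const crypto = require("crypto");
+ * const signature = crypto
+ *   .createHmac("sha256", Buffer.from("BASE64_ACCOUNT_KEY", "base64"))
+ *   .update(stringToSign, "utf8")
+ *   .digest("base64");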
+ * + * @param stringToSign - + */ + computeHMACSHA256(stringToSign) { + return crypto.createHmac("sha256", this.accountKey).update(stringToSign, "utf8").digest("base64"); } } -/** - * If two strings are equal when compared case insensitive. + +/* + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT License. * - * @param str1 - - * @param str2 - - */ -function iEqual(str1, str2) { - return str1.toLocaleLowerCase() === str2.toLocaleLowerCase(); -} -/** - * Extracts account name from the url - * @param url - url to extract the account name from - * @returns with the account name + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -function getAccountNameFromUrl(url) { - const parsedUrl = coreHttp.URLBuilder.parse(url); - let accountName; - try { - if (parsedUrl.getHost().split(".")[1] === "blob") { - // `${defaultEndpointsProtocol}://${accountName}.blob.${endpointSuffix}`; - accountName = parsedUrl.getHost().split(".")[0]; +const packageName = "azure-storage-blob"; +const packageVersion = "12.12.0"; +class StorageClientContext extends coreHttp__namespace.ServiceClient { + /** + * Initializes a new instance of the StorageClientContext class. + * @param url The URL of the service account, container, or blob that is the target of the desired + * operation. + * @param options The parameter options + */ + constructor(url, options) { + if (url === undefined) { + throw new Error("'url' cannot be null"); } - else if (isIpEndpointStyle(parsedUrl)) { - // IPv4/IPv6 address hosts... Example - http://192.0.0.10:10001/devstoreaccount1/ - // Single word domain without a [dot] in the endpoint... Example - http://localhost:10001/devstoreaccount1/ - // .getPath() -> /devstoreaccount1/ - accountName = parsedUrl.getPath().split("/")[1]; + // Initializing default values for options + if (!options) { + options = {}; } - else { - // Custom domain case: "https://customdomain.com/containername/blob". - accountName = ""; + if (!options.userAgent) { + const defaultUserAgent = coreHttp__namespace.getDefaultUserAgentValue(); + options.userAgent = `${packageName}/${packageVersion} ${defaultUserAgent}`; } - return accountName; - } - catch (error) { - throw new Error("Unable to extract accountName with provided information."); - } -} -function isIpEndpointStyle(parsedUrl) { - if (parsedUrl.getHost() === undefined) { - return false; + super(undefined, options); + this.requestContentType = "application/json; charset=utf-8"; + this.baseUri = options.endpoint || "{url}"; + // Parameter assignments + this.url = url; + // Assigning values to Constant parameters + this.version = options.version || "2021-10-04"; } - const host = parsedUrl.getHost() + (parsedUrl.getPort() === undefined ? "" : ":" + parsedUrl.getPort()); - // Case 1: Ipv6, use a broad regex to find out candidates whose host contains two ':'. - // Case 2: localhost(:port), use broad regex to match port part. - // Case 3: Ipv4, use broad regex which just check if host contains Ipv4. - // For valid host please refer to https://man7.org/linux/man-pages/man7/hostname.7.html. - return (/^.*:.*:.*$|^localhost(:[0-9]+)?$|^(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])(\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])){3}(:[0-9]+)?$/.test(host) || - (parsedUrl.getPort() !== undefined && PathStylePorts.includes(parsedUrl.getPort()))); } + +// Copyright (c) Microsoft Corporation. /** - * Convert Tags to encoded string. 
- * - * @param tags - + * A StorageClient represents a based URL class for {@link BlobServiceClient}, {@link ContainerClient} + * and etc. */ -function toBlobTagsString(tags) { - if (tags === undefined) { - return undefined; - } - const tagPairs = []; - for (const key in tags) { - if (Object.prototype.hasOwnProperty.call(tags, key)) { - const value = tags[key]; - tagPairs.push(`${encodeURIComponent(key)}=${encodeURIComponent(value)}`); +class StorageClient { + /** + * Creates an instance of StorageClient. + * @param url - url to resource + * @param pipeline - request policy pipeline. + */ + constructor(url, pipeline) { + // URL should be encoded and only once, protocol layer shouldn't encode URL again + this.url = escapeURLPath(url); + this.accountName = getAccountNameFromUrl(url); + this.pipeline = pipeline; + this.storageClientContext = new StorageClientContext(this.url, pipeline.toServiceClientOptions()); + this.isHttps = iEqual(getURLScheme(this.url) || "", "https"); + this.credential = new AnonymousCredential(); + for (const factory of this.pipeline.factories) { + if ((coreHttp.isNode && factory instanceof StorageSharedKeyCredential) || + factory instanceof AnonymousCredential) { + this.credential = factory; + } + else if (coreHttp.isTokenCredential(factory.credential)) { + // Only works if the factory has been attached a "credential" property. + // We do that in newPipeline() when using TokenCredential. + this.credential = factory.credential; + } } + // Override protocol layer's default content-type + const storageClientContext = this.storageClientContext; + storageClientContext.requestContentType = undefined; } - return tagPairs.join("&"); } + +// Copyright (c) Microsoft Corporation. /** - * Convert Tags type to BlobTags. + * Creates a span using the global tracer. + * @internal + */ +const createSpan = coreTracing.createSpanFunction({ + packagePrefix: "Azure.Storage.Blob", + namespace: "Microsoft.Storage", +}); +/** + * @internal * - * @param tags - + * Adapt the tracing options from OperationOptions to what they need to be for + * RequestOptionsBase (when we update to later OpenTelemetry versions this is now + * two separate fields, not just one). */ -function toBlobTags(tags) { - if (tags === undefined) { - return undefined; - } - const res = { - blobTagSet: [], +function convertTracingToRequestOptionsBase(options) { + var _a, _b; + return { + // By passing spanOptions if they exist at runtime, we're backwards compatible with @azure/core-tracing@preview.13 and earlier. + spanOptions: (_a = options === null || options === void 0 ? void 0 : options.tracingOptions) === null || _a === void 0 ? void 0 : _a.spanOptions, + tracingContext: (_b = options === null || options === void 0 ? void 0 : options.tracingOptions) === null || _b === void 0 ? void 0 : _b.tracingContext, }; - for (const key in tags) { - if (Object.prototype.hasOwnProperty.call(tags, key)) { - const value = tags[key]; - res.blobTagSet.push({ - key, - value, - }); - } - } - return res; } + +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. /** - * Covert BlobTags to Tags type. + * ONLY AVAILABLE IN NODE.JS RUNTIME. * - * @param tags - + * This is a helper class to construct a string representing the permissions granted by a ServiceSAS to a blob. Setting + * a value to true means that any SAS which uses these permissions will grant permissions for that operation. 
Once all + * the values are set, this should be serialized with toString and set as the permissions field on a + * {@link BlobSASSignatureValues} object. It is possible to construct the permissions string without this class, but + * the order of the permissions is particular and this class guarantees correctness. */ -function toTags(tags) { - if (tags === undefined) { - return undefined; +class BlobSASPermissions { + constructor() { + /** + * Specifies Read access granted. + */ + this.read = false; + /** + * Specifies Add access granted. + */ + this.add = false; + /** + * Specifies Create access granted. + */ + this.create = false; + /** + * Specifies Write access granted. + */ + this.write = false; + /** + * Specifies Delete access granted. + */ + this.delete = false; + /** + * Specifies Delete version access granted. + */ + this.deleteVersion = false; + /** + * Specfies Tag access granted. + */ + this.tag = false; + /** + * Specifies Move access granted. + */ + this.move = false; + /** + * Specifies Execute access granted. + */ + this.execute = false; + /** + * Specifies SetImmutabilityPolicy access granted. + */ + this.setImmutabilityPolicy = false; + /** + * Specifies that Permanent Delete is permitted. + */ + this.permanentDelete = false; } - const res = {}; - for (const blobTag of tags.blobTagSet) { - res[blobTag.key] = blobTag.value; + /** + * Creates a {@link BlobSASPermissions} from the specified permissions string. This method will throw an + * Error if it encounters a character that does not correspond to a valid permission. + * + * @param permissions - + */ + static parse(permissions) { + const blobSASPermissions = new BlobSASPermissions(); + for (const char of permissions) { + switch (char) { + case "r": + blobSASPermissions.read = true; + break; + case "a": + blobSASPermissions.add = true; + break; + case "c": + blobSASPermissions.create = true; + break; + case "w": + blobSASPermissions.write = true; + break; + case "d": + blobSASPermissions.delete = true; + break; + case "x": + blobSASPermissions.deleteVersion = true; + break; + case "t": + blobSASPermissions.tag = true; + break; + case "m": + blobSASPermissions.move = true; + break; + case "e": + blobSASPermissions.execute = true; + break; + case "i": + blobSASPermissions.setImmutabilityPolicy = true; + break; + case "y": + blobSASPermissions.permanentDelete = true; + break; + default: + throw new RangeError(`Invalid permission: ${char}`); + } + } + return blobSASPermissions; + } + /** + * Creates a {@link BlobSASPermissions} from a raw object which contains same keys as it + * and boolean values for them. 
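+ * @example (editorial sketch)
+ * BlobSASPermissions.from({ read: true, write: true }).toString(); // "rw"
+ * BlobSASPermissions.parse("racwd").toString();                    // "racwd"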
+ * + * @param permissionLike - + */ + static from(permissionLike) { + const blobSASPermissions = new BlobSASPermissions(); + if (permissionLike.read) { + blobSASPermissions.read = true; + } + if (permissionLike.add) { + blobSASPermissions.add = true; + } + if (permissionLike.create) { + blobSASPermissions.create = true; + } + if (permissionLike.write) { + blobSASPermissions.write = true; + } + if (permissionLike.delete) { + blobSASPermissions.delete = true; + } + if (permissionLike.deleteVersion) { + blobSASPermissions.deleteVersion = true; + } + if (permissionLike.tag) { + blobSASPermissions.tag = true; + } + if (permissionLike.move) { + blobSASPermissions.move = true; + } + if (permissionLike.execute) { + blobSASPermissions.execute = true; + } + if (permissionLike.setImmutabilityPolicy) { + blobSASPermissions.setImmutabilityPolicy = true; + } + if (permissionLike.permanentDelete) { + blobSASPermissions.permanentDelete = true; + } + return blobSASPermissions; + } + /** + * Converts the given permissions to a string. Using this method will guarantee the permissions are in an + * order accepted by the service. + * + * @returns A string which represents the BlobSASPermissions + */ + toString() { + const permissions = []; + if (this.read) { + permissions.push("r"); + } + if (this.add) { + permissions.push("a"); + } + if (this.create) { + permissions.push("c"); + } + if (this.write) { + permissions.push("w"); + } + if (this.delete) { + permissions.push("d"); + } + if (this.deleteVersion) { + permissions.push("x"); + } + if (this.tag) { + permissions.push("t"); + } + if (this.move) { + permissions.push("m"); + } + if (this.execute) { + permissions.push("e"); + } + if (this.setImmutabilityPolicy) { + permissions.push("i"); + } + if (this.permanentDelete) { + permissions.push("y"); + } + return permissions.join(""); } - return res; } + +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. /** - * Convert BlobQueryTextConfiguration to QuerySerialization type. - * - * @param textConfiguration - + * This is a helper class to construct a string representing the permissions granted by a ServiceSAS to a container. + * Setting a value to true means that any SAS which uses these permissions will grant permissions for that operation. + * Once all the values are set, this should be serialized with toString and set as the permissions field on a + * {@link BlobSASSignatureValues} object. It is possible to construct the permissions string without this class, but + * the order of the permissions is particular and this class guarantees correctness. 
*/ -function toQuerySerialization(textConfiguration) { - if (textConfiguration === undefined) { - return undefined; - } - switch (textConfiguration.kind) { - case "csv": - return { - format: { - type: "delimited", - delimitedTextConfiguration: { - columnSeparator: textConfiguration.columnSeparator || ",", - fieldQuote: textConfiguration.fieldQuote || "", - recordSeparator: textConfiguration.recordSeparator, - escapeChar: textConfiguration.escapeCharacter || "", - headersPresent: textConfiguration.hasHeaders || false, - }, - }, - }; - case "json": - return { - format: { - type: "json", - jsonTextConfiguration: { - recordSeparator: textConfiguration.recordSeparator, - }, - }, - }; - case "arrow": - return { - format: { - type: "arrow", - arrowConfiguration: { - schema: textConfiguration.schema, - }, - }, - }; - case "parquet": - return { - format: { - type: "parquet", - }, - }; - default: - throw Error("Invalid BlobQueryTextConfiguration."); +class ContainerSASPermissions { + constructor() { + /** + * Specifies Read access granted. + */ + this.read = false; + /** + * Specifies Add access granted. + */ + this.add = false; + /** + * Specifies Create access granted. + */ + this.create = false; + /** + * Specifies Write access granted. + */ + this.write = false; + /** + * Specifies Delete access granted. + */ + this.delete = false; + /** + * Specifies Delete version access granted. + */ + this.deleteVersion = false; + /** + * Specifies List access granted. + */ + this.list = false; + /** + * Specfies Tag access granted. + */ + this.tag = false; + /** + * Specifies Move access granted. + */ + this.move = false; + /** + * Specifies Execute access granted. + */ + this.execute = false; + /** + * Specifies SetImmutabilityPolicy access granted. + */ + this.setImmutabilityPolicy = false; + /** + * Specifies that Permanent Delete is permitted. + */ + this.permanentDelete = false; + /** + * Specifies that Filter Blobs by Tags is permitted. + */ + this.filterByTags = false; } -} -function parseObjectReplicationRecord(objectReplicationRecord) { - if (!objectReplicationRecord) { - return undefined; + /** + * Creates an {@link ContainerSASPermissions} from the specified permissions string. This method will throw an + * Error if it encounters a character that does not correspond to a valid permission. 
+ * + * @param permissions - + */ + static parse(permissions) { + const containerSASPermissions = new ContainerSASPermissions(); + for (const char of permissions) { + switch (char) { + case "r": + containerSASPermissions.read = true; + break; + case "a": + containerSASPermissions.add = true; + break; + case "c": + containerSASPermissions.create = true; + break; + case "w": + containerSASPermissions.write = true; + break; + case "d": + containerSASPermissions.delete = true; + break; + case "l": + containerSASPermissions.list = true; + break; + case "t": + containerSASPermissions.tag = true; + break; + case "x": + containerSASPermissions.deleteVersion = true; + break; + case "m": + containerSASPermissions.move = true; + break; + case "e": + containerSASPermissions.execute = true; + break; + case "i": + containerSASPermissions.setImmutabilityPolicy = true; + break; + case "y": + containerSASPermissions.permanentDelete = true; + break; + case "f": + containerSASPermissions.filterByTags = true; + break; + default: + throw new RangeError(`Invalid permission ${char}`); + } + } + return containerSASPermissions; } - if ("policy-id" in objectReplicationRecord) { - // If the dictionary contains a key with policy id, we are not required to do any parsing since - // the policy id should already be stored in the ObjectReplicationDestinationPolicyId. - return undefined; + /** + * Creates a {@link ContainerSASPermissions} from a raw object which contains same keys as it + * and boolean values for them. + * + * @param permissionLike - + */ + static from(permissionLike) { + const containerSASPermissions = new ContainerSASPermissions(); + if (permissionLike.read) { + containerSASPermissions.read = true; + } + if (permissionLike.add) { + containerSASPermissions.add = true; + } + if (permissionLike.create) { + containerSASPermissions.create = true; + } + if (permissionLike.write) { + containerSASPermissions.write = true; + } + if (permissionLike.delete) { + containerSASPermissions.delete = true; + } + if (permissionLike.list) { + containerSASPermissions.list = true; + } + if (permissionLike.deleteVersion) { + containerSASPermissions.deleteVersion = true; + } + if (permissionLike.tag) { + containerSASPermissions.tag = true; + } + if (permissionLike.move) { + containerSASPermissions.move = true; + } + if (permissionLike.execute) { + containerSASPermissions.execute = true; + } + if (permissionLike.setImmutabilityPolicy) { + containerSASPermissions.setImmutabilityPolicy = true; + } + if (permissionLike.permanentDelete) { + containerSASPermissions.permanentDelete = true; + } + if (permissionLike.filterByTags) { + containerSASPermissions.filterByTags = true; + } + return containerSASPermissions; } - const orProperties = []; - for (const key in objectReplicationRecord) { - const ids = key.split("_"); - const policyPrefix = "or-"; - if (ids[0].startsWith(policyPrefix)) { - ids[0] = ids[0].substring(policyPrefix.length); + /** + * Converts the given permissions to a string. Using this method will guarantee the permissions are in an + * order accepted by the service. + * + * The order of the characters should be as specified here to ensure correctness. 
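+ * @example (editorial sketch)
+ * ContainerSASPermissions.parse("lr").toString(); // "rl" - reordered into the service order
+ * // The container flavor adds "l" (list) and "f" (filter blobs by tags) over the blob one.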
+ * @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas + * + */ + toString() { + const permissions = []; + if (this.read) { + permissions.push("r"); + } + if (this.add) { + permissions.push("a"); + } + if (this.create) { + permissions.push("c"); + } + if (this.write) { + permissions.push("w"); + } + if (this.delete) { + permissions.push("d"); + } + if (this.deleteVersion) { + permissions.push("x"); + } + if (this.list) { + permissions.push("l"); + } + if (this.tag) { + permissions.push("t"); + } + if (this.move) { + permissions.push("m"); } - const rule = { - ruleId: ids[1], - replicationStatus: objectReplicationRecord[key], - }; - const policyIndex = orProperties.findIndex((policy) => policy.policyId === ids[0]); - if (policyIndex > -1) { - orProperties[policyIndex].rules.push(rule); + if (this.execute) { + permissions.push("e"); } - else { - orProperties.push({ - policyId: ids[0], - rules: [rule], - }); + if (this.setImmutabilityPolicy) { + permissions.push("i"); + } + if (this.permanentDelete) { + permissions.push("y"); } + if (this.filterByTags) { + permissions.push("f"); + } + return permissions.join(""); } - return orProperties; } + +// Copyright (c) Microsoft Corporation. /** - * Attach a TokenCredential to an object. + * ONLY AVAILABLE IN NODE.JS RUNTIME. * - * @param thing - - * @param credential - + * UserDelegationKeyCredential is only used for generation of user delegation SAS. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas */ -function attachCredential(thing, credential) { - thing.credential = credential; - return thing; -} -function httpAuthorizationToString(httpAuthorization) { - return httpAuthorization ? httpAuthorization.scheme + " " + httpAuthorization.value : undefined; -} -function BlobNameToString(name) { - if (name.encoded) { - return decodeURIComponent(name.content); +class UserDelegationKeyCredential { + /** + * Creates an instance of UserDelegationKeyCredential. + * @param accountName - + * @param userDelegationKey - + */ + constructor(accountName, userDelegationKey) { + this.accountName = accountName; + this.userDelegationKey = userDelegationKey; + this.key = Buffer.from(userDelegationKey.value, "base64"); } - else { - return name.content; + /** + * Generates a hash signature for an HTTP request or for a SAS. + * + * @param stringToSign - + */ + computeHMACSHA256(stringToSign) { + // console.log(`stringToSign: ${JSON.stringify(stringToSign)}`); + return crypto.createHmac("sha256", this.key).update(stringToSign, "utf8").digest("base64"); } } -function ConvertInternalResponseOfListBlobFlat(internalResponse) { - return Object.assign(Object.assign({}, internalResponse), { segment: { - blobItems: internalResponse.segment.blobItems.map((blobItemInteral) => { - const blobItem = Object.assign(Object.assign({}, blobItemInteral), { name: BlobNameToString(blobItemInteral.name) }); - return blobItem; - }), - } }); -} -function ConvertInternalResponseOfListBlobHierarchy(internalResponse) { - var _a; - return Object.assign(Object.assign({}, internalResponse), { segment: { - blobPrefixes: (_a = internalResponse.segment.blobPrefixes) === null || _a === void 0 ? 
void 0 : _a.map((blobPrefixInternal) => { - const blobPrefix = { - name: BlobNameToString(blobPrefixInternal.name), - }; - return blobPrefix; - }), - blobItems: internalResponse.segment.blobItems.map((blobItemInteral) => { - const blobItem = Object.assign(Object.assign({}, blobItemInteral), { name: BlobNameToString(blobItemInteral.name) }); - return blobItem; - }), - } }); + +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +/** + * Generate SasIPRange format string. For example: + * + * "8.8.8.8" or "1.1.1.1-255.255.255.255" + * + * @param ipRange - + */ +function ipRangeToString(ipRange) { + return ipRange.end ? `${ipRange.start}-${ipRange.end}` : ipRange.start; } -function decodeBase64String(value) { - if (coreHttp.isNode) { - return Buffer.from(value, "base64"); - } - else { - const byteString = atob(value); - const arr = new Uint8Array(byteString.length); - for (let i = 0; i < byteString.length; i++) { - arr[i] = byteString.charCodeAt(i); + +// Copyright (c) Microsoft Corporation. +/** + * Protocols for generated SAS. + */ +exports.SASProtocol = void 0; +(function (SASProtocol) { + /** + * Protocol that allows HTTPS only + */ + SASProtocol["Https"] = "https"; + /** + * Protocol that allows both HTTPS and HTTP + */ + SASProtocol["HttpsAndHttp"] = "https,http"; +})(exports.SASProtocol || (exports.SASProtocol = {})); +/** + * Represents the components that make up an Azure Storage SAS' query parameters. This type is not constructed directly + * by the user; it is only generated by the {@link AccountSASSignatureValues} and {@link BlobSASSignatureValues} + * types. Once generated, it can be encoded into a {@link String} and appended to a URL directly (though caution should + * be taken here in case there are existing query parameters, which might affect the appropriate means of appending + * these query parameters). + * + * NOTE: Instances of this class are immutable. 
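+ * @example (editorial sketch; the signature value is a placeholder)
+ * // toString() below emits URL-encoded key=value pairs in a fixed order, e.g.
+ * //   sv=2021-10-04&se=2023-07-19T00%3A00%3A00Z&sr=b&sp=r&sig=<signature>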
+ */ +class SASQueryParameters { + constructor(version, signature, permissionsOrOptions, services, resourceTypes, protocol, startsOn, expiresOn, ipRange, identifier, resource, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType, userDelegationKey, preauthorizedAgentObjectId, correlationId, encryptionScope) { + this.version = version; + this.signature = signature; + if (permissionsOrOptions !== undefined && typeof permissionsOrOptions !== "string") { + // SASQueryParametersOptions + this.permissions = permissionsOrOptions.permissions; + this.services = permissionsOrOptions.services; + this.resourceTypes = permissionsOrOptions.resourceTypes; + this.protocol = permissionsOrOptions.protocol; + this.startsOn = permissionsOrOptions.startsOn; + this.expiresOn = permissionsOrOptions.expiresOn; + this.ipRangeInner = permissionsOrOptions.ipRange; + this.identifier = permissionsOrOptions.identifier; + this.encryptionScope = permissionsOrOptions.encryptionScope; + this.resource = permissionsOrOptions.resource; + this.cacheControl = permissionsOrOptions.cacheControl; + this.contentDisposition = permissionsOrOptions.contentDisposition; + this.contentEncoding = permissionsOrOptions.contentEncoding; + this.contentLanguage = permissionsOrOptions.contentLanguage; + this.contentType = permissionsOrOptions.contentType; + if (permissionsOrOptions.userDelegationKey) { + this.signedOid = permissionsOrOptions.userDelegationKey.signedObjectId; + this.signedTenantId = permissionsOrOptions.userDelegationKey.signedTenantId; + this.signedStartsOn = permissionsOrOptions.userDelegationKey.signedStartsOn; + this.signedExpiresOn = permissionsOrOptions.userDelegationKey.signedExpiresOn; + this.signedService = permissionsOrOptions.userDelegationKey.signedService; + this.signedVersion = permissionsOrOptions.userDelegationKey.signedVersion; + this.preauthorizedAgentObjectId = permissionsOrOptions.preauthorizedAgentObjectId; + this.correlationId = permissionsOrOptions.correlationId; + } + } + else { + this.services = services; + this.resourceTypes = resourceTypes; + this.expiresOn = expiresOn; + this.permissions = permissionsOrOptions; + this.protocol = protocol; + this.startsOn = startsOn; + this.ipRangeInner = ipRange; + this.encryptionScope = encryptionScope; + this.identifier = identifier; + this.resource = resource; + this.cacheControl = cacheControl; + this.contentDisposition = contentDisposition; + this.contentEncoding = contentEncoding; + this.contentLanguage = contentLanguage; + this.contentType = contentType; + if (userDelegationKey) { + this.signedOid = userDelegationKey.signedObjectId; + this.signedTenantId = userDelegationKey.signedTenantId; + this.signedStartsOn = userDelegationKey.signedStartsOn; + this.signedExpiresOn = userDelegationKey.signedExpiresOn; + this.signedService = userDelegationKey.signedService; + this.signedVersion = userDelegationKey.signedVersion; + this.preauthorizedAgentObjectId = preauthorizedAgentObjectId; + this.correlationId = correlationId; + } } - return arr; } -} -function ParseBoolean(content) { - if (content === undefined) + /** + * Optional. IP range allowed for this SAS. 
+ * + * @readonly + */ + get ipRange() { + if (this.ipRangeInner) { + return { + end: this.ipRangeInner.end, + start: this.ipRangeInner.start, + }; + } return undefined; - if (content === "true") - return true; - if (content === "false") - return false; - return undefined; -} -function ParseBlobName(blobNameInXML) { - if (blobNameInXML["$"] !== undefined && blobNameInXML["#"] !== undefined) { - return { - encoded: ParseBoolean(blobNameInXML["$"]["Encoded"]), - content: blobNameInXML["#"], - }; - } - else { - return { - encoded: false, - content: blobNameInXML, - }; - } -} -function ParseBlobProperties(blobPropertiesInXML) { - const blobProperties = blobPropertiesInXML; - if (blobPropertiesInXML["Creation-Time"]) { - blobProperties.createdOn = new Date(blobPropertiesInXML["Creation-Time"]); - delete blobProperties["Creation-Time"]; - } - if (blobPropertiesInXML["Last-Modified"]) { - blobProperties.lastModified = new Date(blobPropertiesInXML["Last-Modified"]); - delete blobProperties["Last-Modified"]; - } - if (blobPropertiesInXML["Etag"]) { - blobProperties.etag = blobPropertiesInXML["Etag"]; - delete blobProperties["Etag"]; - } - if (blobPropertiesInXML["Content-Length"]) { - blobProperties.contentLength = parseFloat(blobPropertiesInXML["Content-Length"]); - delete blobProperties["Content-Length"]; - } - if (blobPropertiesInXML["Content-Type"]) { - blobProperties.contentType = blobPropertiesInXML["Content-Type"]; - delete blobProperties["Content-Type"]; - } - if (blobPropertiesInXML["Content-Encoding"]) { - blobProperties.contentEncoding = blobPropertiesInXML["Content-Encoding"]; - delete blobProperties["Content-Encoding"]; - } - if (blobPropertiesInXML["Content-Language"]) { - blobProperties.contentLanguage = blobPropertiesInXML["Content-Language"]; - delete blobProperties["Content-Language"]; - } - if (blobPropertiesInXML["Content-MD5"]) { - blobProperties.contentMD5 = decodeBase64String(blobPropertiesInXML["Content-MD5"]); - delete blobProperties["Content-MD5"]; - } - if (blobPropertiesInXML["Content-Disposition"]) { - blobProperties.contentDisposition = blobPropertiesInXML["Content-Disposition"]; - delete blobProperties["Content-Disposition"]; - } - if (blobPropertiesInXML["Cache-Control"]) { - blobProperties.cacheControl = blobPropertiesInXML["Cache-Control"]; - delete blobProperties["Cache-Control"]; - } - if (blobPropertiesInXML["x-ms-blob-sequence-number"]) { - blobProperties.blobSequenceNumber = parseFloat(blobPropertiesInXML["x-ms-blob-sequence-number"]); - delete blobProperties["x-ms-blob-sequence-number"]; - } - if (blobPropertiesInXML["BlobType"]) { - blobProperties.blobType = blobPropertiesInXML["BlobType"]; - delete blobProperties["BlobType"]; - } - if (blobPropertiesInXML["LeaseStatus"]) { - blobProperties.leaseStatus = blobPropertiesInXML["LeaseStatus"]; - delete blobProperties["LeaseStatus"]; - } - if (blobPropertiesInXML["LeaseState"]) { - blobProperties.leaseState = blobPropertiesInXML["LeaseState"]; - delete blobProperties["LeaseState"]; - } - if (blobPropertiesInXML["LeaseDuration"]) { - blobProperties.leaseDuration = blobPropertiesInXML["LeaseDuration"]; - delete blobProperties["LeaseDuration"]; - } - if (blobPropertiesInXML["CopyId"]) { - blobProperties.copyId = blobPropertiesInXML["CopyId"]; - delete blobProperties["CopyId"]; - } - if (blobPropertiesInXML["CopyStatus"]) { - blobProperties.copyStatus = blobPropertiesInXML["CopyStatus"]; - delete blobProperties["CopyStatus"]; - } - if (blobPropertiesInXML["CopySource"]) { - blobProperties.copySource = 
blobPropertiesInXML["CopySource"]; - delete blobProperties["CopySource"]; - } - if (blobPropertiesInXML["CopyProgress"]) { - blobProperties.copyProgress = blobPropertiesInXML["CopyProgress"]; - delete blobProperties["CopyProgress"]; - } - if (blobPropertiesInXML["CopyCompletionTime"]) { - blobProperties.copyCompletedOn = new Date(blobPropertiesInXML["CopyCompletionTime"]); - delete blobProperties["CopyCompletionTime"]; - } - if (blobPropertiesInXML["CopyStatusDescription"]) { - blobProperties.copyStatusDescription = blobPropertiesInXML["CopyStatusDescription"]; - delete blobProperties["CopyStatusDescription"]; } - if (blobPropertiesInXML["ServerEncrypted"]) { - blobProperties.serverEncrypted = ParseBoolean(blobPropertiesInXML["ServerEncrypted"]); - delete blobProperties["ServerEncrypted"]; + /** + * Encodes all SAS query parameters into a string that can be appended to a URL. + * + */ + toString() { + const params = [ + "sv", + "ss", + "srt", + "spr", + "st", + "se", + "sip", + "si", + "ses", + "skoid", + "sktid", + "skt", + "ske", + "sks", + "skv", + "sr", + "sp", + "sig", + "rscc", + "rscd", + "rsce", + "rscl", + "rsct", + "saoid", + "scid", + ]; + const queries = []; + for (const param of params) { + switch (param) { + case "sv": + this.tryAppendQueryParameter(queries, param, this.version); + break; + case "ss": + this.tryAppendQueryParameter(queries, param, this.services); + break; + case "srt": + this.tryAppendQueryParameter(queries, param, this.resourceTypes); + break; + case "spr": + this.tryAppendQueryParameter(queries, param, this.protocol); + break; + case "st": + this.tryAppendQueryParameter(queries, param, this.startsOn ? truncatedISO8061Date(this.startsOn, false) : undefined); + break; + case "se": + this.tryAppendQueryParameter(queries, param, this.expiresOn ? truncatedISO8061Date(this.expiresOn, false) : undefined); + break; + case "sip": + this.tryAppendQueryParameter(queries, param, this.ipRange ? ipRangeToString(this.ipRange) : undefined); + break; + case "si": + this.tryAppendQueryParameter(queries, param, this.identifier); + break; + case "ses": + this.tryAppendQueryParameter(queries, param, this.encryptionScope); + break; + case "skoid": // Signed object ID + this.tryAppendQueryParameter(queries, param, this.signedOid); + break; + case "sktid": // Signed tenant ID + this.tryAppendQueryParameter(queries, param, this.signedTenantId); + break; + case "skt": // Signed key start time + this.tryAppendQueryParameter(queries, param, this.signedStartsOn ? truncatedISO8061Date(this.signedStartsOn, false) : undefined); + break; + case "ske": // Signed key expiry time + this.tryAppendQueryParameter(queries, param, this.signedExpiresOn ? 
truncatedISO8061Date(this.signedExpiresOn, false) : undefined); + break; + case "sks": // Signed key service + this.tryAppendQueryParameter(queries, param, this.signedService); + break; + case "skv": // Signed key version + this.tryAppendQueryParameter(queries, param, this.signedVersion); + break; + case "sr": + this.tryAppendQueryParameter(queries, param, this.resource); + break; + case "sp": + this.tryAppendQueryParameter(queries, param, this.permissions); + break; + case "sig": + this.tryAppendQueryParameter(queries, param, this.signature); + break; + case "rscc": + this.tryAppendQueryParameter(queries, param, this.cacheControl); + break; + case "rscd": + this.tryAppendQueryParameter(queries, param, this.contentDisposition); + break; + case "rsce": + this.tryAppendQueryParameter(queries, param, this.contentEncoding); + break; + case "rscl": + this.tryAppendQueryParameter(queries, param, this.contentLanguage); + break; + case "rsct": + this.tryAppendQueryParameter(queries, param, this.contentType); + break; + case "saoid": + this.tryAppendQueryParameter(queries, param, this.preauthorizedAgentObjectId); + break; + case "scid": + this.tryAppendQueryParameter(queries, param, this.correlationId); + break; + } + } + return queries.join("&"); } - if (blobPropertiesInXML["IncrementalCopy"]) { - blobProperties.incrementalCopy = ParseBoolean(blobPropertiesInXML["IncrementalCopy"]); - delete blobProperties["IncrementalCopy"]; + /** + * A private helper method used to filter and append query key/value pairs into an array. + * + * @param queries - + * @param key - + * @param value - + */ + tryAppendQueryParameter(queries, key, value) { + if (!value) { + return; + } + key = encodeURIComponent(key); + value = encodeURIComponent(value); + if (key.length > 0 && value.length > 0) { + queries.push(`${key}=${value}`); + } } - if (blobPropertiesInXML["DestinationSnapshot"]) { - blobProperties.destinationSnapshot = blobPropertiesInXML["DestinationSnapshot"]; - delete blobProperties["DestinationSnapshot"]; +} + +// Copyright (c) Microsoft Corporation. +function generateBlobSASQueryParameters(blobSASSignatureValues, sharedKeyCredentialOrUserDelegationKey, accountName) { + const version = blobSASSignatureValues.version ? blobSASSignatureValues.version : SERVICE_VERSION; + const sharedKeyCredential = sharedKeyCredentialOrUserDelegationKey instanceof StorageSharedKeyCredential + ? sharedKeyCredentialOrUserDelegationKey + : undefined; + let userDelegationKeyCredential; + if (sharedKeyCredential === undefined && accountName !== undefined) { + userDelegationKeyCredential = new UserDelegationKeyCredential(accountName, sharedKeyCredentialOrUserDelegationKey); } - if (blobPropertiesInXML["DeletedTime"]) { - blobProperties.deletedOn = new Date(blobPropertiesInXML["DeletedTime"]); - delete blobProperties["DeletedTime"]; + if (sharedKeyCredential === undefined && userDelegationKeyCredential === undefined) { + throw TypeError("Invalid sharedKeyCredential, userDelegationKey or accountName."); } - if (blobPropertiesInXML["RemainingRetentionDays"]) { - blobProperties.remainingRetentionDays = parseFloat(blobPropertiesInXML["RemainingRetentionDays"]); - delete blobProperties["RemainingRetentionDays"]; + // Version 2020-12-06 adds support for encryptionscope in SAS. 
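// Note: the version gates below compare service API versions as plain strings; this is
// sound because versions are fixed-width "YYYY-MM-DD" dates, so lexicographic order
// matches chronological order. Illustrative sketch:
//   "2020-12-06" >= "2018-11-09"   // true
//   "2019-10-10" <  "2020-02-10"   // true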
+ if (version >= "2020-12-06") { + if (sharedKeyCredential !== undefined) { + return generateBlobSASQueryParameters20201206(blobSASSignatureValues, sharedKeyCredential); + } + else { + return generateBlobSASQueryParametersUDK20201206(blobSASSignatureValues, userDelegationKeyCredential); + } } - if (blobPropertiesInXML["AccessTier"]) { - blobProperties.accessTier = blobPropertiesInXML["AccessTier"]; - delete blobProperties["AccessTier"]; + // Version 2019-12-12 adds support for the blob tags permission. + // Version 2018-11-09 adds support for the signed resource and signed blob snapshot time fields. + // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas#constructing-the-signature-string + if (version >= "2018-11-09") { + if (sharedKeyCredential !== undefined) { + return generateBlobSASQueryParameters20181109(blobSASSignatureValues, sharedKeyCredential); + } + else { + // Version 2020-02-10 delegation SAS signature construction includes preauthorizedAgentObjectId, agentObjectId, correlationId. + if (version >= "2020-02-10") { + return generateBlobSASQueryParametersUDK20200210(blobSASSignatureValues, userDelegationKeyCredential); + } + else { + return generateBlobSASQueryParametersUDK20181109(blobSASSignatureValues, userDelegationKeyCredential); + } + } } - if (blobPropertiesInXML["AccessTierInferred"]) { - blobProperties.accessTierInferred = ParseBoolean(blobPropertiesInXML["AccessTierInferred"]); - delete blobProperties["AccessTierInferred"]; + if (version >= "2015-04-05") { + if (sharedKeyCredential !== undefined) { + return generateBlobSASQueryParameters20150405(blobSASSignatureValues, sharedKeyCredential); + } + else { + throw new RangeError("'version' must be >= '2018-11-09' when generating user delegation SAS using user delegation key."); + } } - if (blobPropertiesInXML["ArchiveStatus"]) { - blobProperties.archiveStatus = blobPropertiesInXML["ArchiveStatus"]; - delete blobProperties["ArchiveStatus"]; + throw new RangeError("'version' must be >= '2015-04-05'."); +} +/** + * ONLY AVAILABLE IN NODE.JS RUNTIME. + * IMPLEMENTATION FOR API VERSION FROM 2015-04-05 AND BEFORE 2018-11-09. + * + * Creates an instance of SASQueryParameters. + * + * Only accepts required settings needed to create a SAS. For optional settings please + * set corresponding properties directly, such as permissions, startsOn and identifier. + * + * WARNING: When identifier is not provided, permissions and expiresOn are required. + * You MUST assign value to identifier or expiresOn & permissions manually if you initial with + * this constructor. 
+ * + * @param blobSASSignatureValues - + * @param sharedKeyCredential - + */ +function generateBlobSASQueryParameters20150405(blobSASSignatureValues, sharedKeyCredential) { + blobSASSignatureValues = SASSignatureValuesSanityCheckAndAutofill(blobSASSignatureValues); + if (!blobSASSignatureValues.identifier && + !(blobSASSignatureValues.permissions && blobSASSignatureValues.expiresOn)) { + throw new RangeError("Must provide 'permissions' and 'expiresOn' for Blob SAS generation when 'identifier' is not provided."); } - if (blobPropertiesInXML["CustomerProvidedKeySha256"]) { - blobProperties.customerProvidedKeySha256 = blobPropertiesInXML["CustomerProvidedKeySha256"]; - delete blobProperties["CustomerProvidedKeySha256"]; + let resource = "c"; + if (blobSASSignatureValues.blobName) { + resource = "b"; } - if (blobPropertiesInXML["EncryptionScope"]) { - blobProperties.encryptionScope = blobPropertiesInXML["EncryptionScope"]; - delete blobProperties["EncryptionScope"]; + // Calling parse and toString guarantees the proper ordering and throws on invalid characters. + let verifiedPermissions; + if (blobSASSignatureValues.permissions) { + if (blobSASSignatureValues.blobName) { + verifiedPermissions = BlobSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + } + else { + verifiedPermissions = ContainerSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + } } - if (blobPropertiesInXML["AccessTierChangeTime"]) { - blobProperties.accessTierChangedOn = new Date(blobPropertiesInXML["AccessTierChangeTime"]); - delete blobProperties["AccessTierChangeTime"]; + // Signature is generated on the un-url-encoded values. + const stringToSign = [ + verifiedPermissions ? verifiedPermissions : "", + blobSASSignatureValues.startsOn + ? truncatedISO8061Date(blobSASSignatureValues.startsOn, false) + : "", + blobSASSignatureValues.expiresOn + ? truncatedISO8061Date(blobSASSignatureValues.expiresOn, false) + : "", + getCanonicalName(sharedKeyCredential.accountName, blobSASSignatureValues.containerName, blobSASSignatureValues.blobName), + blobSASSignatureValues.identifier, + blobSASSignatureValues.ipRange ? ipRangeToString(blobSASSignatureValues.ipRange) : "", + blobSASSignatureValues.protocol ? blobSASSignatureValues.protocol : "", + blobSASSignatureValues.version, + blobSASSignatureValues.cacheControl ? blobSASSignatureValues.cacheControl : "", + blobSASSignatureValues.contentDisposition ? blobSASSignatureValues.contentDisposition : "", + blobSASSignatureValues.contentEncoding ? blobSASSignatureValues.contentEncoding : "", + blobSASSignatureValues.contentLanguage ? blobSASSignatureValues.contentLanguage : "", + blobSASSignatureValues.contentType ? blobSASSignatureValues.contentType : "", + ].join("\n"); + const signature = sharedKeyCredential.computeHMACSHA256(stringToSign); + return new SASQueryParameters(blobSASSignatureValues.version, signature, verifiedPermissions, undefined, undefined, blobSASSignatureValues.protocol, blobSASSignatureValues.startsOn, blobSASSignatureValues.expiresOn, blobSASSignatureValues.ipRange, blobSASSignatureValues.identifier, resource, blobSASSignatureValues.cacheControl, blobSASSignatureValues.contentDisposition, blobSASSignatureValues.contentEncoding, blobSASSignatureValues.contentLanguage, blobSASSignatureValues.contentType); +} +/** + * ONLY AVAILABLE IN NODE.JS RUNTIME. + * IMPLEMENTATION FOR API VERSION FROM 2018-11-09. + * + * Creates an instance of SASQueryParameters. 
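 * Relative to the 2015-04-05 format, the string-to-sign adds the signed resource type ("b", "bs" or "bv") and the snapshot/version timestamp field.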
+ * + * Only accepts required settings needed to create a SAS. For optional settings please + * set corresponding properties directly, such as permissions, startsOn and identifier. + * + * WARNING: When identifier is not provided, permissions and expiresOn are required. + * You MUST assign value to identifier or expiresOn & permissions manually if you initialize with + * this constructor. + * + * @param blobSASSignatureValues - + * @param sharedKeyCredential - + */ +function generateBlobSASQueryParameters20181109(blobSASSignatureValues, sharedKeyCredential) { + blobSASSignatureValues = SASSignatureValuesSanityCheckAndAutofill(blobSASSignatureValues); + if (!blobSASSignatureValues.identifier && + !(blobSASSignatureValues.permissions && blobSASSignatureValues.expiresOn)) { + throw new RangeError("Must provide 'permissions' and 'expiresOn' for Blob SAS generation when 'identifier' is not provided."); } - if (blobPropertiesInXML["TagCount"]) { - blobProperties.tagCount = parseFloat(blobPropertiesInXML["TagCount"]); - delete blobProperties["TagCount"]; + let resource = "c"; + let timestamp = blobSASSignatureValues.snapshotTime; + if (blobSASSignatureValues.blobName) { + resource = "b"; + if (blobSASSignatureValues.snapshotTime) { + resource = "bs"; + } + else if (blobSASSignatureValues.versionId) { + resource = "bv"; + timestamp = blobSASSignatureValues.versionId; + } } - if (blobPropertiesInXML["Expiry-Time"]) { - blobProperties.expiresOn = new Date(blobPropertiesInXML["Expiry-Time"]); - delete blobProperties["Expiry-Time"]; + // Calling parse and toString guarantees the proper ordering and throws on invalid characters. + let verifiedPermissions; + if (blobSASSignatureValues.permissions) { + if (blobSASSignatureValues.blobName) { + verifiedPermissions = BlobSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + } + else { + verifiedPermissions = ContainerSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + } } - if (blobPropertiesInXML["Sealed"]) { - blobProperties.isSealed = ParseBoolean(blobPropertiesInXML["Sealed"]); - delete blobProperties["Sealed"]; + // Signature is generated on the un-url-encoded values. + const stringToSign = [ + verifiedPermissions ? verifiedPermissions : "", + blobSASSignatureValues.startsOn + ? truncatedISO8061Date(blobSASSignatureValues.startsOn, false) + : "", + blobSASSignatureValues.expiresOn + ? truncatedISO8061Date(blobSASSignatureValues.expiresOn, false) + : "", + getCanonicalName(sharedKeyCredential.accountName, blobSASSignatureValues.containerName, blobSASSignatureValues.blobName), + blobSASSignatureValues.identifier, + blobSASSignatureValues.ipRange ? ipRangeToString(blobSASSignatureValues.ipRange) : "", + blobSASSignatureValues.protocol ? blobSASSignatureValues.protocol : "", + blobSASSignatureValues.version, + resource, + timestamp, + blobSASSignatureValues.cacheControl ? blobSASSignatureValues.cacheControl : "", + blobSASSignatureValues.contentDisposition ? blobSASSignatureValues.contentDisposition : "", + blobSASSignatureValues.contentEncoding ? blobSASSignatureValues.contentEncoding : "", + blobSASSignatureValues.contentLanguage ? blobSASSignatureValues.contentLanguage : "", + blobSASSignatureValues.contentType ?
blobSASSignatureValues.contentType : "", + ].join("\n"); + const signature = sharedKeyCredential.computeHMACSHA256(stringToSign); + return new SASQueryParameters(blobSASSignatureValues.version, signature, verifiedPermissions, undefined, undefined, blobSASSignatureValues.protocol, blobSASSignatureValues.startsOn, blobSASSignatureValues.expiresOn, blobSASSignatureValues.ipRange, blobSASSignatureValues.identifier, resource, blobSASSignatureValues.cacheControl, blobSASSignatureValues.contentDisposition, blobSASSignatureValues.contentEncoding, blobSASSignatureValues.contentLanguage, blobSASSignatureValues.contentType); +} +/** + * ONLY AVAILABLE IN NODE.JS RUNTIME. + * IMPLEMENTATION FOR API VERSION FROM 2020-12-06. + * + * Creates an instance of SASQueryParameters. + * + * Only accepts required settings needed to create a SAS. For optional settings please + * set corresponding properties directly, such as permissions, startsOn and identifier. + * + * WARNING: When identifier is not provided, permissions and expiresOn are required. + * You MUST assign value to identifier or expiresOn & permissions manually if you initial with + * this constructor. + * + * @param blobSASSignatureValues - + * @param sharedKeyCredential - + */ +function generateBlobSASQueryParameters20201206(blobSASSignatureValues, sharedKeyCredential) { + blobSASSignatureValues = SASSignatureValuesSanityCheckAndAutofill(blobSASSignatureValues); + if (!blobSASSignatureValues.identifier && + !(blobSASSignatureValues.permissions && blobSASSignatureValues.expiresOn)) { + throw new RangeError("Must provide 'permissions' and 'expiresOn' for Blob SAS generation when 'identifier' is not provided."); } - if (blobPropertiesInXML["RehydratePriority"]) { - blobProperties.rehydratePriority = blobPropertiesInXML["RehydratePriority"]; - delete blobProperties["RehydratePriority"]; + let resource = "c"; + let timestamp = blobSASSignatureValues.snapshotTime; + if (blobSASSignatureValues.blobName) { + resource = "b"; + if (blobSASSignatureValues.snapshotTime) { + resource = "bs"; + } + else if (blobSASSignatureValues.versionId) { + resource = "bv"; + timestamp = blobSASSignatureValues.versionId; + } } - if (blobPropertiesInXML["LastAccessTime"]) { - blobProperties.lastAccessedOn = new Date(blobPropertiesInXML["LastAccessTime"]); - delete blobProperties["LastAccessTime"]; + // Calling parse and toString guarantees the proper ordering and throws on invalid characters. + let verifiedPermissions; + if (blobSASSignatureValues.permissions) { + if (blobSASSignatureValues.blobName) { + verifiedPermissions = BlobSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + } + else { + verifiedPermissions = ContainerSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + } } - if (blobPropertiesInXML["ImmutabilityPolicyUntilDate"]) { - blobProperties.immutabilityPolicyExpiresOn = new Date(blobPropertiesInXML["ImmutabilityPolicyUntilDate"]); - delete blobProperties["ImmutabilityPolicyUntilDate"]; + // Signature is generated on the un-url-encoded values. + const stringToSign = [ + verifiedPermissions ? verifiedPermissions : "", + blobSASSignatureValues.startsOn + ? truncatedISO8061Date(blobSASSignatureValues.startsOn, false) + : "", + blobSASSignatureValues.expiresOn + ? 
truncatedISO8061Date(blobSASSignatureValues.expiresOn, false) + : "", + getCanonicalName(sharedKeyCredential.accountName, blobSASSignatureValues.containerName, blobSASSignatureValues.blobName), + blobSASSignatureValues.identifier, + blobSASSignatureValues.ipRange ? ipRangeToString(blobSASSignatureValues.ipRange) : "", + blobSASSignatureValues.protocol ? blobSASSignatureValues.protocol : "", + blobSASSignatureValues.version, + resource, + timestamp, + blobSASSignatureValues.encryptionScope, + blobSASSignatureValues.cacheControl ? blobSASSignatureValues.cacheControl : "", + blobSASSignatureValues.contentDisposition ? blobSASSignatureValues.contentDisposition : "", + blobSASSignatureValues.contentEncoding ? blobSASSignatureValues.contentEncoding : "", + blobSASSignatureValues.contentLanguage ? blobSASSignatureValues.contentLanguage : "", + blobSASSignatureValues.contentType ? blobSASSignatureValues.contentType : "", + ].join("\n"); + const signature = sharedKeyCredential.computeHMACSHA256(stringToSign); + return new SASQueryParameters(blobSASSignatureValues.version, signature, verifiedPermissions, undefined, undefined, blobSASSignatureValues.protocol, blobSASSignatureValues.startsOn, blobSASSignatureValues.expiresOn, blobSASSignatureValues.ipRange, blobSASSignatureValues.identifier, resource, blobSASSignatureValues.cacheControl, blobSASSignatureValues.contentDisposition, blobSASSignatureValues.contentEncoding, blobSASSignatureValues.contentLanguage, blobSASSignatureValues.contentType, undefined, undefined, undefined, blobSASSignatureValues.encryptionScope); +} +/** + * ONLY AVAILABLE IN NODE.JS RUNTIME. + * IMPLEMENTATION FOR API VERSION FROM 2018-11-09. + * + * Creates an instance of SASQueryParameters. + * + * Only accepts required settings needed to create a SAS. For optional settings please + * set corresponding properties directly, such as permissions, startsOn. + * + * WARNING: identifier will be ignored, permissions and expiresOn are required. + * + * @param blobSASSignatureValues - + * @param userDelegationKeyCredential - + */ +function generateBlobSASQueryParametersUDK20181109(blobSASSignatureValues, userDelegationKeyCredential) { + blobSASSignatureValues = SASSignatureValuesSanityCheckAndAutofill(blobSASSignatureValues); + // Stored access policies are not supported for a user delegation SAS. + if (!blobSASSignatureValues.permissions || !blobSASSignatureValues.expiresOn) { + throw new RangeError("Must provide 'permissions' and 'expiresOn' for Blob SAS generation when generating user delegation SAS."); } - if (blobPropertiesInXML["ImmutabilityPolicyMode"]) { - blobProperties.immutabilityPolicyMode = blobPropertiesInXML["ImmutabilityPolicyMode"]; - delete blobProperties["ImmutabilityPolicyMode"]; + let resource = "c"; + let timestamp = blobSASSignatureValues.snapshotTime; + if (blobSASSignatureValues.blobName) { + resource = "b"; + if (blobSASSignatureValues.snapshotTime) { + resource = "bs"; + } + else if (blobSASSignatureValues.versionId) { + resource = "bv"; + timestamp = blobSASSignatureValues.versionId; + } } - if (blobPropertiesInXML["LegalHold"]) { - blobProperties.legalHold = ParseBoolean(blobPropertiesInXML["LegalHold"]); - delete blobProperties["LegalHold"]; + // Calling parse and toString guarantees the proper ordering and throws on invalid characters. 
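// Illustrative sketch (placeholder inputs): parse() round-trips the permission flags
// into their canonical order and rejects unknown characters, so equivalent inputs
// produce an identical signed value:
//   BlobSASPermissions.parse("wr").toString()   // => "rw"
//   BlobSASPermissions.parse("rq")              // throws on the invalid 'q'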
+ let verifiedPermissions; + if (blobSASSignatureValues.permissions) { + if (blobSASSignatureValues.blobName) { + verifiedPermissions = BlobSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + } + else { + verifiedPermissions = ContainerSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + } } - return blobProperties; + // Signature is generated on the un-url-encoded values. + const stringToSign = [ + verifiedPermissions ? verifiedPermissions : "", + blobSASSignatureValues.startsOn + ? truncatedISO8061Date(blobSASSignatureValues.startsOn, false) + : "", + blobSASSignatureValues.expiresOn + ? truncatedISO8061Date(blobSASSignatureValues.expiresOn, false) + : "", + getCanonicalName(userDelegationKeyCredential.accountName, blobSASSignatureValues.containerName, blobSASSignatureValues.blobName), + userDelegationKeyCredential.userDelegationKey.signedObjectId, + userDelegationKeyCredential.userDelegationKey.signedTenantId, + userDelegationKeyCredential.userDelegationKey.signedStartsOn + ? truncatedISO8061Date(userDelegationKeyCredential.userDelegationKey.signedStartsOn, false) + : "", + userDelegationKeyCredential.userDelegationKey.signedExpiresOn + ? truncatedISO8061Date(userDelegationKeyCredential.userDelegationKey.signedExpiresOn, false) + : "", + userDelegationKeyCredential.userDelegationKey.signedService, + userDelegationKeyCredential.userDelegationKey.signedVersion, + blobSASSignatureValues.ipRange ? ipRangeToString(blobSASSignatureValues.ipRange) : "", + blobSASSignatureValues.protocol ? blobSASSignatureValues.protocol : "", + blobSASSignatureValues.version, + resource, + timestamp, + blobSASSignatureValues.cacheControl, + blobSASSignatureValues.contentDisposition, + blobSASSignatureValues.contentEncoding, + blobSASSignatureValues.contentLanguage, + blobSASSignatureValues.contentType, + ].join("\n"); + const signature = userDelegationKeyCredential.computeHMACSHA256(stringToSign); + return new SASQueryParameters(blobSASSignatureValues.version, signature, verifiedPermissions, undefined, undefined, blobSASSignatureValues.protocol, blobSASSignatureValues.startsOn, blobSASSignatureValues.expiresOn, blobSASSignatureValues.ipRange, blobSASSignatureValues.identifier, resource, blobSASSignatureValues.cacheControl, blobSASSignatureValues.contentDisposition, blobSASSignatureValues.contentEncoding, blobSASSignatureValues.contentLanguage, blobSASSignatureValues.contentType, userDelegationKeyCredential.userDelegationKey); } -function ParseBlobItem(blobInXML) { - const blobItem = blobInXML; - blobItem.properties = ParseBlobProperties(blobInXML["Properties"]); - delete blobItem["Properties"]; - blobItem.name = ParseBlobName(blobInXML["Name"]); - delete blobItem["Name"]; - blobItem.deleted = ParseBoolean(blobInXML["Deleted"]); - delete blobItem["Deleted"]; - if (blobInXML["Snapshot"]) { - blobItem.snapshot = blobInXML["Snapshot"]; - delete blobItem["Snapshot"]; - } - if (blobInXML["VersionId"]) { - blobItem.versionId = blobInXML["VersionId"]; - delete blobItem["VersionId"]; +/** + * ONLY AVAILABLE IN NODE.JS RUNTIME. + * IMPLEMENTATION FOR API VERSION FROM 2020-02-10. + * + * Creates an instance of SASQueryParameters. + * + * Only accepts required settings needed to create a SAS. For optional settings please + * set corresponding properties directly, such as permissions, startsOn. + * + * WARNING: identifier will be ignored, permissions and expiresOn are required. 
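 * Compared with the 2018-11-09 delegation format, the string-to-sign below additionally covers preauthorizedAgentObjectId, a reserved agent-object-id slot (signed as undefined), and correlationId.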
+ * + * @param blobSASSignatureValues - + * @param userDelegationKeyCredential - + */ +function generateBlobSASQueryParametersUDK20200210(blobSASSignatureValues, userDelegationKeyCredential) { + blobSASSignatureValues = SASSignatureValuesSanityCheckAndAutofill(blobSASSignatureValues); + // Stored access policies are not supported for a user delegation SAS. + if (!blobSASSignatureValues.permissions || !blobSASSignatureValues.expiresOn) { + throw new RangeError("Must provide 'permissions' and 'expiresOn' for Blob SAS generation when generating user delegation SAS."); } - if (blobInXML["IsCurrentVersion"]) { - blobItem.isCurrentVersion = ParseBoolean(blobInXML["IsCurrentVersion"]); - delete blobItem["IsCurrentVersion"]; + let resource = "c"; + let timestamp = blobSASSignatureValues.snapshotTime; + if (blobSASSignatureValues.blobName) { + resource = "b"; + if (blobSASSignatureValues.snapshotTime) { + resource = "bs"; + } + else if (blobSASSignatureValues.versionId) { + resource = "bv"; + timestamp = blobSASSignatureValues.versionId; + } } - if (blobInXML["Metadata"]) { - blobItem.metadata = blobInXML["Metadata"]; - delete blobItem["Metadata"]; + // Calling parse and toString guarantees the proper ordering and throws on invalid characters. + let verifiedPermissions; + if (blobSASSignatureValues.permissions) { + if (blobSASSignatureValues.blobName) { + verifiedPermissions = BlobSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + } + else { + verifiedPermissions = ContainerSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + } } - if (blobInXML["Tags"]) { - blobItem.blobTags = ParseBlobTags(blobInXML["Tags"]); - delete blobItem["Tags"]; + // Signature is generated on the un-url-encoded values. + const stringToSign = [ + verifiedPermissions ? verifiedPermissions : "", + blobSASSignatureValues.startsOn + ? truncatedISO8061Date(blobSASSignatureValues.startsOn, false) + : "", + blobSASSignatureValues.expiresOn + ? truncatedISO8061Date(blobSASSignatureValues.expiresOn, false) + : "", + getCanonicalName(userDelegationKeyCredential.accountName, blobSASSignatureValues.containerName, blobSASSignatureValues.blobName), + userDelegationKeyCredential.userDelegationKey.signedObjectId, + userDelegationKeyCredential.userDelegationKey.signedTenantId, + userDelegationKeyCredential.userDelegationKey.signedStartsOn + ? truncatedISO8061Date(userDelegationKeyCredential.userDelegationKey.signedStartsOn, false) + : "", + userDelegationKeyCredential.userDelegationKey.signedExpiresOn + ? truncatedISO8061Date(userDelegationKeyCredential.userDelegationKey.signedExpiresOn, false) + : "", + userDelegationKeyCredential.userDelegationKey.signedService, + userDelegationKeyCredential.userDelegationKey.signedVersion, + blobSASSignatureValues.preauthorizedAgentObjectId, + undefined, + blobSASSignatureValues.correlationId, + blobSASSignatureValues.ipRange ? ipRangeToString(blobSASSignatureValues.ipRange) : "", + blobSASSignatureValues.protocol ? 
blobSASSignatureValues.protocol : "", + blobSASSignatureValues.version, + resource, + timestamp, + blobSASSignatureValues.cacheControl, + blobSASSignatureValues.contentDisposition, + blobSASSignatureValues.contentEncoding, + blobSASSignatureValues.contentLanguage, + blobSASSignatureValues.contentType, + ].join("\n"); + const signature = userDelegationKeyCredential.computeHMACSHA256(stringToSign); + return new SASQueryParameters(blobSASSignatureValues.version, signature, verifiedPermissions, undefined, undefined, blobSASSignatureValues.protocol, blobSASSignatureValues.startsOn, blobSASSignatureValues.expiresOn, blobSASSignatureValues.ipRange, blobSASSignatureValues.identifier, resource, blobSASSignatureValues.cacheControl, blobSASSignatureValues.contentDisposition, blobSASSignatureValues.contentEncoding, blobSASSignatureValues.contentLanguage, blobSASSignatureValues.contentType, userDelegationKeyCredential.userDelegationKey, blobSASSignatureValues.preauthorizedAgentObjectId, blobSASSignatureValues.correlationId); +} +/** + * ONLY AVAILABLE IN NODE.JS RUNTIME. + * IMPLEMENTATION FOR API VERSION FROM 2020-12-06. + * + * Creates an instance of SASQueryParameters. + * + * Only accepts required settings needed to create a SAS. For optional settings please + * set corresponding properties directly, such as permissions, startsOn. + * + * WARNING: identifier will be ignored, permissions and expiresOn are required. + * + * @param blobSASSignatureValues - + * @param userDelegationKeyCredential - + */ +function generateBlobSASQueryParametersUDK20201206(blobSASSignatureValues, userDelegationKeyCredential) { + blobSASSignatureValues = SASSignatureValuesSanityCheckAndAutofill(blobSASSignatureValues); + // Stored access policies are not supported for a user delegation SAS. + if (!blobSASSignatureValues.permissions || !blobSASSignatureValues.expiresOn) { + throw new RangeError("Must provide 'permissions' and 'expiresOn' for Blob SAS generation when generating user delegation SAS."); } - if (blobInXML["OrMetadata"]) { - blobItem.objectReplicationMetadata = blobInXML["OrMetadata"]; - delete blobItem["OrMetadata"]; + let resource = "c"; + let timestamp = blobSASSignatureValues.snapshotTime; + if (blobSASSignatureValues.blobName) { + resource = "b"; + if (blobSASSignatureValues.snapshotTime) { + resource = "bs"; + } + else if (blobSASSignatureValues.versionId) { + resource = "bv"; + timestamp = blobSASSignatureValues.versionId; + } } - if (blobInXML["HasVersionsOnly"]) { - blobItem.hasVersionsOnly = ParseBoolean(blobInXML["HasVersionsOnly"]); - delete blobItem["HasVersionsOnly"]; + // Calling parse and toString guarantees the proper ordering and throws on invalid characters. + let verifiedPermissions; + if (blobSASSignatureValues.permissions) { + if (blobSASSignatureValues.blobName) { + verifiedPermissions = BlobSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + } + else { + verifiedPermissions = ContainerSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + } } - return blobItem; -} -function ParseBlobPrefix(blobPrefixInXML) { - return { - name: ParseBlobName(blobPrefixInXML["Name"]), - }; + // Signature is generated on the un-url-encoded values. + const stringToSign = [ + verifiedPermissions ? verifiedPermissions : "", + blobSASSignatureValues.startsOn + ? truncatedISO8061Date(blobSASSignatureValues.startsOn, false) + : "", + blobSASSignatureValues.expiresOn + ? 
truncatedISO8061Date(blobSASSignatureValues.expiresOn, false) + : "", + getCanonicalName(userDelegationKeyCredential.accountName, blobSASSignatureValues.containerName, blobSASSignatureValues.blobName), + userDelegationKeyCredential.userDelegationKey.signedObjectId, + userDelegationKeyCredential.userDelegationKey.signedTenantId, + userDelegationKeyCredential.userDelegationKey.signedStartsOn + ? truncatedISO8061Date(userDelegationKeyCredential.userDelegationKey.signedStartsOn, false) + : "", + userDelegationKeyCredential.userDelegationKey.signedExpiresOn + ? truncatedISO8061Date(userDelegationKeyCredential.userDelegationKey.signedExpiresOn, false) + : "", + userDelegationKeyCredential.userDelegationKey.signedService, + userDelegationKeyCredential.userDelegationKey.signedVersion, + blobSASSignatureValues.preauthorizedAgentObjectId, + undefined, + blobSASSignatureValues.correlationId, + blobSASSignatureValues.ipRange ? ipRangeToString(blobSASSignatureValues.ipRange) : "", + blobSASSignatureValues.protocol ? blobSASSignatureValues.protocol : "", + blobSASSignatureValues.version, + resource, + timestamp, + blobSASSignatureValues.encryptionScope, + blobSASSignatureValues.cacheControl, + blobSASSignatureValues.contentDisposition, + blobSASSignatureValues.contentEncoding, + blobSASSignatureValues.contentLanguage, + blobSASSignatureValues.contentType, + ].join("\n"); + const signature = userDelegationKeyCredential.computeHMACSHA256(stringToSign); + return new SASQueryParameters(blobSASSignatureValues.version, signature, verifiedPermissions, undefined, undefined, blobSASSignatureValues.protocol, blobSASSignatureValues.startsOn, blobSASSignatureValues.expiresOn, blobSASSignatureValues.ipRange, blobSASSignatureValues.identifier, resource, blobSASSignatureValues.cacheControl, blobSASSignatureValues.contentDisposition, blobSASSignatureValues.contentEncoding, blobSASSignatureValues.contentLanguage, blobSASSignatureValues.contentType, userDelegationKeyCredential.userDelegationKey, blobSASSignatureValues.preauthorizedAgentObjectId, blobSASSignatureValues.correlationId, blobSASSignatureValues.encryptionScope); } -function ParseBlobTag(blobTagInXML) { - return { - key: blobTagInXML["Key"], - value: blobTagInXML["Value"], - }; +function getCanonicalName(accountName, containerName, blobName) { + // Container: "/blob/account/containerName" + // Blob: "/blob/account/containerName/blobName" + const elements = [`/blob/${accountName}/${containerName}`]; + if (blobName) { + elements.push(`/${blobName}`); + } + return elements.join(""); } -function ParseBlobTags(blobTagsInXML) { - if (blobTagsInXML === undefined || - blobTagsInXML["TagSet"] === undefined || - blobTagsInXML["TagSet"]["Tag"] === undefined) { - return undefined; +function SASSignatureValuesSanityCheckAndAutofill(blobSASSignatureValues) { + const version = blobSASSignatureValues.version ? 
blobSASSignatureValues.version : SERVICE_VERSION; + if (blobSASSignatureValues.snapshotTime && version < "2018-11-09") { + throw RangeError("'version' must be >= '2018-11-09' when providing 'snapshotTime'."); } - const blobTagSet = []; - if (blobTagsInXML["TagSet"]["Tag"] instanceof Array) { - blobTagsInXML["TagSet"]["Tag"].forEach((blobTagInXML) => { - blobTagSet.push(ParseBlobTag(blobTagInXML)); - }); + if (blobSASSignatureValues.blobName === undefined && blobSASSignatureValues.snapshotTime) { + throw RangeError("Must provide 'blobName' when providing 'snapshotTime'."); + } + if (blobSASSignatureValues.versionId && version < "2019-10-10") { + throw RangeError("'version' must be >= '2019-10-10' when providing 'versionId'."); } - else { - blobTagSet.push(ParseBlobTag(blobTagsInXML["TagSet"]["Tag"])); + if (blobSASSignatureValues.blobName === undefined && blobSASSignatureValues.versionId) { + throw RangeError("Must provide 'blobName' when providing 'versionId'."); } - return { blobTagSet: blobTagSet }; -} -function ProcessBlobItems(blobArrayInXML) { - const blobItems = []; - if (blobArrayInXML instanceof Array) { - blobArrayInXML.forEach((blobInXML) => { - blobItems.push(ParseBlobItem(blobInXML)); - }); + if (blobSASSignatureValues.permissions && + blobSASSignatureValues.permissions.setImmutabilityPolicy && + version < "2020-08-04") { + throw RangeError("'version' must be >= '2020-08-04' when provided 'i' permission."); } - else { - blobItems.push(ParseBlobItem(blobArrayInXML)); + if (blobSASSignatureValues.permissions && + blobSASSignatureValues.permissions.deleteVersion && + version < "2019-10-10") { + throw RangeError("'version' must be >= '2019-10-10' when providing 'x' permission."); } - return blobItems; -} -function ProcessBlobPrefixes(blobPrefixesInXML) { - const blobPrefixes = []; - if (blobPrefixesInXML instanceof Array) { - blobPrefixesInXML.forEach((blobPrefixInXML) => { - blobPrefixes.push(ParseBlobPrefix(blobPrefixInXML)); - }); + if (blobSASSignatureValues.permissions && + blobSASSignatureValues.permissions.permanentDelete && + version < "2019-10-10") { + throw RangeError("'version' must be >= '2019-10-10' when providing 'y' permission."); } - else { - blobPrefixes.push(ParseBlobPrefix(blobPrefixesInXML)); + if (blobSASSignatureValues.permissions && + blobSASSignatureValues.permissions.tag && + version < "2019-12-12") { + throw RangeError("'version' must be >= '2019-12-12' when providing 't' permission."); } - return blobPrefixes; -} -function* ExtractPageRangeInfoItems(getPageRangesSegment) { - let pageRange = []; - let clearRange = []; - if (getPageRangesSegment.pageRange) - pageRange = getPageRangesSegment.pageRange; - if (getPageRangesSegment.clearRange) - clearRange = getPageRangesSegment.clearRange; - let pageRangeIndex = 0; - let clearRangeIndex = 0; - while (pageRangeIndex < pageRange.length && clearRangeIndex < clearRange.length) { - if (pageRange[pageRangeIndex].start < clearRange[clearRangeIndex].start) { - yield { - start: pageRange[pageRangeIndex].start, - end: pageRange[pageRangeIndex].end, - isClear: false, - }; - ++pageRangeIndex; - } - else { - yield { - start: clearRange[clearRangeIndex].start, - end: clearRange[clearRangeIndex].end, - isClear: true, - }; - ++clearRangeIndex; - } + if (version < "2020-02-10" && + blobSASSignatureValues.permissions && + (blobSASSignatureValues.permissions.move || blobSASSignatureValues.permissions.execute)) { + throw RangeError("'version' must be >= '2020-02-10' when providing the 'm' or 'e' permission."); } - for (; 
pageRangeIndex < pageRange.length; ++pageRangeIndex) { - yield { - start: pageRange[pageRangeIndex].start, - end: pageRange[pageRangeIndex].end, - isClear: false, - }; + if (version < "2021-04-10" && + blobSASSignatureValues.permissions && + blobSASSignatureValues.permissions.filterByTags) { + throw RangeError("'version' must be >= '2021-04-10' when providing the 'f' permission."); } - for (; clearRangeIndex < clearRange.length; ++clearRangeIndex) { - yield { - start: clearRange[clearRangeIndex].start, - end: clearRange[clearRangeIndex].end, - isClear: true, - }; + if (version < "2020-02-10" && + (blobSASSignatureValues.preauthorizedAgentObjectId || blobSASSignatureValues.correlationId)) { + throw RangeError("'version' must be >= '2020-02-10' when providing 'preauthorizedAgentObjectId' or 'correlationId'."); } -} -/** - * Escape the blobName but keep path separator ('/'). - */ -function EscapePath(blobName) { - const split = blobName.split("/"); - for (let i = 0; i < split.length; i++) { - split[i] = encodeURIComponent(split[i]); + if (blobSASSignatureValues.encryptionScope && version < "2020-12-06") { + throw RangeError("'version' must be >= '2020-12-06' when provided 'encryptionScope' in SAS."); } - return split.join("/"); + blobSASSignatureValues.version = version; + return blobSASSignatureValues; } // Copyright (c) Microsoft Corporation. /** - * StorageBrowserPolicy will handle differences between Node.js and browser runtime, including: - * - * 1. Browsers cache GET/HEAD requests by adding conditional headers such as 'IF_MODIFIED_SINCE'. - * StorageBrowserPolicy is a policy used to add a timestamp query to GET/HEAD request URL - * thus avoid the browser cache. - * - * 2. Remove cookie header for security - * - * 3. Remove content-length header to avoid browsers warning + * A client that manages leases for a {@link ContainerClient} or a {@link BlobClient}. */ -class StorageBrowserPolicy extends coreHttp.BaseRequestPolicy { - /** - * Creates an instance of StorageBrowserPolicy. - * @param nextPolicy - - * @param options - - */ - // The base class has a protected constructor. Adding a public one to enable constructing of this class. - /* eslint-disable-next-line @typescript-eslint/no-useless-constructor*/ - constructor(nextPolicy, options) { - super(nextPolicy, options); - } +class BlobLeaseClient { /** - * Sends out request. - * - * @param request - + * Creates an instance of BlobLeaseClient. + * @param client - The client to make the lease operation requests. + * @param leaseId - Initial proposed lease id. 
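 * If no leaseId is supplied, a random UUID is generated and used as the proposed lease id.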
*/ - async sendRequest(request) { - if (coreHttp.isNode) { - return this._nextPolicy.sendRequest(request); + constructor(client, leaseId) { + const clientContext = new StorageClientContext(client.url, client.pipeline.toServiceClientOptions()); + this._url = client.url; + if (client.name === undefined) { + this._isContainer = true; + this._containerOrBlobOperation = new Container(clientContext); } - if (request.method.toUpperCase() === "GET" || request.method.toUpperCase() === "HEAD") { - request.url = setURLParameter(request.url, URLConstants.Parameters.FORCE_BROWSER_NO_CACHE, new Date().getTime().toString()); + else { + this._isContainer = false; + this._containerOrBlobOperation = new Blob$1(clientContext); } - request.headers.remove(HeaderConstants.COOKIE); - // According to XHR standards, content-length should be fully controlled by browsers - request.headers.remove(HeaderConstants.CONTENT_LENGTH); - return this._nextPolicy.sendRequest(request); - } -} - -// Copyright (c) Microsoft Corporation. -/** - * StorageBrowserPolicyFactory is a factory class helping generating StorageBrowserPolicy objects. - */ -class StorageBrowserPolicyFactory { - /** - * Creates a StorageBrowserPolicyFactory object. - * - * @param nextPolicy - - * @param options - - */ - create(nextPolicy, options) { - return new StorageBrowserPolicy(nextPolicy, options); + if (!leaseId) { + leaseId = coreHttp.generateUuid(); + } + this._leaseId = leaseId; } -} - -// Copyright (c) Microsoft Corporation. -/** - * RetryPolicy types. - */ -exports.StorageRetryPolicyType = void 0; -(function (StorageRetryPolicyType) { - /** - * Exponential retry. Retry time delay grows exponentially. - */ - StorageRetryPolicyType[StorageRetryPolicyType["EXPONENTIAL"] = 0] = "EXPONENTIAL"; - /** - * Linear retry. Retry time delay grows linearly. - */ - StorageRetryPolicyType[StorageRetryPolicyType["FIXED"] = 1] = "FIXED"; -})(exports.StorageRetryPolicyType || (exports.StorageRetryPolicyType = {})); -// Default values of StorageRetryOptions -const DEFAULT_RETRY_OPTIONS = { - maxRetryDelayInMs: 120 * 1000, - maxTries: 4, - retryDelayInMs: 4 * 1000, - retryPolicyType: exports.StorageRetryPolicyType.EXPONENTIAL, - secondaryHost: "", - tryTimeoutInMs: undefined, // Use server side default timeout strategy -}; -const RETRY_ABORT_ERROR = new abortController.AbortError("The operation was aborted."); -/** - * Retry policy with exponential retry and linear retry implemented. - */ -class StorageRetryPolicy extends coreHttp.BaseRequestPolicy { /** - * Creates an instance of RetryPolicy. + * Gets the lease Id. * - * @param nextPolicy - - * @param options - - * @param retryOptions - + * @readonly */ - constructor(nextPolicy, options, retryOptions = DEFAULT_RETRY_OPTIONS) { - super(nextPolicy, options); - // Initialize retry options - this.retryOptions = { - retryPolicyType: retryOptions.retryPolicyType - ? retryOptions.retryPolicyType - : DEFAULT_RETRY_OPTIONS.retryPolicyType, - maxTries: retryOptions.maxTries && retryOptions.maxTries >= 1 - ? Math.floor(retryOptions.maxTries) - : DEFAULT_RETRY_OPTIONS.maxTries, - tryTimeoutInMs: retryOptions.tryTimeoutInMs && retryOptions.tryTimeoutInMs >= 0 - ? retryOptions.tryTimeoutInMs - : DEFAULT_RETRY_OPTIONS.tryTimeoutInMs, - retryDelayInMs: retryOptions.retryDelayInMs && retryOptions.retryDelayInMs >= 0 - ? Math.min(retryOptions.retryDelayInMs, retryOptions.maxRetryDelayInMs - ? 
retryOptions.maxRetryDelayInMs - : DEFAULT_RETRY_OPTIONS.maxRetryDelayInMs) - : DEFAULT_RETRY_OPTIONS.retryDelayInMs, - maxRetryDelayInMs: retryOptions.maxRetryDelayInMs && retryOptions.maxRetryDelayInMs >= 0 - ? retryOptions.maxRetryDelayInMs - : DEFAULT_RETRY_OPTIONS.maxRetryDelayInMs, - secondaryHost: retryOptions.secondaryHost - ? retryOptions.secondaryHost - : DEFAULT_RETRY_OPTIONS.secondaryHost, - }; + get leaseId() { + return this._leaseId; } /** - * Sends request. + * Gets the url. * - * @param request - + * @readonly */ - async sendRequest(request) { - return this.attemptSendRequest(request, false, 1); + get url() { + return this._url; } /** - * Decide and perform next retry. Won't mutate request parameter. + * Establishes and manages a lock on a container for delete operations, or on a blob + * for write and delete operations. + * The lock duration can be 15 to 60 seconds, or can be infinite. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-container + * and + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-blob * - * @param request - - * @param secondaryHas404 - If attempt was against the secondary & it returned a StatusNotFound (404), then - * the resource was not found. This may be due to replication delay. So, in this - * case, we'll never try the secondary again for this operation. - * @param attempt - How many retries has been attempted to performed, starting from 1, which includes - * the attempt will be performed by this method call. + * @param duration - Must be between 15 to 60 seconds, or infinite (-1) + * @param options - option to configure lease management operations. + * @returns Response data for acquire lease operation. */ - async attemptSendRequest(request, secondaryHas404, attempt) { - const newRequest = request.clone(); - const isPrimaryRetry = secondaryHas404 || - !this.retryOptions.secondaryHost || - !(request.method === "GET" || request.method === "HEAD" || request.method === "OPTIONS") || - attempt % 2 === 1; - if (!isPrimaryRetry) { - newRequest.url = setURLHost(newRequest.url, this.retryOptions.secondaryHost); - } - // Set the server-side timeout query parameter "timeout=[seconds]" - if (this.retryOptions.tryTimeoutInMs) { - newRequest.url = setURLParameter(newRequest.url, URLConstants.Parameters.TIMEOUT, Math.floor(this.retryOptions.tryTimeoutInMs / 1000).toString()); + async acquireLease(duration, options = {}) { + var _a, _b, _c, _d, _e, _f; + const { span, updatedOptions } = createSpan("BlobLeaseClient-acquireLease", options); + if (this._isContainer && + ((((_a = options.conditions) === null || _a === void 0 ? void 0 : _a.ifMatch) && ((_b = options.conditions) === null || _b === void 0 ? void 0 : _b.ifMatch) !== ETagNone) || + (((_c = options.conditions) === null || _c === void 0 ? void 0 : _c.ifNoneMatch) && ((_d = options.conditions) === null || _d === void 0 ? void 0 : _d.ifNoneMatch) !== ETagNone) || + ((_e = options.conditions) === null || _e === void 0 ? void 0 : _e.tagConditions))) { + throw new RangeError("The IfMatch, IfNoneMatch and tags access conditions are ignored by the service. Values other than undefined or their default values are not acceptable."); } - let response; try { - logger.info(`RetryPolicy: =====> Try=${attempt} ${isPrimaryRetry ? 
"Primary" : "Secondary"}`); - response = await this._nextPolicy.sendRequest(newRequest); - if (!this.shouldRetry(isPrimaryRetry, attempt, response)) { - return response; - } - secondaryHas404 = secondaryHas404 || (!isPrimaryRetry && response.status === 404); + return await this._containerOrBlobOperation.acquireLease(Object.assign({ abortSignal: options.abortSignal, duration, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_f = options.conditions) === null || _f === void 0 ? void 0 : _f.tagConditions }), proposedLeaseId: this._leaseId }, convertTracingToRequestOptionsBase(updatedOptions))); } - catch (err) { - logger.error(`RetryPolicy: Caught error, message: ${err.message}, code: ${err.code}`); - if (!this.shouldRetry(isPrimaryRetry, attempt, response, err)) { - throw err; - } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); } - await this.delay(isPrimaryRetry, attempt, request.abortSignal); - return this.attemptSendRequest(request, secondaryHas404, ++attempt); } /** - * Decide whether to retry according to last HTTP response and retry counters. + * To change the ID of the lease. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-container + * and + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-blob * - * @param isPrimaryRetry - - * @param attempt - - * @param response - - * @param err - + * @param proposedLeaseId - the proposed new lease Id. + * @param options - option to configure lease management operations. + * @returns Response data for change lease operation. */ - shouldRetry(isPrimaryRetry, attempt, response, err) { - if (attempt >= this.retryOptions.maxTries) { - logger.info(`RetryPolicy: Attempt(s) ${attempt} >= maxTries ${this.retryOptions - .maxTries}, no further try.`); - return false; + async changeLease(proposedLeaseId, options = {}) { + var _a, _b, _c, _d, _e, _f; + const { span, updatedOptions } = createSpan("BlobLeaseClient-changeLease", options); + if (this._isContainer && + ((((_a = options.conditions) === null || _a === void 0 ? void 0 : _a.ifMatch) && ((_b = options.conditions) === null || _b === void 0 ? void 0 : _b.ifMatch) !== ETagNone) || + (((_c = options.conditions) === null || _c === void 0 ? void 0 : _c.ifNoneMatch) && ((_d = options.conditions) === null || _d === void 0 ? void 0 : _d.ifNoneMatch) !== ETagNone) || + ((_e = options.conditions) === null || _e === void 0 ? void 0 : _e.tagConditions))) { + throw new RangeError("The IfMatch, IfNoneMatch and tags access conditions are ignored by the service. 
Values other than undefined or their default values are not acceptable."); } - // Handle network failures, you may need to customize the list when you implement - // your own http client - const retriableErrors = [ - "ETIMEDOUT", - "ESOCKETTIMEDOUT", - "ECONNREFUSED", - "ECONNRESET", - "ENOENT", - "ENOTFOUND", - "TIMEOUT", - "EPIPE", - "REQUEST_SEND_ERROR", // For default xhr based http client provided in ms-rest-js - ]; - if (err) { - for (const retriableError of retriableErrors) { - if (err.name.toUpperCase().includes(retriableError) || - err.message.toUpperCase().includes(retriableError) || - (err.code && err.code.toString().toUpperCase() === retriableError)) { - logger.info(`RetryPolicy: Network error ${retriableError} found, will retry.`); - return true; - } - } + try { + const response = await this._containerOrBlobOperation.changeLease(this._leaseId, proposedLeaseId, Object.assign({ abortSignal: options.abortSignal, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_f = options.conditions) === null || _f === void 0 ? void 0 : _f.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); + this._leaseId = proposedLeaseId; + return response; } - // If attempt was against the secondary & it returned a StatusNotFound (404), then - // the resource was not found. This may be due to replication delay. So, in this - // case, we'll never try the secondary again for this operation. - if (response || err) { - const statusCode = response ? response.status : err ? err.statusCode : 0; - if (!isPrimaryRetry && statusCode === 404) { - logger.info(`RetryPolicy: Secondary access with 404, will retry.`); - return true; - } - // Server internal error or server timeout - if (statusCode === 503 || statusCode === 500) { - logger.info(`RetryPolicy: Will retry for status code ${statusCode}.`); - return true; - } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; } - if ((err === null || err === void 0 ? void 0 : err.code) === "PARSE_ERROR" && (err === null || err === void 0 ? void 0 : err.message.startsWith(`Error "Error: Unclosed root tag`))) { - logger.info("RetryPolicy: Incomplete XML response likely due to service timeout, will retry."); - return true; + finally { + span.end(); } - return false; } /** - * Delay a calculated time between retries. + * To free the lease if it is no longer needed so that another client may + * immediately acquire a lease against the container or the blob. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-container + * and + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-blob * - * @param isPrimaryRetry - - * @param attempt - - * @param abortSignal - + * @param options - option to configure lease management operations. + * @returns Response data for release lease operation. 
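 *
 * Usage sketch (illustrative; assumes a BlobClient from this package):
 *
 *   const leaseClient = blobClient.getBlobLeaseClient();
 *   await leaseClient.acquireLease(30);   // lock the blob for 30 seconds
 *   // ... operate on the blob while holding the lease ...
 *   await leaseClient.releaseLease();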
*/ - async delay(isPrimaryRetry, attempt, abortSignal) { - let delayTimeInMs = 0; - if (isPrimaryRetry) { - switch (this.retryOptions.retryPolicyType) { - case exports.StorageRetryPolicyType.EXPONENTIAL: - delayTimeInMs = Math.min((Math.pow(2, attempt - 1) - 1) * this.retryOptions.retryDelayInMs, this.retryOptions.maxRetryDelayInMs); - break; - case exports.StorageRetryPolicyType.FIXED: - delayTimeInMs = this.retryOptions.retryDelayInMs; - break; - } + async releaseLease(options = {}) { + var _a, _b, _c, _d, _e, _f; + const { span, updatedOptions } = createSpan("BlobLeaseClient-releaseLease", options); + if (this._isContainer && + ((((_a = options.conditions) === null || _a === void 0 ? void 0 : _a.ifMatch) && ((_b = options.conditions) === null || _b === void 0 ? void 0 : _b.ifMatch) !== ETagNone) || + (((_c = options.conditions) === null || _c === void 0 ? void 0 : _c.ifNoneMatch) && ((_d = options.conditions) === null || _d === void 0 ? void 0 : _d.ifNoneMatch) !== ETagNone) || + ((_e = options.conditions) === null || _e === void 0 ? void 0 : _e.tagConditions))) { + throw new RangeError("The IfMatch, IfNoneMatch and tags access conditions are ignored by the service. Values other than undefined or their default values are not acceptable."); } - else { - delayTimeInMs = Math.random() * 1000; + try { + return await this._containerOrBlobOperation.releaseLease(this._leaseId, Object.assign({ abortSignal: options.abortSignal, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_f = options.conditions) === null || _f === void 0 ? void 0 : _f.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); } - logger.info(`RetryPolicy: Delay for ${delayTimeInMs}ms`); - return delay(delayTimeInMs, abortSignal, RETRY_ABORT_ERROR); - } -} - -// Copyright (c) Microsoft Corporation. -/** - * StorageRetryPolicyFactory is a factory class helping generating {@link StorageRetryPolicy} objects. - */ -class StorageRetryPolicyFactory { - /** - * Creates an instance of StorageRetryPolicyFactory. - * @param retryOptions - - */ - constructor(retryOptions) { - this.retryOptions = retryOptions; - } - /** - * Creates a StorageRetryPolicy object. - * - * @param nextPolicy - - * @param options - - */ - create(nextPolicy, options) { - return new StorageRetryPolicy(nextPolicy, options, this.retryOptions); } -} - -// Copyright (c) Microsoft Corporation. -/** - * Credential policy used to sign HTTP(S) requests before sending. This is an - * abstract class. - */ -class CredentialPolicy extends coreHttp.BaseRequestPolicy { /** - * Sends out request. + * To renew the lease. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-container + * and + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-blob * - * @param request - + * @param options - Optional option to configure lease management operations. + * @returns Response data for renew lease operation. */ - sendRequest(request) { - return this._nextPolicy.sendRequest(this.signRequest(request)); + async renewLease(options = {}) { + var _a, _b, _c, _d, _e, _f; + const { span, updatedOptions } = createSpan("BlobLeaseClient-renewLease", options); + if (this._isContainer && + ((((_a = options.conditions) === null || _a === void 0 ? void 0 : _a.ifMatch) && ((_b = options.conditions) === null || _b === void 0 ? 
void 0 : _b.ifMatch) !== ETagNone) || + (((_c = options.conditions) === null || _c === void 0 ? void 0 : _c.ifNoneMatch) && ((_d = options.conditions) === null || _d === void 0 ? void 0 : _d.ifNoneMatch) !== ETagNone) || + ((_e = options.conditions) === null || _e === void 0 ? void 0 : _e.tagConditions))) { + throw new RangeError("The IfMatch, IfNoneMatch and tags access conditions are ignored by the service. Values other than undefined or their default values are not acceptable."); + } + try { + return await this._containerOrBlobOperation.renewLease(this._leaseId, Object.assign({ abortSignal: options.abortSignal, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_f = options.conditions) === null || _f === void 0 ? void 0 : _f.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } /** - * Child classes must implement this method with request signing. This method - * will be executed in {@link sendRequest}. + * To end the lease but ensure that another client cannot acquire a new lease + * until the current lease period has expired. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-container + * and + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-blob * - * @param request - + * @param breakPeriod - Break period + * @param options - Optional options to configure lease management operations. + * @returns Response data for break lease operation. */ - signRequest(request) { - // Child classes must override this method with request signing. This method - // will be executed in sendRequest(). - return request; + async breakLease(breakPeriod, options = {}) { + var _a, _b, _c, _d, _e, _f; + const { span, updatedOptions } = createSpan("BlobLeaseClient-breakLease", options); + if (this._isContainer && + ((((_a = options.conditions) === null || _a === void 0 ? void 0 : _a.ifMatch) && ((_b = options.conditions) === null || _b === void 0 ? void 0 : _b.ifMatch) !== ETagNone) || + (((_c = options.conditions) === null || _c === void 0 ? void 0 : _c.ifNoneMatch) && ((_d = options.conditions) === null || _d === void 0 ? void 0 : _d.ifNoneMatch) !== ETagNone) || + ((_e = options.conditions) === null || _e === void 0 ? void 0 : _e.tagConditions))) { + throw new RangeError("The IfMatch, IfNoneMatch and tags access conditions are ignored by the service. Values other than undefined or their default values are not acceptable."); + } + try { + const operationOptions = Object.assign({ abortSignal: options.abortSignal, breakPeriod, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_f = options.conditions) === null || _f === void 0 ? void 0 : _f.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions)); + return await this._containerOrBlobOperation.breakLease(operationOptions); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } } // Copyright (c) Microsoft Corporation. /** - * AnonymousCredentialPolicy is used with HTTP(S) requests that read public resources - * or for use with Shared Access Signatures (SAS). + * ONLY AVAILABLE IN NODE.JS RUNTIME. + * + * A Node.js ReadableStream will internally retry when internal ReadableStream unexpected ends. 
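 * On a premature "end" (the offset has not yet covered the requested range), the stream
 * calls the getter to reopen the download from the current offset, up to maxRetryRequests
 * times, then destroys itself with a data-corruption error once that budget is exhausted.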
*/ -class AnonymousCredentialPolicy extends CredentialPolicy { +class RetriableReadableStream extends stream.Readable { /** - * Creates an instance of AnonymousCredentialPolicy. - * @param nextPolicy - + * Creates an instance of RetriableReadableStream. + * + * @param source - The current ReadableStream returned from getter + * @param getter - A method that issues the download request and returns + * a new ReadableStream from the specified offset + * @param offset - Offset position in original data source to read + * @param count - How much data in original data source to read * @param options - */ - // The base class has a protected constructor. Adding a public one to enable constructing of this class. - /* eslint-disable-next-line @typescript-eslint/no-useless-constructor*/ - constructor(nextPolicy, options) { - super(nextPolicy, options); + constructor(source, getter, offset, count, options = {}) { + super({ highWaterMark: options.highWaterMark }); + this.retries = 0; + this.sourceDataHandler = (data) => { + if (this.options.doInjectErrorOnce) { + this.options.doInjectErrorOnce = undefined; + this.source.pause(); + this.source.removeAllListeners("data"); + this.source.emit("end"); + return; + } + // console.log( + // `Offset: ${this.offset}, Received ${data.length} from internal stream` + // ); + this.offset += data.length; + if (this.onProgress) { + this.onProgress({ loadedBytes: this.offset - this.start }); + } + if (!this.push(data)) { + this.source.pause(); + } + }; + this.sourceErrorOrEndHandler = (err) => { + if (err && err.name === "AbortError") { + this.destroy(err); + return; + } + // console.log( + // `Source stream emits end or error, offset: ${ + // this.offset + // }, dest end : ${this.end}` + // ); + this.removeSourceEventHandlers(); + if (this.offset - 1 === this.end) { + this.push(null); + } + else if (this.offset <= this.end) { + // console.log( + // `retries: ${this.retries}, max retries: ${this.maxRetries}` + // ); + if (this.retries < this.maxRetryRequests) { + this.retries += 1; + this.getter(this.offset) + .then((newSource) => { + this.source = newSource; + this.setSourceEventHandlers(); + return; + }) + .catch((error) => { + this.destroy(error); + }); + } + else { + this.destroy(new Error(`Data corruption failure: received less data than required and reached the maxRetries limit. Received data offset: ${this.offset - 1}, data needed offset: ${this.end}, retries: ${this.retries}, max retries: ${this.maxRetryRequests}`)); + } + } + else { + this.destroy(new Error(`Data corruption failure: Received more data than original request, data needed offset is ${this.end}, received offset: ${this.offset - 1}`)); + } + }; + this.getter = getter; + this.source = source; + this.start = offset; + this.offset = offset; + this.end = offset + count - 1; + this.maxRetryRequests = + options.maxRetryRequests && options.maxRetryRequests >= 0 ?
options.maxRetryRequests : 0; + this.onProgress = options.onProgress; + this.options = options; + this.setSourceEventHandlers(); + } + _read() { + this.source.resume(); + } + setSourceEventHandlers() { + this.source.on("data", this.sourceDataHandler); + this.source.on("end", this.sourceErrorOrEndHandler); + this.source.on("error", this.sourceErrorOrEndHandler); + } + removeSourceEventHandlers() { + this.source.removeListener("data", this.sourceDataHandler); + this.source.removeListener("end", this.sourceErrorOrEndHandler); + this.source.removeListener("error", this.sourceErrorOrEndHandler); + } + _destroy(error, callback) { + // remove listener from source and release source + this.removeSourceEventHandlers(); + this.source.destroy(); + callback(error === null ? undefined : error); } } // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. /** - * Credential is an abstract class for Azure Storage HTTP requests signing. This - * class will host an credentialPolicyCreator factory which generates CredentialPolicy. + * ONLY AVAILABLE IN NODE.JS RUNTIME. + * + * BlobDownloadResponse implements the BlobDownloadResponseParsed interface, and in the Node.js runtime it will + * automatically retry when the internal read stream unexpectedly ends. (This kind of unexpected end cannot + * trigger the retries defined in the pipeline retry policy.) + * + * The {@link readableStreamBody} stream retries under the hood, so you can use it as a normal Node.js + * Readable stream. */ -class Credential { +class BlobDownloadResponse { /** - * Creates a RequestPolicy object. + * Creates an instance of BlobDownloadResponse. * - * @param _nextPolicy - - * @param _options - + * @param originalResponse - + * @param getter - + * @param offset - + * @param count - + * @param options - */ - create(_nextPolicy, _options) { - throw new Error("Method should be implemented in children classes."); + constructor(originalResponse, getter, offset, count, options = {}) { + this.originalResponse = originalResponse; + this.blobDownloadStream = new RetriableReadableStream(this.originalResponse.readableStreamBody, getter, offset, count, options); } -} - -// Copyright (c) Microsoft Corporation. -/** - * AnonymousCredential provides a credentialPolicyCreator member used to create - * AnonymousCredentialPolicy objects. AnonymousCredentialPolicy is used with - * HTTP(S) requests that read public resources or for use with Shared Access - * Signatures (SAS). - */ -class AnonymousCredential extends Credential { /** - * Creates an {@link AnonymousCredentialPolicy} object. + * Indicates that the service supports + * requests for partial file content. * - * @param nextPolicy - - * @param options - + * @readonly */ - create(nextPolicy, options) { - return new AnonymousCredentialPolicy(nextPolicy, options); + get acceptRanges() { + return this.originalResponse.acceptRanges; } -} - -// Copyright (c) Microsoft Corporation. -/** - * TelemetryPolicy is a policy used to tag user-agent header for every requests. - */ -class TelemetryPolicy extends coreHttp.BaseRequestPolicy { /** - * Creates an instance of TelemetryPolicy. - * @param nextPolicy - - * @param options - - * @param telemetry - + * Returns the cache control value if it was previously specified + * for the file. + * + * @readonly */ - constructor(nextPolicy, options, telemetry) { - super(nextPolicy, options); - this.telemetry = telemetry; + get cacheControl() { + return this.originalResponse.cacheControl; } /** - * Sends out request.
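RetriableReadableStream and BlobDownloadResponse above are what make a downloaded body stream self-healing: when the connection drops mid-body, the getter re-issues a ranged read from the last good offset, up to maxRetryRequests times. A minimal consumption sketch; the names are placeholders and streamToBuffer is a local helper, not an SDK export:

import { BlobServiceClient } from "@azure/storage-blob";

// Local helper, not part of the SDK.
function streamToBuffer(stream: NodeJS.ReadableStream): Promise<Buffer> {
  return new Promise((resolve, reject) => {
    const chunks: Buffer[] = [];
    stream.on("data", (chunk) => chunks.push(Buffer.from(chunk)));
    stream.on("end", () => resolve(Buffer.concat(chunks)));
    stream.on("error", reject);
  });
}

async function downloadResilient(): Promise<Buffer> {
  const service = BlobServiceClient.fromConnectionString(process.env.AZURE_STORAGE_CONNECTION_STRING ?? "");
  const blob = service.getContainerClient("my-container").getBlobClient("big.bin");
  // Mid-body drops are invisible to the pipeline retry policy; maxRetryRequests
  // governs how many times the body stream may be re-requested instead.
  const response = await blob.download(0, undefined, { maxRetryRequests: 5 });
  return streamToBuffer(response.readableStreamBody!); // readableStreamBody is Node.js only
}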
+ * Returns the value that was specified + * for the 'x-ms-content-disposition' header and specifies how to process the + * response. * - * @param request - + * @readonly */ - async sendRequest(request) { - if (coreHttp.isNode) { - if (!request.headers) { - request.headers = new coreHttp.HttpHeaders(); - } - if (!request.headers.get(HeaderConstants.USER_AGENT)) { - request.headers.set(HeaderConstants.USER_AGENT, this.telemetry); - } - } - return this._nextPolicy.sendRequest(request); + get contentDisposition() { + return this.originalResponse.contentDisposition; } -} - -// Copyright (c) Microsoft Corporation. -/** - * TelemetryPolicyFactory is a factory class helping generating {@link TelemetryPolicy} objects. - */ -class TelemetryPolicyFactory { /** - * Creates an instance of TelemetryPolicyFactory. - * @param telemetry - + * Returns the value that was specified + * for the Content-Encoding request header. + * + * @readonly */ - constructor(telemetry) { - const userAgentInfo = []; - if (coreHttp.isNode) { - if (telemetry) { - const telemetryString = telemetry.userAgentPrefix || ""; - if (telemetryString.length > 0 && userAgentInfo.indexOf(telemetryString) === -1) { - userAgentInfo.push(telemetryString); - } - } - // e.g. azsdk-js-storageblob/10.0.0 - const libInfo = `azsdk-js-storageblob/${SDK_VERSION}`; - if (userAgentInfo.indexOf(libInfo) === -1) { - userAgentInfo.push(libInfo); - } - // e.g. (NODE-VERSION 4.9.1; Windows_NT 10.0.16299) - let runtimeInfo = `(NODE-VERSION ${process.version})`; - if (os__namespace) { - runtimeInfo = `(NODE-VERSION ${process.version}; ${os__namespace.type()} ${os__namespace.release()})`; - } - if (userAgentInfo.indexOf(runtimeInfo) === -1) { - userAgentInfo.push(runtimeInfo); - } - } - this.telemetryString = userAgentInfo.join(" "); + get contentEncoding() { + return this.originalResponse.contentEncoding; } /** - * Creates a TelemetryPolicy object. + * Returns the value that was specified + * for the Content-Language request header. * - * @param nextPolicy - - * @param options - + * @readonly */ - create(nextPolicy, options) { - return new TelemetryPolicy(nextPolicy, options, this.telemetryString); + get contentLanguage() { + return this.originalResponse.contentLanguage; } -} - -// Copyright (c) Microsoft Corporation. -const _defaultHttpClient = new coreHttp.DefaultHttpClient(); -function getCachedDefaultHttpClient() { - return _defaultHttpClient; -} - -// Copyright (c) Microsoft Corporation. -/** - * A set of constants used internally when processing requests. - */ -const Constants = { - DefaultScope: "/.default", /** - * Defines constants for use with HTTP headers. + * The current sequence number for a + * page blob. This header is not returned for block blobs or append blobs. + * + * @readonly */ - HeaderConstants: { - /** - * The Authorization header. - */ - AUTHORIZATION: "authorization", - }, -}; -// Default options for the cycler if none are provided -const DEFAULT_CYCLER_OPTIONS = { - forcedRefreshWindowInMs: 1000, - retryIntervalInMs: 3000, - refreshWindowInMs: 1000 * 60 * 2, // Start refreshing 2m before expiry -}; -/** - * Converts an an unreliable access token getter (which may resolve with null) - * into an AccessTokenGetter by retrying the unreliable getter in a regular - * interval. 
- * - * @param getAccessToken - a function that produces a promise of an access - * token that may fail by returning null - * @param retryIntervalInMs - the time (in milliseconds) to wait between retry - * attempts - * @param timeoutInMs - the timestamp after which the refresh attempt will fail, - * throwing an exception - * @returns - a promise that, if it resolves, will resolve with an access token - */ -async function beginRefresh(getAccessToken, retryIntervalInMs, timeoutInMs) { - // This wrapper handles exceptions gracefully as long as we haven't exceeded - // the timeout. - async function tryGetAccessToken() { - if (Date.now() < timeoutInMs) { - try { - return await getAccessToken(); - } - catch (_a) { - return null; - } - } - else { - const finalToken = await getAccessToken(); - // Timeout is up, so throw if it's still null - if (finalToken === null) { - throw new Error("Failed to refresh access token."); - } - return finalToken; - } - } - let token = await tryGetAccessToken(); - while (token === null) { - await coreHttp.delay(retryIntervalInMs); - token = await tryGetAccessToken(); + get blobSequenceNumber() { + return this.originalResponse.blobSequenceNumber; } - return token; -} -/** - * Creates a token cycler from a credential, scopes, and optional settings. - * - * A token cycler represents a way to reliably retrieve a valid access token - * from a TokenCredential. It will handle initializing the token, refreshing it - * when it nears expiration, and synchronizes refresh attempts to avoid - * concurrency hazards. - * - * @param credential - the underlying TokenCredential that provides the access - * token - * @param scopes - the scopes to request authorization for - * @param tokenCyclerOptions - optionally override default settings for the cycler - * - * @returns - a function that reliably produces a valid access token - */ -function createTokenCycler(credential, scopes, tokenCyclerOptions) { - let refreshWorker = null; - let token = null; - const options = Object.assign(Object.assign({}, DEFAULT_CYCLER_OPTIONS), tokenCyclerOptions); /** - * This little holder defines several predicates that we use to construct - * the rules of refreshing the token. + * The blob's type. Possible values include: + * 'BlockBlob', 'PageBlob', 'AppendBlob'. + * + * @readonly */ - const cycler = { - /** - * Produces true if a refresh job is currently in progress. - */ - get isRefreshing() { - return refreshWorker !== null; - }, - /** - * Produces true if the cycler SHOULD refresh (we are within the refresh - * window and not already refreshing) - */ - get shouldRefresh() { - var _a; - return (!cycler.isRefreshing && - ((_a = token === null || token === void 0 ? void 0 : token.expiresOnTimestamp) !== null && _a !== void 0 ? _a : 0) - options.refreshWindowInMs < Date.now()); - }, - /** - * Produces true if the cycler MUST refresh (null or nearly-expired - * token). - */ - get mustRefresh() { - return (token === null || token.expiresOnTimestamp - options.forcedRefreshWindowInMs < Date.now()); - }, - }; + get blobType() { + return this.originalResponse.blobType; + } /** - * Starts a refresh job or returns the existing job if one is already - * running. + * The number of bytes present in the + * response body. 
+ * + * @readonly */ - function refresh(getTokenOptions) { - var _a; - if (!cycler.isRefreshing) { - // We bind `scopes` here to avoid passing it around a lot - const tryGetAccessToken = () => credential.getToken(scopes, getTokenOptions); - // Take advantage of promise chaining to insert an assignment to `token` - // before the refresh can be considered done. - refreshWorker = beginRefresh(tryGetAccessToken, options.retryIntervalInMs, - // If we don't have a token, then we should timeout immediately - (_a = token === null || token === void 0 ? void 0 : token.expiresOnTimestamp) !== null && _a !== void 0 ? _a : Date.now()) - .then((_token) => { - refreshWorker = null; - token = _token; - return token; - }) - .catch((reason) => { - // We also should reset the refresher if we enter a failed state. All - // existing awaiters will throw, but subsequent requests will start a - // new retry chain. - refreshWorker = null; - token = null; - throw reason; - }); - } - return refreshWorker; - } - return async (tokenOptions) => { - // - // Simple rules: - // - If we MUST refresh, then return the refresh task, blocking - // the pipeline until a token is available. - // - If we SHOULD refresh, then run refresh but don't return it - // (we can still use the cached token). - // - Return the token, since it's fine if we didn't return in - // step 1. - // - if (cycler.mustRefresh) - return refresh(tokenOptions); - if (cycler.shouldRefresh) { - refresh(tokenOptions); - } - return token; - }; -} -/** - * We will retrieve the challenge only if the response status code was 401, - * and if the response contained the header "WWW-Authenticate" with a non-empty value. - */ -function getChallenge(response) { - const challenge = response.headers.get("WWW-Authenticate"); - if (response.status === 401 && challenge) { - return challenge; + get contentLength() { + return this.originalResponse.contentLength; } - return; -} -/** - * Converts: `Bearer a="b" c="d"`. - * Into: `[ { a: 'b', c: 'd' }]`. - * - * @internal - */ -function parseChallenge(challenge) { - const bearerChallenge = challenge.slice("Bearer ".length); - const challengeParts = `${bearerChallenge.trim()} `.split(" ").filter((x) => x); - const keyValuePairs = challengeParts.map((keyValue) => (([key, value]) => ({ [key]: value }))(keyValue.trim().split("="))); - // Key-value pairs to plain object: - return keyValuePairs.reduce((a, b) => (Object.assign(Object.assign({}, a), b)), {}); -} -// #endregion -/** - * Creates a new factory for a RequestPolicy that applies a bearer token to - * the requests' `Authorization` headers. - * - * @param credential - The TokenCredential implementation that can supply the bearer token. - * @param scopes - The scopes for which the bearer token applies. 
- */ -function storageBearerTokenChallengeAuthenticationPolicy(credential, scopes) { - // This simple function encapsulates the entire process of reliably retrieving the token - let getToken = createTokenCycler(credential, scopes); - class StorageBearerTokenChallengeAuthenticationPolicy extends coreHttp.BaseRequestPolicy { - constructor(nextPolicy, options) { - super(nextPolicy, options); - } - async sendRequest(webResource) { - if (!webResource.url.toLowerCase().startsWith("https://")) { - throw new Error("Bearer token authentication is not permitted for non-TLS protected (non-https) URLs."); - } - const getTokenInternal = getToken; - const token = (await getTokenInternal({ - abortSignal: webResource.abortSignal, - tracingOptions: { - tracingContext: webResource.tracingContext, - }, - })).token; - webResource.headers.set(Constants.HeaderConstants.AUTHORIZATION, `Bearer ${token}`); - const response = await this._nextPolicy.sendRequest(webResource); - if ((response === null || response === void 0 ? void 0 : response.status) === 401) { - const challenge = getChallenge(response); - if (challenge) { - const challengeInfo = parseChallenge(challenge); - const challengeScopes = challengeInfo.resource_id + Constants.DefaultScope; - const parsedAuthUri = coreHttp.URLBuilder.parse(challengeInfo.authorization_uri); - const pathSegments = parsedAuthUri.getPath().split("/"); - const tenantId = pathSegments[1]; - const getTokenForChallenge = createTokenCycler(credential, challengeScopes); - const tokenForChallenge = (await getTokenForChallenge({ - abortSignal: webResource.abortSignal, - tracingOptions: { - tracingContext: webResource.tracingContext, - }, - tenantId: tenantId, - })).token; - getToken = getTokenForChallenge; - webResource.headers.set(Constants.HeaderConstants.AUTHORIZATION, `Bearer ${tokenForChallenge}`); - return this._nextPolicy.sendRequest(webResource); - } - } - return response; - } + /** + * If the file has an MD5 hash and the + * request is to read the full file, this response header is returned so that + * the client can check for message content integrity. If the request is to + * read a specified range and the 'x-ms-range-get-content-md5' is set to + * true, then the request returns an MD5 hash for the range, as long as the + * range size is less than or equal to 4 MB. If neither of these sets of + * conditions is true, then no value is returned for the 'Content-MD5' + * header. + * + * @readonly + */ + get contentMD5() { + return this.originalResponse.contentMD5; } - return { - create: (nextPolicy, options) => { - return new StorageBearerTokenChallengeAuthenticationPolicy(nextPolicy, options); - }, - }; -} - -// Copyright (c) Microsoft Corporation. -/** - * A helper to decide if a given argument satisfies the Pipeline contract - * @param pipeline - An argument that may be a Pipeline - * @returns true when the argument satisfies the Pipeline contract - */ -function isPipelineLike(pipeline) { - if (!pipeline || typeof pipeline !== "object") { - return false; + /** + * Indicates the range of bytes returned if + * the client requested a subset of the file by setting the Range request + * header. + * + * @readonly + */ + get contentRange() { + return this.originalResponse.contentRange; } - const castPipeline = pipeline; - return (Array.isArray(castPipeline.factories) && - typeof castPipeline.options === "object" && - typeof castPipeline.toServiceClientOptions === "function"); -} -/** - * A Pipeline class containing HTTP request policies. 
- * You can create a default Pipeline by calling {@link newPipeline}. - * Or you can create a Pipeline with your own policies by the constructor of Pipeline. - * - * Refer to {@link newPipeline} and provided policies before implementing your - * customized Pipeline. - */ -class Pipeline { /** - * Creates an instance of Pipeline. Customize HTTPClient by implementing IHttpClient interface. + * The content type specified for the file. + * The default content type is 'application/octet-stream' * - * @param factories - - * @param options - + * @readonly */ - constructor(factories, options = {}) { - this.factories = factories; - // when options.httpClient is not specified, passing in a DefaultHttpClient instance to - // avoid each client creating its own http client. - this.options = Object.assign(Object.assign({}, options), { httpClient: options.httpClient || getCachedDefaultHttpClient() }); + get contentType() { + return this.originalResponse.contentType; } /** - * Transfer Pipeline object to ServiceClientOptions object which is required by - * ServiceClient constructor. + * Conclusion time of the last attempted + * Copy File operation where this file was the destination file. This value + * can specify the time of a completed, aborted, or failed copy attempt. * - * @returns The ServiceClientOptions object from this Pipeline. + * @readonly */ - toServiceClientOptions() { - return { - httpClient: this.options.httpClient, - requestPolicyFactories: this.factories, - }; + get copyCompletedOn() { + return this.originalResponse.copyCompletedOn; } -} -/** - * Creates a new Pipeline object with Credential provided. - * - * @param credential - Such as AnonymousCredential, StorageSharedKeyCredential or any credential from the `@azure/identity` package to authenticate requests to the service. You can also provide an object that implements the TokenCredential interface. If not specified, AnonymousCredential is used. - * @param pipelineOptions - Optional. Options. - * @returns A new Pipeline object. - */ -function newPipeline(credential, pipelineOptions = {}) { - var _a; - if (credential === undefined) { - credential = new AnonymousCredential(); + /** + * String identifier for the last attempted Copy + * File operation where this file was the destination file. + * + * @readonly + */ + get copyId() { + return this.originalResponse.copyId; } - // Order is important. Closer to the API at the top & closer to the network at the bottom. 
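The removed hunk above shows newPipeline assembling the policy order (telemetry, keep-alive, retry, deserialization, logging, with the credential closest to the wire). A sketch of building one explicitly from user code; the URL and retry count are illustrative placeholders:

import { AnonymousCredential, BlockBlobClient, newPipeline } from "@azure/storage-blob";

// Construct a pipeline up front instead of letting the client build a default one.
const pipeline = newPipeline(new AnonymousCredential(), {
  retryOptions: { maxTries: 4 }, // illustrative retry tuning
});
const blob = new BlockBlobClient(
  "https://myaccount.blob.core.windows.net/my-container/data.bin?<sas>", // placeholder SAS URL
  pipeline
);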
- // The credential's policy factory must appear close to the wire so it can sign any - // changes made by other factories (like UniqueRequestIDPolicyFactory) - const telemetryPolicy = new TelemetryPolicyFactory(pipelineOptions.userAgentOptions); - const factories = [ - coreHttp.tracingPolicy({ userAgent: telemetryPolicy.telemetryString }), - coreHttp.keepAlivePolicy(pipelineOptions.keepAliveOptions), - telemetryPolicy, - coreHttp.generateClientRequestIdPolicy(), - new StorageBrowserPolicyFactory(), - new StorageRetryPolicyFactory(pipelineOptions.retryOptions), - // Default deserializationPolicy is provided by protocol layer - // Use customized XML char key of "#" so we could deserialize metadata - // with "_" key - coreHttp.deserializationPolicy(undefined, { xmlCharKey: "#" }), - coreHttp.logPolicy({ - logger: logger.info, - allowedHeaderNames: StorageBlobLoggingAllowedHeaderNames, - allowedQueryParameters: StorageBlobLoggingAllowedQueryParameters, - }), - ]; - if (coreHttp.isNode) { - // policies only available in Node.js runtime, not in browsers - factories.push(coreHttp.proxyPolicy(pipelineOptions.proxyOptions)); - factories.push(coreHttp.disableResponseDecompressionPolicy()); + /** + * Contains the number of bytes copied and + * the total bytes in the source in the last attempted Copy File operation + * where this file was the destination file. Can show between 0 and + * Content-Length bytes copied. + * + * @readonly + */ + get copyProgress() { + return this.originalResponse.copyProgress; } - factories.push(coreHttp.isTokenCredential(credential) - ? attachCredential(storageBearerTokenChallengeAuthenticationPolicy(credential, (_a = pipelineOptions.audience) !== null && _a !== void 0 ? _a : StorageOAuthScopes), credential) - : credential); - return new Pipeline(factories, pipelineOptions); -} - -// Copyright (c) Microsoft Corporation. -/** - * StorageSharedKeyCredentialPolicy is a policy used to sign HTTP request with a shared key. - */ -class StorageSharedKeyCredentialPolicy extends CredentialPolicy { /** - * Creates an instance of StorageSharedKeyCredentialPolicy. - * @param nextPolicy - - * @param options - - * @param factory - + * URL up to 2KB in length that specifies the + * source file used in the last attempted Copy File operation where this file + * was the destination file. + * + * @readonly */ - constructor(nextPolicy, options, factory) { - super(nextPolicy, options); - this.factory = factory; + get copySource() { + return this.originalResponse.copySource; } /** - * Signs request. + * State of the copy operation + * identified by 'x-ms-copy-id'. 
Possible values include: 'pending', + * 'success', 'aborted', 'failed' * - * @param request - + * @readonly */ - signRequest(request) { - request.headers.set(HeaderConstants.X_MS_DATE, new Date().toUTCString()); - if (request.body && - (typeof request.body === "string" || request.body !== undefined) && - request.body.length > 0) { - request.headers.set(HeaderConstants.CONTENT_LENGTH, Buffer.byteLength(request.body)); - } - const stringToSign = [ - request.method.toUpperCase(), - this.getHeaderValueToSign(request, HeaderConstants.CONTENT_LANGUAGE), - this.getHeaderValueToSign(request, HeaderConstants.CONTENT_ENCODING), - this.getHeaderValueToSign(request, HeaderConstants.CONTENT_LENGTH), - this.getHeaderValueToSign(request, HeaderConstants.CONTENT_MD5), - this.getHeaderValueToSign(request, HeaderConstants.CONTENT_TYPE), - this.getHeaderValueToSign(request, HeaderConstants.DATE), - this.getHeaderValueToSign(request, HeaderConstants.IF_MODIFIED_SINCE), - this.getHeaderValueToSign(request, HeaderConstants.IF_MATCH), - this.getHeaderValueToSign(request, HeaderConstants.IF_NONE_MATCH), - this.getHeaderValueToSign(request, HeaderConstants.IF_UNMODIFIED_SINCE), - this.getHeaderValueToSign(request, HeaderConstants.RANGE), - ].join("\n") + - "\n" + - this.getCanonicalizedHeadersString(request) + - this.getCanonicalizedResourceString(request); - const signature = this.factory.computeHMACSHA256(stringToSign); - request.headers.set(HeaderConstants.AUTHORIZATION, `SharedKey ${this.factory.accountName}:${signature}`); - // console.log(`[URL]:${request.url}`); - // console.log(`[HEADERS]:${request.headers.toString()}`); - // console.log(`[STRING TO SIGN]:${JSON.stringify(stringToSign)}`); - // console.log(`[KEY]: ${request.headers.get(HeaderConstants.AUTHORIZATION)}`); - return request; + get copyStatus() { + return this.originalResponse.copyStatus; } /** - * Retrieve header value according to shared key sign rules. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/authenticate-with-shared-key + * Only appears when + * x-ms-copy-status is failed or pending. Describes cause of fatal or + * non-fatal copy operation failure. * - * @param request - - * @param headerName - + * @readonly */ - getHeaderValueToSign(request, headerName) { - const value = request.headers.get(headerName); - if (!value) { - return ""; - } - // When using version 2015-02-21 or later, if Content-Length is zero, then - // set the Content-Length part of the StringToSign to an empty string. - // https://docs.microsoft.com/en-us/rest/api/storageservices/authenticate-with-shared-key - if (headerName === HeaderConstants.CONTENT_LENGTH && value === "0") { - return ""; - } - return value; + get copyStatusDescription() { + return this.originalResponse.copyStatusDescription; } /** - * To construct the CanonicalizedHeaders portion of the signature string, follow these steps: - * 1. Retrieve all headers for the resource that begin with x-ms-, including the x-ms-date header. - * 2. Convert each HTTP header name to lowercase. - * 3. Sort the headers lexicographically by header name, in ascending order. - * Each header may appear only once in the string. - * 4. Replace any linear whitespace in the header value with a single space. - * 5. Trim any whitespace around the colon in the header. - * 6. Finally, append a new-line character to each canonicalized header in the resulting list. - * Construct the CanonicalizedHeaders string by concatenating all headers in this list into a single string. 
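The copyStatus and copyStatusDescription getters above surface the x-ms-copy-* headers. One way to observe them is to start a copy and poll properties; a sketch with placeholder URLs (the source must be readable by the service, e.g. a public or SAS URL):

import { BlobServiceClient } from "@azure/storage-blob";

async function copyAndInspect(): Promise<void> {
  const service = BlobServiceClient.fromConnectionString(process.env.AZURE_STORAGE_CONNECTION_STRING ?? "");
  const dest = service.getContainerClient("backups").getBlobClient("copy-of-data.bin");
  // beginCopyFromURL returns a poller that resolves once copyStatus is terminal.
  const poller = await dest.beginCopyFromURL("https://example.blob.core.windows.net/src/data.bin?<sas>");
  await poller.pollUntilDone();
  const props = await dest.getProperties();
  console.log(props.copyId, props.copyStatus, props.copyProgress); // e.g. "<id> success 1024/1024"
}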
+ * When a blob is leased, + * specifies whether the lease is of infinite or fixed duration. Possible + * values include: 'infinite', 'fixed'. * - * @param request - + * @readonly */ - getCanonicalizedHeadersString(request) { - let headersArray = request.headers.headersArray().filter((value) => { - return value.name.toLowerCase().startsWith(HeaderConstants.PREFIX_FOR_STORAGE); - }); - headersArray.sort((a, b) => { - return a.name.toLowerCase().localeCompare(b.name.toLowerCase()); - }); - // Remove duplicate headers - headersArray = headersArray.filter((value, index, array) => { - if (index > 0 && value.name.toLowerCase() === array[index - 1].name.toLowerCase()) { - return false; - } - return true; - }); - let canonicalizedHeadersStringToSign = ""; - headersArray.forEach((header) => { - canonicalizedHeadersStringToSign += `${header.name - .toLowerCase() - .trimRight()}:${header.value.trimLeft()}\n`; - }); - return canonicalizedHeadersStringToSign; + get leaseDuration() { + return this.originalResponse.leaseDuration; } /** - * Retrieves the webResource canonicalized resource string. + * Lease state of the blob. Possible + * values include: 'available', 'leased', 'expired', 'breaking', 'broken'. * - * @param request - + * @readonly */ - getCanonicalizedResourceString(request) { - const path = getURLPath(request.url) || "/"; - let canonicalizedResourceString = ""; - canonicalizedResourceString += `/${this.factory.accountName}${path}`; - const queries = getURLQueries(request.url); - const lowercaseQueries = {}; - if (queries) { - const queryKeys = []; - for (const key in queries) { - if (Object.prototype.hasOwnProperty.call(queries, key)) { - const lowercaseKey = key.toLowerCase(); - lowercaseQueries[lowercaseKey] = queries[key]; - queryKeys.push(lowercaseKey); - } - } - queryKeys.sort(); - for (const key of queryKeys) { - canonicalizedResourceString += `\n${key}:${decodeURIComponent(lowercaseQueries[key])}`; - } - } - return canonicalizedResourceString; + get leaseState() { + return this.originalResponse.leaseState; } -} - -// Copyright (c) Microsoft Corporation. -/** - * ONLY AVAILABLE IN NODE.JS RUNTIME. - * - * StorageSharedKeyCredential for account key authorization of Azure Storage service. - */ -class StorageSharedKeyCredential extends Credential { /** - * Creates an instance of StorageSharedKeyCredential. - * @param accountName - - * @param accountKey - + * The current lease status of the + * blob. Possible values include: 'locked', 'unlocked'. + * + * @readonly */ - constructor(accountName, accountKey) { - super(); - this.accountName = accountName; - this.accountKey = Buffer.from(accountKey, "base64"); + get leaseStatus() { + return this.originalResponse.leaseStatus; } /** - * Creates a StorageSharedKeyCredentialPolicy object. + * A UTC date/time value generated by the service that + * indicates the time at which the response was initiated. * - * @param nextPolicy - - * @param options - + * @readonly */ - create(nextPolicy, options) { - return new StorageSharedKeyCredentialPolicy(nextPolicy, options, this); + get date() { + return this.originalResponse.date; } /** - * Generates a hash signature for an HTTP request or for a SAS. + * The number of committed blocks + * present in the blob. This header is returned only for append blobs. 
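The six CanonicalizedHeaders steps quoted in the removed comment above are easy to misread in flattened form; a standalone sketch of the same algorithm (not the SDK's exact implementation, which also de-duplicates repeated headers from its header array):

// Sketch of the CanonicalizedHeaders construction described above.
function canonicalizedHeaders(headers: Record<string, string>): string {
  return Object.entries(headers)
    .map(([name, value]): [string, string] => [name.toLowerCase(), value]) // step 2: lowercase
    .filter(([name]) => name.startsWith("x-ms-"))                          // step 1: x-ms-* only
    .sort(([a], [b]) => a.localeCompare(b))                                // step 3: sort ascending
    .map(
      ([name, value]) =>
        `${name.trimEnd()}:${value.replace(/\s+/g, " ").trimStart()}\n`    // steps 4-6: whitespace + newline
    )
    .join("");
}

// canonicalizedHeaders({ "x-ms-version": "2021-10-04", "X-MS-Date": "Tue, 18 Jul 2023 00:00:00 GMT" })
// => "x-ms-date:Tue, 18 Jul 2023 00:00:00 GMT\nx-ms-version:2021-10-04\n"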
* - * @param stringToSign - + * @readonly */ - computeHMACSHA256(stringToSign) { - return crypto.createHmac("sha256", this.accountKey).update(stringToSign, "utf8").digest("base64"); + get blobCommittedBlockCount() { + return this.originalResponse.blobCommittedBlockCount; } -} - -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ -const packageName = "azure-storage-blob"; -const packageVersion = "12.12.0"; -class StorageClientContext extends coreHttp__namespace.ServiceClient { /** - * Initializes a new instance of the StorageClientContext class. - * @param url The URL of the service account, container, or blob that is the target of the desired - * operation. - * @param options The parameter options + * The ETag contains a value that you can use to + * perform operations conditionally, in quotes. + * + * @readonly */ - constructor(url, options) { - if (url === undefined) { - throw new Error("'url' cannot be null"); - } - // Initializing default values for options - if (!options) { - options = {}; - } - if (!options.userAgent) { - const defaultUserAgent = coreHttp__namespace.getDefaultUserAgentValue(); - options.userAgent = `${packageName}/${packageVersion} ${defaultUserAgent}`; - } - super(undefined, options); - this.requestContentType = "application/json; charset=utf-8"; - this.baseUri = options.endpoint || "{url}"; - // Parameter assignments - this.url = url; - // Assigning values to Constant parameters - this.version = options.version || "2021-10-04"; + get etag() { + return this.originalResponse.etag; } -} - -// Copyright (c) Microsoft Corporation. -/** - * A StorageClient represents a based URL class for {@link BlobServiceClient}, {@link ContainerClient} - * and etc. - */ -class StorageClient { /** - * Creates an instance of StorageClient. - * @param url - url to resource - * @param pipeline - request policy pipeline. + * The number of tags associated with the blob + * + * @readonly */ - constructor(url, pipeline) { - // URL should be encoded and only once, protocol layer shouldn't encode URL again - this.url = escapeURLPath(url); - this.accountName = getAccountNameFromUrl(url); - this.pipeline = pipeline; - this.storageClientContext = new StorageClientContext(this.url, pipeline.toServiceClientOptions()); - this.isHttps = iEqual(getURLScheme(this.url) || "", "https"); - this.credential = new AnonymousCredential(); - for (const factory of this.pipeline.factories) { - if ((coreHttp.isNode && factory instanceof StorageSharedKeyCredential) || - factory instanceof AnonymousCredential) { - this.credential = factory; - } - else if (coreHttp.isTokenCredential(factory.credential)) { - // Only works if the factory has been attached a "credential" property. - // We do that in newPipeline() when using TokenCredential. - this.credential = factory.credential; - } - } - // Override protocol layer's default content-type - const storageClientContext = this.storageClientContext; - storageClientContext.requestContentType = undefined; + get tagCount() { + return this.originalResponse.tagCount; } -} - -// Copyright (c) Microsoft Corporation. -/** - * Creates a span using the global tracer. 
- * @internal - */ -const createSpan = coreTracing.createSpanFunction({ - packagePrefix: "Azure.Storage.Blob", - namespace: "Microsoft.Storage", -}); -/** - * @internal - * - * Adapt the tracing options from OperationOptions to what they need to be for - * RequestOptionsBase (when we update to later OpenTelemetry versions this is now - * two separate fields, not just one). - */ -function convertTracingToRequestOptionsBase(options) { - var _a, _b; - return { - // By passing spanOptions if they exist at runtime, we're backwards compatible with @azure/core-tracing@preview.13 and earlier. - spanOptions: (_a = options === null || options === void 0 ? void 0 : options.tracingOptions) === null || _a === void 0 ? void 0 : _a.spanOptions, - tracingContext: (_b = options === null || options === void 0 ? void 0 : options.tracingOptions) === null || _b === void 0 ? void 0 : _b.tracingContext, - }; -} - -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -/** - * ONLY AVAILABLE IN NODE.JS RUNTIME. - * - * This is a helper class to construct a string representing the permissions granted by a ServiceSAS to a blob. Setting - * a value to true means that any SAS which uses these permissions will grant permissions for that operation. Once all - * the values are set, this should be serialized with toString and set as the permissions field on a - * {@link BlobSASSignatureValues} object. It is possible to construct the permissions string without this class, but - * the order of the permissions is particular and this class guarantees correctness. - */ -class BlobSASPermissions { - constructor() { - /** - * Specifies Read access granted. - */ - this.read = false; - /** - * Specifies Add access granted. - */ - this.add = false; - /** - * Specifies Create access granted. - */ - this.create = false; - /** - * Specifies Write access granted. - */ - this.write = false; - /** - * Specifies Delete access granted. - */ - this.delete = false; - /** - * Specifies Delete version access granted. - */ - this.deleteVersion = false; - /** - * Specfies Tag access granted. - */ - this.tag = false; - /** - * Specifies Move access granted. - */ - this.move = false; - /** - * Specifies Execute access granted. - */ - this.execute = false; - /** - * Specifies SetImmutabilityPolicy access granted. - */ - this.setImmutabilityPolicy = false; - /** - * Specifies that Permanent Delete is permitted. - */ - this.permanentDelete = false; + /** + * The error code. + * + * @readonly + */ + get errorCode() { + return this.originalResponse.errorCode; + } + /** + * The value of this header is set to + * true if the file data and application metadata are completely encrypted + * using the specified algorithm. Otherwise, the value is set to false (when + * the file is unencrypted, or if only parts of the file/application metadata + * are encrypted). + * + * @readonly + */ + get isServerEncrypted() { + return this.originalResponse.isServerEncrypted; + } + /** + * If the blob has a MD5 hash, and if + * request contains range header (Range or x-ms-range), this response header + * is returned with the value of the whole blob's MD5 value. This value may + * or may not be equal to the value returned in Content-MD5 header, with the + * latter calculated from the requested range. + * + * @readonly + */ + get blobContentMD5() { + return this.originalResponse.blobContentMD5; } /** - * Creates a {@link BlobSASPermissions} from the specified permissions string. 
This method will throw an - * Error if it encounters a character that does not correspond to a valid permission. + * Returns the date and time the file was last + * modified. Any operation that modifies the file or its properties updates + * the last modified time. * - * @param permissions - + * @readonly */ - static parse(permissions) { - const blobSASPermissions = new BlobSASPermissions(); - for (const char of permissions) { - switch (char) { - case "r": - blobSASPermissions.read = true; - break; - case "a": - blobSASPermissions.add = true; - break; - case "c": - blobSASPermissions.create = true; - break; - case "w": - blobSASPermissions.write = true; - break; - case "d": - blobSASPermissions.delete = true; - break; - case "x": - blobSASPermissions.deleteVersion = true; - break; - case "t": - blobSASPermissions.tag = true; - break; - case "m": - blobSASPermissions.move = true; - break; - case "e": - blobSASPermissions.execute = true; - break; - case "i": - blobSASPermissions.setImmutabilityPolicy = true; - break; - case "y": - blobSASPermissions.permanentDelete = true; - break; - default: - throw new RangeError(`Invalid permission: ${char}`); - } - } - return blobSASPermissions; + get lastModified() { + return this.originalResponse.lastModified; } /** - * Creates a {@link BlobSASPermissions} from a raw object which contains same keys as it - * and boolean values for them. + * Returns the UTC date and time generated by the service that indicates the time at which the blob was + * last read or written to. * - * @param permissionLike - + * @readonly */ - static from(permissionLike) { - const blobSASPermissions = new BlobSASPermissions(); - if (permissionLike.read) { - blobSASPermissions.read = true; - } - if (permissionLike.add) { - blobSASPermissions.add = true; - } - if (permissionLike.create) { - blobSASPermissions.create = true; - } - if (permissionLike.write) { - blobSASPermissions.write = true; - } - if (permissionLike.delete) { - blobSASPermissions.delete = true; - } - if (permissionLike.deleteVersion) { - blobSASPermissions.deleteVersion = true; - } - if (permissionLike.tag) { - blobSASPermissions.tag = true; - } - if (permissionLike.move) { - blobSASPermissions.move = true; - } - if (permissionLike.execute) { - blobSASPermissions.execute = true; - } - if (permissionLike.setImmutabilityPolicy) { - blobSASPermissions.setImmutabilityPolicy = true; - } - if (permissionLike.permanentDelete) { - blobSASPermissions.permanentDelete = true; - } - return blobSASPermissions; + get lastAccessed() { + return this.originalResponse.lastAccessed; } /** - * Converts the given permissions to a string. Using this method will guarantee the permissions are in an - * order accepted by the service. + * A name-value pair + * to associate with a file storage object. 
* - * @returns A string which represents the BlobSASPermissions + * @readonly */ - toString() { - const permissions = []; - if (this.read) { - permissions.push("r"); - } - if (this.add) { - permissions.push("a"); - } - if (this.create) { - permissions.push("c"); - } - if (this.write) { - permissions.push("w"); - } - if (this.delete) { - permissions.push("d"); - } - if (this.deleteVersion) { - permissions.push("x"); - } - if (this.tag) { - permissions.push("t"); - } - if (this.move) { - permissions.push("m"); - } - if (this.execute) { - permissions.push("e"); - } - if (this.setImmutabilityPolicy) { - permissions.push("i"); - } - if (this.permanentDelete) { - permissions.push("y"); - } - return permissions.join(""); - } -} - -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -/** - * This is a helper class to construct a string representing the permissions granted by a ServiceSAS to a container. - * Setting a value to true means that any SAS which uses these permissions will grant permissions for that operation. - * Once all the values are set, this should be serialized with toString and set as the permissions field on a - * {@link BlobSASSignatureValues} object. It is possible to construct the permissions string without this class, but - * the order of the permissions is particular and this class guarantees correctness. - */ -class ContainerSASPermissions { - constructor() { - /** - * Specifies Read access granted. - */ - this.read = false; - /** - * Specifies Add access granted. - */ - this.add = false; - /** - * Specifies Create access granted. - */ - this.create = false; - /** - * Specifies Write access granted. - */ - this.write = false; - /** - * Specifies Delete access granted. - */ - this.delete = false; - /** - * Specifies Delete version access granted. - */ - this.deleteVersion = false; - /** - * Specifies List access granted. - */ - this.list = false; - /** - * Specfies Tag access granted. - */ - this.tag = false; - /** - * Specifies Move access granted. - */ - this.move = false; - /** - * Specifies Execute access granted. - */ - this.execute = false; - /** - * Specifies SetImmutabilityPolicy access granted. - */ - this.setImmutabilityPolicy = false; - /** - * Specifies that Permanent Delete is permitted. - */ - this.permanentDelete = false; - /** - * Specifies that Filter Blobs by Tags is permitted. - */ - this.filterByTags = false; + get metadata() { + return this.originalResponse.metadata; } /** - * Creates an {@link ContainerSASPermissions} from the specified permissions string. This method will throw an - * Error if it encounters a character that does not correspond to a valid permission. + * This header uniquely identifies the request + * that was made and can be used for troubleshooting the request. 
* - * @param permissions - + * @readonly */ - static parse(permissions) { - const containerSASPermissions = new ContainerSASPermissions(); - for (const char of permissions) { - switch (char) { - case "r": - containerSASPermissions.read = true; - break; - case "a": - containerSASPermissions.add = true; - break; - case "c": - containerSASPermissions.create = true; - break; - case "w": - containerSASPermissions.write = true; - break; - case "d": - containerSASPermissions.delete = true; - break; - case "l": - containerSASPermissions.list = true; - break; - case "t": - containerSASPermissions.tag = true; - break; - case "x": - containerSASPermissions.deleteVersion = true; - break; - case "m": - containerSASPermissions.move = true; - break; - case "e": - containerSASPermissions.execute = true; - break; - case "i": - containerSASPermissions.setImmutabilityPolicy = true; - break; - case "y": - containerSASPermissions.permanentDelete = true; - break; - case "f": - containerSASPermissions.filterByTags = true; - break; - default: - throw new RangeError(`Invalid permission ${char}`); - } - } - return containerSASPermissions; + get requestId() { + return this.originalResponse.requestId; } /** - * Creates a {@link ContainerSASPermissions} from a raw object which contains same keys as it - * and boolean values for them. + * If a client request id header is sent in the request, this header will be present in the + * response with the same value. * - * @param permissionLike - + * @readonly */ - static from(permissionLike) { - const containerSASPermissions = new ContainerSASPermissions(); - if (permissionLike.read) { - containerSASPermissions.read = true; - } - if (permissionLike.add) { - containerSASPermissions.add = true; - } - if (permissionLike.create) { - containerSASPermissions.create = true; - } - if (permissionLike.write) { - containerSASPermissions.write = true; - } - if (permissionLike.delete) { - containerSASPermissions.delete = true; - } - if (permissionLike.list) { - containerSASPermissions.list = true; - } - if (permissionLike.deleteVersion) { - containerSASPermissions.deleteVersion = true; - } - if (permissionLike.tag) { - containerSASPermissions.tag = true; - } - if (permissionLike.move) { - containerSASPermissions.move = true; - } - if (permissionLike.execute) { - containerSASPermissions.execute = true; - } - if (permissionLike.setImmutabilityPolicy) { - containerSASPermissions.setImmutabilityPolicy = true; - } - if (permissionLike.permanentDelete) { - containerSASPermissions.permanentDelete = true; - } - if (permissionLike.filterByTags) { - containerSASPermissions.filterByTags = true; - } - return containerSASPermissions; + get clientRequestId() { + return this.originalResponse.clientRequestId; } /** - * Converts the given permissions to a string. Using this method will guarantee the permissions are in an - * order accepted by the service. + * Indicates the version of the Blob service used + * to execute the request. * - * The order of the characters should be as specified here to ensure correctness. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas + * @readonly + */ + get version() { + return this.originalResponse.version; + } + /** + * Indicates the versionId of the downloaded blob version. 
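The removed BlobSASPermissions/ContainerSASPermissions hunks above implement parse, from, and toString; the point of round-tripping through them is that toString always emits the service-mandated character order and parse rejects unknown characters. A small sketch using the public exports:

import { BlobSASPermissions, ContainerSASPermissions } from "@azure/storage-blob";

const blobPerms = BlobSASPermissions.parse("wcar"); // any order in...
console.log(blobPerms.toString());                  // "racw" -- canonical order out

const containerPerms = ContainerSASPermissions.from({ read: true, list: true });
console.log(containerPerms.toString());             // "rl"

// BlobSASPermissions.parse("z") throws RangeError: Invalid permission: z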
* + + * @readonly */ - toString() { - const permissions = []; - if (this.read) { - permissions.push("r"); - } - if (this.add) { - permissions.push("a"); - } - if (this.create) { - permissions.push("c"); - } - if (this.write) { - permissions.push("w"); - } - if (this.delete) { - permissions.push("d"); - } - if (this.deleteVersion) { - permissions.push("x"); - } - if (this.list) { - permissions.push("l"); - } - if (this.tag) { - permissions.push("t"); - } - if (this.move) { - permissions.push("m"); - } - if (this.execute) { - permissions.push("e"); - } - if (this.setImmutabilityPolicy) { - permissions.push("i"); - } - if (this.permanentDelete) { - permissions.push("y"); - } - if (this.filterByTags) { - permissions.push("f"); - } - return permissions.join(""); + get versionId() { + return this.originalResponse.versionId; } -} - -// Copyright (c) Microsoft Corporation. -/** - * ONLY AVAILABLE IN NODE.JS RUNTIME. - * - * UserDelegationKeyCredential is only used for generation of user delegation SAS. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas - */ -class UserDelegationKeyCredential { /** - * Creates an instance of UserDelegationKeyCredential. - * @param accountName - - * @param userDelegationKey - + * Indicates whether this version of the blob is the current version. + * + * @readonly */ - constructor(accountName, userDelegationKey) { - this.accountName = accountName; - this.userDelegationKey = userDelegationKey; - this.key = Buffer.from(userDelegationKey.value, "base64"); + get isCurrentVersion() { + return this.originalResponse.isCurrentVersion; } /** - * Generates a hash signature for an HTTP request or for a SAS. + * The SHA-256 hash of the encryption key used to encrypt the blob. This value is only returned + * when the blob was encrypted with a customer-provided key. * - * @param stringToSign - + * @readonly */ - computeHMACSHA256(stringToSign) { - // console.log(`stringToSign: ${JSON.stringify(stringToSign)}`); - return crypto.createHmac("sha256", this.key).update(stringToSign, "utf8").digest("base64"); + get encryptionKeySha256() { + return this.originalResponse.encryptionKeySha256; } -} - -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -/** - * Generate SasIPRange format string. For example: - * - * "8.8.8.8" or "1.1.1.1-255.255.255.255" - * - * @param ipRange - - */ -function ipRangeToString(ipRange) { - return ipRange.end ? `${ipRange.start}-${ipRange.end}` : ipRange.start; -} - -// Copyright (c) Microsoft Corporation. -/** - * Protocols for generated SAS. - */ -exports.SASProtocol = void 0; -(function (SASProtocol) { /** - * Protocol that allows HTTPS only + * If the request is to read a specified range and the x-ms-range-get-content-crc64 is set to + * true, then the request returns a crc64 for the range, as long as the range size is less than + * or equal to 4 MB. If both x-ms-range-get-content-crc64 and x-ms-range-get-content-md5 are + * specified in the same request, it will fail with 400 (Bad Request). */ - SASProtocol["Https"] = "https"; + get contentCrc64() { + return this.originalResponse.contentCrc64; + } /** - * Protocol that allows both HTTPS and HTTP + * Object Replication Policy Id of the destination blob. + * + * @readonly */ - SASProtocol["HttpsAndHttp"] = "https,http"; -})(exports.SASProtocol || (exports.SASProtocol = {})); /** - * Represents the components that make up an Azure Storage SAS' query parameters.
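Per the contentCrc64 note above (ranges of at most 4 MB; the crc64 and md5 options are mutually exclusive in one request), a ranged download can ask for a transactional checksum. A sketch with placeholder names:

import { BlobServiceClient } from "@azure/storage-blob";

async function rangedDownloadWithCrc(): Promise<void> {
  const service = BlobServiceClient.fromConnectionString(process.env.AZURE_STORAGE_CONNECTION_STRING ?? "");
  const blob = service.getContainerClient("my-container").getBlobClient("big.bin");
  const fourMB = 4 * 1024 * 1024; // ranges above 4 MB return no checksum
  const response = await blob.download(0, fourMB, {
    rangeGetContentCrc64: true, // requesting rangeGetContentMD5 as well is rejected
  });
  console.log(response.contentCrc64); // Uint8Array | undefined
}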
This type is not constructed directly - * by the user; it is only generated by the {@link AccountSASSignatureValues} and {@link BlobSASSignatureValues} - * types. Once generated, it can be encoded into a {@link String} and appended to a URL directly (though caution should - * be taken here in case there are existing query parameters, which might affect the appropriate means of appending - * these query parameters). - * - * NOTE: Instances of this class are immutable. - */ -class SASQueryParameters { - constructor(version, signature, permissionsOrOptions, services, resourceTypes, protocol, startsOn, expiresOn, ipRange, identifier, resource, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType, userDelegationKey, preauthorizedAgentObjectId, correlationId, encryptionScope) { - this.version = version; - this.signature = signature; - if (permissionsOrOptions !== undefined && typeof permissionsOrOptions !== "string") { - // SASQueryParametersOptions - this.permissions = permissionsOrOptions.permissions; - this.services = permissionsOrOptions.services; - this.resourceTypes = permissionsOrOptions.resourceTypes; - this.protocol = permissionsOrOptions.protocol; - this.startsOn = permissionsOrOptions.startsOn; - this.expiresOn = permissionsOrOptions.expiresOn; - this.ipRangeInner = permissionsOrOptions.ipRange; - this.identifier = permissionsOrOptions.identifier; - this.encryptionScope = permissionsOrOptions.encryptionScope; - this.resource = permissionsOrOptions.resource; - this.cacheControl = permissionsOrOptions.cacheControl; - this.contentDisposition = permissionsOrOptions.contentDisposition; - this.contentEncoding = permissionsOrOptions.contentEncoding; - this.contentLanguage = permissionsOrOptions.contentLanguage; - this.contentType = permissionsOrOptions.contentType; - if (permissionsOrOptions.userDelegationKey) { - this.signedOid = permissionsOrOptions.userDelegationKey.signedObjectId; - this.signedTenantId = permissionsOrOptions.userDelegationKey.signedTenantId; - this.signedStartsOn = permissionsOrOptions.userDelegationKey.signedStartsOn; - this.signedExpiresOn = permissionsOrOptions.userDelegationKey.signedExpiresOn; - this.signedService = permissionsOrOptions.userDelegationKey.signedService; - this.signedVersion = permissionsOrOptions.userDelegationKey.signedVersion; - this.preauthorizedAgentObjectId = permissionsOrOptions.preauthorizedAgentObjectId; - this.correlationId = permissionsOrOptions.correlationId; - } - } - else { - this.services = services; - this.resourceTypes = resourceTypes; - this.expiresOn = expiresOn; - this.permissions = permissionsOrOptions; - this.protocol = protocol; - this.startsOn = startsOn; - this.ipRangeInner = ipRange; - this.encryptionScope = encryptionScope; - this.identifier = identifier; - this.resource = resource; - this.cacheControl = cacheControl; - this.contentDisposition = contentDisposition; - this.contentEncoding = contentEncoding; - this.contentLanguage = contentLanguage; - this.contentType = contentType; - if (userDelegationKey) { - this.signedOid = userDelegationKey.signedObjectId; - this.signedTenantId = userDelegationKey.signedTenantId; - this.signedStartsOn = userDelegationKey.signedStartsOn; - this.signedExpiresOn = userDelegationKey.signedExpiresOn; - this.signedService = userDelegationKey.signedService; - this.signedVersion = userDelegationKey.signedVersion; - this.preauthorizedAgentObjectId = preauthorizedAgentObjectId; - this.correlationId = correlationId; - } - } + get objectReplicationDestinationPolicyId() { + 
return this.originalResponse.objectReplicationDestinationPolicyId; } /** - * Optional. IP range allowed for this SAS. + * Parsed Object Replication Policy Id, Rule Id(s) and status of the source blob. * * @readonly */ - get ipRange() { - if (this.ipRangeInner) { - return { - end: this.ipRangeInner.end, - start: this.ipRangeInner.start, - }; - } - return undefined; + get objectReplicationSourceProperties() { + return this.originalResponse.objectReplicationSourceProperties; } /** - * Encodes all SAS query parameters into a string that can be appended to a URL. + * If this blob has been sealed. * + * @readonly */ - toString() { - const params = [ - "sv", - "ss", - "srt", - "spr", - "st", - "se", - "sip", - "si", - "ses", - "skoid", - "sktid", - "skt", - "ske", - "sks", - "skv", - "sr", - "sp", - "sig", - "rscc", - "rscd", - "rsce", - "rscl", - "rsct", - "saoid", - "scid", - ]; - const queries = []; - for (const param of params) { - switch (param) { - case "sv": - this.tryAppendQueryParameter(queries, param, this.version); - break; - case "ss": - this.tryAppendQueryParameter(queries, param, this.services); - break; - case "srt": - this.tryAppendQueryParameter(queries, param, this.resourceTypes); - break; - case "spr": - this.tryAppendQueryParameter(queries, param, this.protocol); - break; - case "st": - this.tryAppendQueryParameter(queries, param, this.startsOn ? truncatedISO8061Date(this.startsOn, false) : undefined); - break; - case "se": - this.tryAppendQueryParameter(queries, param, this.expiresOn ? truncatedISO8061Date(this.expiresOn, false) : undefined); - break; - case "sip": - this.tryAppendQueryParameter(queries, param, this.ipRange ? ipRangeToString(this.ipRange) : undefined); - break; - case "si": - this.tryAppendQueryParameter(queries, param, this.identifier); - break; - case "ses": - this.tryAppendQueryParameter(queries, param, this.encryptionScope); - break; - case "skoid": // Signed object ID - this.tryAppendQueryParameter(queries, param, this.signedOid); - break; - case "sktid": // Signed tenant ID - this.tryAppendQueryParameter(queries, param, this.signedTenantId); - break; - case "skt": // Signed key start time - this.tryAppendQueryParameter(queries, param, this.signedStartsOn ? truncatedISO8061Date(this.signedStartsOn, false) : undefined); - break; - case "ske": // Signed key expiry time - this.tryAppendQueryParameter(queries, param, this.signedExpiresOn ? 
truncatedISO8061Date(this.signedExpiresOn, false) : undefined); - break; - case "sks": // Signed key service - this.tryAppendQueryParameter(queries, param, this.signedService); - break; - case "skv": // Signed key version - this.tryAppendQueryParameter(queries, param, this.signedVersion); - break; - case "sr": - this.tryAppendQueryParameter(queries, param, this.resource); - break; - case "sp": - this.tryAppendQueryParameter(queries, param, this.permissions); - break; - case "sig": - this.tryAppendQueryParameter(queries, param, this.signature); - break; - case "rscc": - this.tryAppendQueryParameter(queries, param, this.cacheControl); - break; - case "rscd": - this.tryAppendQueryParameter(queries, param, this.contentDisposition); - break; - case "rsce": - this.tryAppendQueryParameter(queries, param, this.contentEncoding); - break; - case "rscl": - this.tryAppendQueryParameter(queries, param, this.contentLanguage); - break; - case "rsct": - this.tryAppendQueryParameter(queries, param, this.contentType); - break; - case "saoid": - this.tryAppendQueryParameter(queries, param, this.preauthorizedAgentObjectId); - break; - case "scid": - this.tryAppendQueryParameter(queries, param, this.correlationId); - break; - } - } - return queries.join("&"); + get isSealed() { + return this.originalResponse.isSealed; } /** - * A private helper method used to filter and append query key/value pairs into an array. + * UTC date/time value generated by the service that indicates the time at which the blob immutability policy will expire. * - * @param queries - - * @param key - - * @param value - + * @readonly */ - tryAppendQueryParameter(queries, key, value) { - if (!value) { - return; - } - key = encodeURIComponent(key); - value = encodeURIComponent(value); - if (key.length > 0 && value.length > 0) { - queries.push(`${key}=${value}`); - } - } -} - -// Copyright (c) Microsoft Corporation. -function generateBlobSASQueryParameters(blobSASSignatureValues, sharedKeyCredentialOrUserDelegationKey, accountName) { - const version = blobSASSignatureValues.version ? blobSASSignatureValues.version : SERVICE_VERSION; - const sharedKeyCredential = sharedKeyCredentialOrUserDelegationKey instanceof StorageSharedKeyCredential - ? sharedKeyCredentialOrUserDelegationKey - : undefined; - let userDelegationKeyCredential; - if (sharedKeyCredential === undefined && accountName !== undefined) { - userDelegationKeyCredential = new UserDelegationKeyCredential(accountName, sharedKeyCredentialOrUserDelegationKey); - } - if (sharedKeyCredential === undefined && userDelegationKeyCredential === undefined) { - throw TypeError("Invalid sharedKeyCredential, userDelegationKey or accountName."); - } - // Version 2020-12-06 adds support for encryptionscope in SAS. - if (version >= "2020-12-06") { - if (sharedKeyCredential !== undefined) { - return generateBlobSASQueryParameters20201206(blobSASSignatureValues, sharedKeyCredential); - } - else { - return generateBlobSASQueryParametersUDK20201206(blobSASSignatureValues, userDelegationKeyCredential); - } + get immutabilityPolicyExpiresOn() { + return this.originalResponse.immutabilityPolicyExpiresOn; } - // Version 2019-12-12 adds support for the blob tags permission. - // Version 2018-11-09 adds support for the signed resource and signed blob snapshot time fields. 
- // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas#constructing-the-signature-string - if (version >= "2018-11-09") { - if (sharedKeyCredential !== undefined) { - return generateBlobSASQueryParameters20181109(blobSASSignatureValues, sharedKeyCredential); - } - else { - // Version 2020-02-10 delegation SAS signature construction includes preauthorizedAgentObjectId, agentObjectId, correlationId. - if (version >= "2020-02-10") { - return generateBlobSASQueryParametersUDK20200210(blobSASSignatureValues, userDelegationKeyCredential); - } - else { - return generateBlobSASQueryParametersUDK20181109(blobSASSignatureValues, userDelegationKeyCredential); - } - } + /** + * Indicates immutability policy mode. + * + * @readonly + */ + get immutabilityPolicyMode() { + return this.originalResponse.immutabilityPolicyMode; } - if (version >= "2015-04-05") { - if (sharedKeyCredential !== undefined) { - return generateBlobSASQueryParameters20150405(blobSASSignatureValues, sharedKeyCredential); - } - else { - throw new RangeError("'version' must be >= '2018-11-09' when generating user delegation SAS using user delegation key."); - } + /** + * Indicates if a legal hold is present on the blob. + * + * @readonly + */ + get legalHold() { + return this.originalResponse.legalHold; } - throw new RangeError("'version' must be >= '2015-04-05'."); -} -/** - * ONLY AVAILABLE IN NODE.JS RUNTIME. - * IMPLEMENTATION FOR API VERSION FROM 2015-04-05 AND BEFORE 2018-11-09. - * - * Creates an instance of SASQueryParameters. - * - * Only accepts required settings needed to create a SAS. For optional settings please - * set corresponding properties directly, such as permissions, startsOn and identifier. - * - * WARNING: When identifier is not provided, permissions and expiresOn are required. - * You MUST assign value to identifier or expiresOn & permissions manually if you initial with - * this constructor. - * - * @param blobSASSignatureValues - - * @param sharedKeyCredential - - */ -function generateBlobSASQueryParameters20150405(blobSASSignatureValues, sharedKeyCredential) { - blobSASSignatureValues = SASSignatureValuesSanityCheckAndAutofill(blobSASSignatureValues); - if (!blobSASSignatureValues.identifier && - !(blobSASSignatureValues.permissions && blobSASSignatureValues.expiresOn)) { - throw new RangeError("Must provide 'permissions' and 'expiresOn' for Blob SAS generation when 'identifier' is not provided."); + /** + * The response body as a browser Blob. + * Always undefined in node.js. + * + * @readonly + */ + get contentAsBlob() { + return this.originalResponse.blobBody; } - let resource = "c"; - if (blobSASSignatureValues.blobName) { - resource = "b"; + /** + * The response body as a node.js Readable stream. + * Always undefined in the browser. + * + * It will automatically retry when the internal read stream unexpectedly ends. + * + * @readonly + */ + get readableStreamBody() { + return coreHttp.isNode ? this.blobDownloadStream : undefined; } - // Calling parse and toString guarantees the proper ordering and throws on invalid characters. - let verifiedPermissions; - if (blobSASSignatureValues.permissions) { - if (blobSASSignatureValues.blobName) { - verifiedPermissions = BlobSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); - } - else { - verifiedPermissions = ContainerSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); - } + /** + * The HTTP response.
+ */ + get _response() { + return this.originalResponse._response; } - // Signature is generated on the un-url-encoded values. - const stringToSign = [ - verifiedPermissions ? verifiedPermissions : "", - blobSASSignatureValues.startsOn - ? truncatedISO8061Date(blobSASSignatureValues.startsOn, false) - : "", - blobSASSignatureValues.expiresOn - ? truncatedISO8061Date(blobSASSignatureValues.expiresOn, false) - : "", - getCanonicalName(sharedKeyCredential.accountName, blobSASSignatureValues.containerName, blobSASSignatureValues.blobName), - blobSASSignatureValues.identifier, - blobSASSignatureValues.ipRange ? ipRangeToString(blobSASSignatureValues.ipRange) : "", - blobSASSignatureValues.protocol ? blobSASSignatureValues.protocol : "", - blobSASSignatureValues.version, - blobSASSignatureValues.cacheControl ? blobSASSignatureValues.cacheControl : "", - blobSASSignatureValues.contentDisposition ? blobSASSignatureValues.contentDisposition : "", - blobSASSignatureValues.contentEncoding ? blobSASSignatureValues.contentEncoding : "", - blobSASSignatureValues.contentLanguage ? blobSASSignatureValues.contentLanguage : "", - blobSASSignatureValues.contentType ? blobSASSignatureValues.contentType : "", - ].join("\n"); - const signature = sharedKeyCredential.computeHMACSHA256(stringToSign); - return new SASQueryParameters(blobSASSignatureValues.version, signature, verifiedPermissions, undefined, undefined, blobSASSignatureValues.protocol, blobSASSignatureValues.startsOn, blobSASSignatureValues.expiresOn, blobSASSignatureValues.ipRange, blobSASSignatureValues.identifier, resource, blobSASSignatureValues.cacheControl, blobSASSignatureValues.contentDisposition, blobSASSignatureValues.contentEncoding, blobSASSignatureValues.contentLanguage, blobSASSignatureValues.contentType); } -/** - * ONLY AVAILABLE IN NODE.JS RUNTIME. - * IMPLEMENTATION FOR API VERSION FROM 2018-11-09. - * - * Creates an instance of SASQueryParameters. - * - * Only accepts required settings needed to create a SAS. For optional settings please - * set corresponding properties directly, such as permissions, startsOn and identifier. - * - * WARNING: When identifier is not provided, permissions and expiresOn are required. - * You MUST assign value to identifier or expiresOn & permissions manually if you initial with - * this constructor. - * - * @param blobSASSignatureValues - - * @param sharedKeyCredential - - */ -function generateBlobSASQueryParameters20181109(blobSASSignatureValues, sharedKeyCredential) { - blobSASSignatureValues = SASSignatureValuesSanityCheckAndAutofill(blobSASSignatureValues); - if (!blobSASSignatureValues.identifier && - !(blobSASSignatureValues.permissions && blobSASSignatureValues.expiresOn)) { - throw new RangeError("Must provide 'permissions' and 'expiresOn' for Blob SAS generation when 'identifier' is not provided."); - } - let resource = "c"; - let timestamp = blobSASSignatureValues.snapshotTime; - if (blobSASSignatureValues.blobName) { - resource = "b"; - if (blobSASSignatureValues.snapshotTime) { - resource = "bs"; - } - else if (blobSASSignatureValues.versionId) { - resource = "bv"; - timestamp = blobSASSignatureValues.versionId; - } - } - // Calling parse and toString guarantees the proper ordering and throws on invalid characters. 
- let verifiedPermissions; - if (blobSASSignatureValues.permissions) { - if (blobSASSignatureValues.blobName) { - verifiedPermissions = BlobSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); - } - else { - verifiedPermissions = ContainerSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +const AVRO_SYNC_MARKER_SIZE = 16; +const AVRO_INIT_BYTES = new Uint8Array([79, 98, 106, 1]); +const AVRO_CODEC_KEY = "avro.codec"; +const AVRO_SCHEMA_KEY = "avro.schema"; + +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +class AvroParser { + /** + * Reads a fixed number of bytes from the stream. + * + * @param stream - + * @param length - + * @param options - + */ + static async readFixedBytes(stream, length, options = {}) { + const bytes = await stream.read(length, { abortSignal: options.abortSignal }); + if (bytes.length !== length) { + throw new Error("Hit stream end."); } + return bytes; } - // Signature is generated on the un-url-encoded values. - const stringToSign = [ - verifiedPermissions ? verifiedPermissions : "", - blobSASSignatureValues.startsOn - ? truncatedISO8061Date(blobSASSignatureValues.startsOn, false) - : "", - blobSASSignatureValues.expiresOn - ? truncatedISO8061Date(blobSASSignatureValues.expiresOn, false) - : "", - getCanonicalName(sharedKeyCredential.accountName, blobSASSignatureValues.containerName, blobSASSignatureValues.blobName), - blobSASSignatureValues.identifier, - blobSASSignatureValues.ipRange ? ipRangeToString(blobSASSignatureValues.ipRange) : "", - blobSASSignatureValues.protocol ? blobSASSignatureValues.protocol : "", - blobSASSignatureValues.version, - resource, - timestamp, - blobSASSignatureValues.cacheControl ? blobSASSignatureValues.cacheControl : "", - blobSASSignatureValues.contentDisposition ? blobSASSignatureValues.contentDisposition : "", - blobSASSignatureValues.contentEncoding ? blobSASSignatureValues.contentEncoding : "", - blobSASSignatureValues.contentLanguage ? blobSASSignatureValues.contentLanguage : "", - blobSASSignatureValues.contentType ? blobSASSignatureValues.contentType : "", - ].join("\n"); - const signature = sharedKeyCredential.computeHMACSHA256(stringToSign); - return new SASQueryParameters(blobSASSignatureValues.version, signature, verifiedPermissions, undefined, undefined, blobSASSignatureValues.protocol, blobSASSignatureValues.startsOn, blobSASSignatureValues.expiresOn, blobSASSignatureValues.ipRange, blobSASSignatureValues.identifier, resource, blobSASSignatureValues.cacheControl, blobSASSignatureValues.contentDisposition, blobSASSignatureValues.contentEncoding, blobSASSignatureValues.contentLanguage, blobSASSignatureValues.contentType); -} -/** - * ONLY AVAILABLE IN NODE.JS RUNTIME. - * IMPLEMENTATION FOR API VERSION FROM 2020-12-06. - * - * Creates an instance of SASQueryParameters. - * - * Only accepts required settings needed to create a SAS. For optional settings please - * set corresponding properties directly, such as permissions, startsOn and identifier. - * - * WARNING: When identifier is not provided, permissions and expiresOn are required. - * You MUST assign value to identifier or expiresOn & permissions manually if you initial with - * this constructor. 
- * - * @param blobSASSignatureValues - - * @param sharedKeyCredential - - */ -function generateBlobSASQueryParameters20201206(blobSASSignatureValues, sharedKeyCredential) { - blobSASSignatureValues = SASSignatureValuesSanityCheckAndAutofill(blobSASSignatureValues); - if (!blobSASSignatureValues.identifier && - !(blobSASSignatureValues.permissions && blobSASSignatureValues.expiresOn)) { - throw new RangeError("Must provide 'permissions' and 'expiresOn' for Blob SAS generation when 'identifier' is not provided."); - } - let resource = "c"; - let timestamp = blobSASSignatureValues.snapshotTime; - if (blobSASSignatureValues.blobName) { - resource = "b"; - if (blobSASSignatureValues.snapshotTime) { - resource = "bs"; - } - else if (blobSASSignatureValues.versionId) { - resource = "bv"; - timestamp = blobSASSignatureValues.versionId; - } + /** + * Reads a single byte from the stream. + * + * @param stream - + * @param options - + */ + static async readByte(stream, options = {}) { + const buf = await AvroParser.readFixedBytes(stream, 1, options); + return buf[0]; } - // Calling parse and toString guarantees the proper ordering and throws on invalid characters. - let verifiedPermissions; - if (blobSASSignatureValues.permissions) { - if (blobSASSignatureValues.blobName) { - verifiedPermissions = BlobSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); - } - else { - verifiedPermissions = ContainerSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + // int and long are stored in variable-length zig-zag coding. + // variable-length: https://lucene.apache.org/core/3_5_0/fileformats.html#VInt + // zig-zag: https://developers.google.com/protocol-buffers/docs/encoding?csw=1#types + static async readZigZagLong(stream, options = {}) { + let zigZagEncoded = 0; + let significanceInBit = 0; + let byte, haveMoreByte, significanceInFloat; + do { + byte = await AvroParser.readByte(stream, options); + haveMoreByte = byte & 0x80; + zigZagEncoded |= (byte & 0x7f) << significanceInBit; + significanceInBit += 7; + } while (haveMoreByte && significanceInBit < 28); // bitwise operation only works for 32-bit integers + if (haveMoreByte) { + // Switch to float arithmetic + // eslint-disable-next-line no-self-assign + zigZagEncoded = zigZagEncoded; + significanceInFloat = 268435456; // 2 ** 28. + do { + byte = await AvroParser.readByte(stream, options); + zigZagEncoded += (byte & 0x7f) * significanceInFloat; + significanceInFloat *= 128; // 2 ** 7 + } while (byte & 0x80); + const res = (zigZagEncoded % 2 ? -(zigZagEncoded + 1) : zigZagEncoded) / 2; + if (res < Number.MIN_SAFE_INTEGER || res > Number.MAX_SAFE_INTEGER) { + throw new Error("Integer overflow."); + } + return res; } + return (zigZagEncoded >> 1) ^ -(zigZagEncoded & 1); } - // Signature is generated on the un-url-encoded values. - const stringToSign = [ - verifiedPermissions ? verifiedPermissions : "", - blobSASSignatureValues.startsOn - ? truncatedISO8061Date(blobSASSignatureValues.startsOn, false) - : "", - blobSASSignatureValues.expiresOn - ? truncatedISO8061Date(blobSASSignatureValues.expiresOn, false) - : "", - getCanonicalName(sharedKeyCredential.accountName, blobSASSignatureValues.containerName, blobSASSignatureValues.blobName), - blobSASSignatureValues.identifier, - blobSASSignatureValues.ipRange ? ipRangeToString(blobSASSignatureValues.ipRange) : "", - blobSASSignatureValues.protocol ? 
blobSASSignatureValues.protocol : "", - blobSASSignatureValues.version, - resource, - timestamp, - blobSASSignatureValues.encryptionScope, - blobSASSignatureValues.cacheControl ? blobSASSignatureValues.cacheControl : "", - blobSASSignatureValues.contentDisposition ? blobSASSignatureValues.contentDisposition : "", - blobSASSignatureValues.contentEncoding ? blobSASSignatureValues.contentEncoding : "", - blobSASSignatureValues.contentLanguage ? blobSASSignatureValues.contentLanguage : "", - blobSASSignatureValues.contentType ? blobSASSignatureValues.contentType : "", - ].join("\n"); - const signature = sharedKeyCredential.computeHMACSHA256(stringToSign); - return new SASQueryParameters(blobSASSignatureValues.version, signature, verifiedPermissions, undefined, undefined, blobSASSignatureValues.protocol, blobSASSignatureValues.startsOn, blobSASSignatureValues.expiresOn, blobSASSignatureValues.ipRange, blobSASSignatureValues.identifier, resource, blobSASSignatureValues.cacheControl, blobSASSignatureValues.contentDisposition, blobSASSignatureValues.contentEncoding, blobSASSignatureValues.contentLanguage, blobSASSignatureValues.contentType, undefined, undefined, undefined, blobSASSignatureValues.encryptionScope); -} -/** - * ONLY AVAILABLE IN NODE.JS RUNTIME. - * IMPLEMENTATION FOR API VERSION FROM 2018-11-09. - * - * Creates an instance of SASQueryParameters. - * - * Only accepts required settings needed to create a SAS. For optional settings please - * set corresponding properties directly, such as permissions, startsOn. - * - * WARNING: identifier will be ignored, permissions and expiresOn are required. - * - * @param blobSASSignatureValues - - * @param userDelegationKeyCredential - - */ -function generateBlobSASQueryParametersUDK20181109(blobSASSignatureValues, userDelegationKeyCredential) { - blobSASSignatureValues = SASSignatureValuesSanityCheckAndAutofill(blobSASSignatureValues); - // Stored access policies are not supported for a user delegation SAS. - if (!blobSASSignatureValues.permissions || !blobSASSignatureValues.expiresOn) { - throw new RangeError("Must provide 'permissions' and 'expiresOn' for Blob SAS generation when generating user delegation SAS."); - } - let resource = "c"; - let timestamp = blobSASSignatureValues.snapshotTime; - if (blobSASSignatureValues.blobName) { - resource = "b"; - if (blobSASSignatureValues.snapshotTime) { - resource = "bs"; - } - else if (blobSASSignatureValues.versionId) { - resource = "bv"; - timestamp = blobSASSignatureValues.versionId; - } + static async readLong(stream, options = {}) { + return AvroParser.readZigZagLong(stream, options); } - // Calling parse and toString guarantees the proper ordering and throws on invalid characters. - let verifiedPermissions; - if (blobSASSignatureValues.permissions) { - if (blobSASSignatureValues.blobName) { - verifiedPermissions = BlobSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + static async readInt(stream, options = {}) { + return AvroParser.readZigZagLong(stream, options); + } + static async readNull() { + return null; + } + static async readBoolean(stream, options = {}) { + const b = await AvroParser.readByte(stream, options); + if (b === 1) { + return true; + } + else if (b === 0) { + return false; } else { - verifiedPermissions = ContainerSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + throw new Error("Byte was not a boolean."); } } - // Signature is generated on the un-url-encoded values. - const stringToSign = [ - verifiedPermissions ? 
verifiedPermissions : "", - blobSASSignatureValues.startsOn - ? truncatedISO8061Date(blobSASSignatureValues.startsOn, false) - : "", - blobSASSignatureValues.expiresOn - ? truncatedISO8061Date(blobSASSignatureValues.expiresOn, false) - : "", - getCanonicalName(userDelegationKeyCredential.accountName, blobSASSignatureValues.containerName, blobSASSignatureValues.blobName), - userDelegationKeyCredential.userDelegationKey.signedObjectId, - userDelegationKeyCredential.userDelegationKey.signedTenantId, - userDelegationKeyCredential.userDelegationKey.signedStartsOn - ? truncatedISO8061Date(userDelegationKeyCredential.userDelegationKey.signedStartsOn, false) - : "", - userDelegationKeyCredential.userDelegationKey.signedExpiresOn - ? truncatedISO8061Date(userDelegationKeyCredential.userDelegationKey.signedExpiresOn, false) - : "", - userDelegationKeyCredential.userDelegationKey.signedService, - userDelegationKeyCredential.userDelegationKey.signedVersion, - blobSASSignatureValues.ipRange ? ipRangeToString(blobSASSignatureValues.ipRange) : "", - blobSASSignatureValues.protocol ? blobSASSignatureValues.protocol : "", - blobSASSignatureValues.version, - resource, - timestamp, - blobSASSignatureValues.cacheControl, - blobSASSignatureValues.contentDisposition, - blobSASSignatureValues.contentEncoding, - blobSASSignatureValues.contentLanguage, - blobSASSignatureValues.contentType, - ].join("\n"); - const signature = userDelegationKeyCredential.computeHMACSHA256(stringToSign); - return new SASQueryParameters(blobSASSignatureValues.version, signature, verifiedPermissions, undefined, undefined, blobSASSignatureValues.protocol, blobSASSignatureValues.startsOn, blobSASSignatureValues.expiresOn, blobSASSignatureValues.ipRange, blobSASSignatureValues.identifier, resource, blobSASSignatureValues.cacheControl, blobSASSignatureValues.contentDisposition, blobSASSignatureValues.contentEncoding, blobSASSignatureValues.contentLanguage, blobSASSignatureValues.contentType, userDelegationKeyCredential.userDelegationKey); -} -/** - * ONLY AVAILABLE IN NODE.JS RUNTIME. - * IMPLEMENTATION FOR API VERSION FROM 2020-02-10. - * - * Creates an instance of SASQueryParameters. - * - * Only accepts required settings needed to create a SAS. For optional settings please - * set corresponding properties directly, such as permissions, startsOn. - * - * WARNING: identifier will be ignored, permissions and expiresOn are required. - * - * @param blobSASSignatureValues - - * @param userDelegationKeyCredential - - */ -function generateBlobSASQueryParametersUDK20200210(blobSASSignatureValues, userDelegationKeyCredential) { - blobSASSignatureValues = SASSignatureValuesSanityCheckAndAutofill(blobSASSignatureValues); - // Stored access policies are not supported for a user delegation SAS. 
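The readZigZagLong() comments above point at the two encodings being undone: a variable-length format in which each byte contributes seven payload bits and the high bit flags a continuation, and zig-zag mapping, which interleaves signed values onto unsigned ones (0, -1, 1, -2, ... become 0, 1, 2, 3, ...). A self-contained decoder over an in-memory buffer, illustrative only and limited to values that fit 32-bit bitwise arithmetic (the reader above switches to float arithmetic past 28 significant bits):

    function decodeZigZagVarint(bytes: Uint8Array): number {
        let encoded = 0;
        let shift = 0;
        let i = 0;
        let byte: number;
        do {
            byte = bytes[i++];
            encoded |= (byte & 0x7f) << shift; // low 7 bits carry payload
            shift += 7;
        } while (byte & 0x80); // high bit set: more bytes follow
        return (encoded >> 1) ^ -(encoded & 1); // undo zig-zag
    }

    console.log(decodeZigZagVarint(Uint8Array.from([0x00])));       // 0
    console.log(decodeZigZagVarint(Uint8Array.from([0x01])));       // -1
    console.log(decodeZigZagVarint(Uint8Array.from([0x02])));       // 1
    console.log(decodeZigZagVarint(Uint8Array.from([0xac, 0x02]))); // 150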
- if (!blobSASSignatureValues.permissions || !blobSASSignatureValues.expiresOn) { - throw new RangeError("Must provide 'permissions' and 'expiresOn' for Blob SAS generation when generating user delegation SAS."); + static async readFloat(stream, options = {}) { + const u8arr = await AvroParser.readFixedBytes(stream, 4, options); + const view = new DataView(u8arr.buffer, u8arr.byteOffset, u8arr.byteLength); + return view.getFloat32(0, true); // littleEndian = true } - let resource = "c"; - let timestamp = blobSASSignatureValues.snapshotTime; - if (blobSASSignatureValues.blobName) { - resource = "b"; - if (blobSASSignatureValues.snapshotTime) { - resource = "bs"; - } - else if (blobSASSignatureValues.versionId) { - resource = "bv"; - timestamp = blobSASSignatureValues.versionId; - } + static async readDouble(stream, options = {}) { + const u8arr = await AvroParser.readFixedBytes(stream, 8, options); + const view = new DataView(u8arr.buffer, u8arr.byteOffset, u8arr.byteLength); + return view.getFloat64(0, true); // littleEndian = true } - // Calling parse and toString guarantees the proper ordering and throws on invalid characters. - let verifiedPermissions; - if (blobSASSignatureValues.permissions) { - if (blobSASSignatureValues.blobName) { - verifiedPermissions = BlobSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); - } - else { - verifiedPermissions = ContainerSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + static async readBytes(stream, options = {}) { + const size = await AvroParser.readLong(stream, options); + if (size < 0) { + throw new Error("Bytes size was negative."); } + return stream.read(size, { abortSignal: options.abortSignal }); } - // Signature is generated on the un-url-encoded values. - const stringToSign = [ - verifiedPermissions ? verifiedPermissions : "", - blobSASSignatureValues.startsOn - ? truncatedISO8061Date(blobSASSignatureValues.startsOn, false) - : "", - blobSASSignatureValues.expiresOn - ? truncatedISO8061Date(blobSASSignatureValues.expiresOn, false) - : "", - getCanonicalName(userDelegationKeyCredential.accountName, blobSASSignatureValues.containerName, blobSASSignatureValues.blobName), - userDelegationKeyCredential.userDelegationKey.signedObjectId, - userDelegationKeyCredential.userDelegationKey.signedTenantId, - userDelegationKeyCredential.userDelegationKey.signedStartsOn - ? truncatedISO8061Date(userDelegationKeyCredential.userDelegationKey.signedStartsOn, false) - : "", - userDelegationKeyCredential.userDelegationKey.signedExpiresOn - ? truncatedISO8061Date(userDelegationKeyCredential.userDelegationKey.signedExpiresOn, false) - : "", - userDelegationKeyCredential.userDelegationKey.signedService, - userDelegationKeyCredential.userDelegationKey.signedVersion, - blobSASSignatureValues.preauthorizedAgentObjectId, - undefined, - blobSASSignatureValues.correlationId, - blobSASSignatureValues.ipRange ? ipRangeToString(blobSASSignatureValues.ipRange) : "", - blobSASSignatureValues.protocol ? 
blobSASSignatureValues.protocol : "", - blobSASSignatureValues.version, - resource, - timestamp, - blobSASSignatureValues.cacheControl, - blobSASSignatureValues.contentDisposition, - blobSASSignatureValues.contentEncoding, - blobSASSignatureValues.contentLanguage, - blobSASSignatureValues.contentType, - ].join("\n"); - const signature = userDelegationKeyCredential.computeHMACSHA256(stringToSign); - return new SASQueryParameters(blobSASSignatureValues.version, signature, verifiedPermissions, undefined, undefined, blobSASSignatureValues.protocol, blobSASSignatureValues.startsOn, blobSASSignatureValues.expiresOn, blobSASSignatureValues.ipRange, blobSASSignatureValues.identifier, resource, blobSASSignatureValues.cacheControl, blobSASSignatureValues.contentDisposition, blobSASSignatureValues.contentEncoding, blobSASSignatureValues.contentLanguage, blobSASSignatureValues.contentType, userDelegationKeyCredential.userDelegationKey, blobSASSignatureValues.preauthorizedAgentObjectId, blobSASSignatureValues.correlationId); -} -/** - * ONLY AVAILABLE IN NODE.JS RUNTIME. - * IMPLEMENTATION FOR API VERSION FROM 2020-12-06. - * - * Creates an instance of SASQueryParameters. - * - * Only accepts required settings needed to create a SAS. For optional settings please - * set corresponding properties directly, such as permissions, startsOn. - * - * WARNING: identifier will be ignored, permissions and expiresOn are required. - * - * @param blobSASSignatureValues - - * @param userDelegationKeyCredential - - */ -function generateBlobSASQueryParametersUDK20201206(blobSASSignatureValues, userDelegationKeyCredential) { - blobSASSignatureValues = SASSignatureValuesSanityCheckAndAutofill(blobSASSignatureValues); - // Stored access policies are not supported for a user delegation SAS. - if (!blobSASSignatureValues.permissions || !blobSASSignatureValues.expiresOn) { - throw new RangeError("Must provide 'permissions' and 'expiresOn' for Blob SAS generation when generating user delegation SAS."); + static async readString(stream, options = {}) { + const u8arr = await AvroParser.readBytes(stream, options); + const utf8decoder = new TextDecoder(); + return utf8decoder.decode(u8arr); } - let resource = "c"; - let timestamp = blobSASSignatureValues.snapshotTime; - if (blobSASSignatureValues.blobName) { - resource = "b"; - if (blobSASSignatureValues.snapshotTime) { - resource = "bs"; + static async readMapPair(stream, readItemMethod, options = {}) { + const key = await AvroParser.readString(stream, options); + // FUTURE: this won't work with readFixed (currently not supported) which needs a length as the parameter. 
+ const value = await readItemMethod(stream, options); + return { key, value }; + } + static async readMap(stream, readItemMethod, options = {}) { + const readPairMethod = (s, opts = {}) => { + return AvroParser.readMapPair(s, readItemMethod, opts); + }; + const pairs = await AvroParser.readArray(stream, readPairMethod, options); + const dict = {}; + for (const pair of pairs) { + dict[pair.key] = pair.value; } - else if (blobSASSignatureValues.versionId) { - resource = "bv"; - timestamp = blobSASSignatureValues.versionId; + return dict; + } + static async readArray(stream, readItemMethod, options = {}) { + const items = []; + for (let count = await AvroParser.readLong(stream, options); count !== 0; count = await AvroParser.readLong(stream, options)) { + if (count < 0) { + // Ignore block sizes + await AvroParser.readLong(stream, options); + count = -count; + } + while (count--) { + const item = await readItemMethod(stream, options); + items.push(item); + } } + return items; } - // Calling parse and toString guarantees the proper ordering and throws on invalid characters. - let verifiedPermissions; - if (blobSASSignatureValues.permissions) { - if (blobSASSignatureValues.blobName) { - verifiedPermissions = BlobSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); +} +var AvroComplex; +(function (AvroComplex) { + AvroComplex["RECORD"] = "record"; + AvroComplex["ENUM"] = "enum"; + AvroComplex["ARRAY"] = "array"; + AvroComplex["MAP"] = "map"; + AvroComplex["UNION"] = "union"; + AvroComplex["FIXED"] = "fixed"; +})(AvroComplex || (AvroComplex = {})); +var AvroPrimitive; +(function (AvroPrimitive) { + AvroPrimitive["NULL"] = "null"; + AvroPrimitive["BOOLEAN"] = "boolean"; + AvroPrimitive["INT"] = "int"; + AvroPrimitive["LONG"] = "long"; + AvroPrimitive["FLOAT"] = "float"; + AvroPrimitive["DOUBLE"] = "double"; + AvroPrimitive["BYTES"] = "bytes"; + AvroPrimitive["STRING"] = "string"; +})(AvroPrimitive || (AvroPrimitive = {})); +class AvroType { + /** + * Determines the AvroType from the Avro Schema. + */ + static fromSchema(schema) { + if (typeof schema === "string") { + return AvroType.fromStringSchema(schema); + } + else if (Array.isArray(schema)) { + return AvroType.fromArraySchema(schema); } else { - verifiedPermissions = ContainerSASPermissions.parse(blobSASSignatureValues.permissions.toString()).toString(); + return AvroType.fromObjectSchema(schema); } } - // Signature is generated on the un-url-encoded values. - const stringToSign = [ - verifiedPermissions ? verifiedPermissions : "", - blobSASSignatureValues.startsOn - ? truncatedISO8061Date(blobSASSignatureValues.startsOn, false) - : "", - blobSASSignatureValues.expiresOn - ? truncatedISO8061Date(blobSASSignatureValues.expiresOn, false) - : "", - getCanonicalName(userDelegationKeyCredential.accountName, blobSASSignatureValues.containerName, blobSASSignatureValues.blobName), - userDelegationKeyCredential.userDelegationKey.signedObjectId, - userDelegationKeyCredential.userDelegationKey.signedTenantId, - userDelegationKeyCredential.userDelegationKey.signedStartsOn - ? truncatedISO8061Date(userDelegationKeyCredential.userDelegationKey.signedStartsOn, false) - : "", - userDelegationKeyCredential.userDelegationKey.signedExpiresOn - ? 
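readArray() above (and readMap(), which reuses it over key/value pairs) consumes collections block by block: each block opens with an item count, a negative count means the writer also recorded the block's size in bytes (read and discarded here) and the real count is the absolute value, and a count of zero terminates the collection. A standalone sketch of that framing over a byte array, reusing the zig-zag decoding shown earlier; the example bytes are hypothetical:

    function readZigZag(bytes: Uint8Array, pos: { i: number }): number {
        let encoded = 0;
        let shift = 0;
        let byte: number;
        do {
            byte = bytes[pos.i++];
            encoded |= (byte & 0x7f) << shift;
            shift += 7;
        } while (byte & 0x80);
        return (encoded >> 1) ^ -(encoded & 1);
    }

    function readIntArray(bytes: Uint8Array, pos: { i: number } = { i: 0 }): number[] {
        const items: number[] = [];
        for (let count = readZigZag(bytes, pos); count !== 0; count = readZigZag(bytes, pos)) {
            if (count < 0) {
                readZigZag(bytes, pos); // negative count: block byte-size follows, skip it
                count = -count;
            }
            while (count--) {
                items.push(readZigZag(bytes, pos));
            }
        }
        return items;
    }

    // One block of two items (1 and 2), then the 0 terminator.
    // Zig-zag longs: 2 -> 0x04, 1 -> 0x02, 0 -> 0x00.
    console.log(readIntArray(Uint8Array.from([0x04, 0x02, 0x04, 0x00]))); // [ 1, 2 ]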
truncatedISO8061Date(userDelegationKeyCredential.userDelegationKey.signedExpiresOn, false) - : "", - userDelegationKeyCredential.userDelegationKey.signedService, - userDelegationKeyCredential.userDelegationKey.signedVersion, - blobSASSignatureValues.preauthorizedAgentObjectId, - undefined, - blobSASSignatureValues.correlationId, - blobSASSignatureValues.ipRange ? ipRangeToString(blobSASSignatureValues.ipRange) : "", - blobSASSignatureValues.protocol ? blobSASSignatureValues.protocol : "", - blobSASSignatureValues.version, - resource, - timestamp, - blobSASSignatureValues.encryptionScope, - blobSASSignatureValues.cacheControl, - blobSASSignatureValues.contentDisposition, - blobSASSignatureValues.contentEncoding, - blobSASSignatureValues.contentLanguage, - blobSASSignatureValues.contentType, - ].join("\n"); - const signature = userDelegationKeyCredential.computeHMACSHA256(stringToSign); - return new SASQueryParameters(blobSASSignatureValues.version, signature, verifiedPermissions, undefined, undefined, blobSASSignatureValues.protocol, blobSASSignatureValues.startsOn, blobSASSignatureValues.expiresOn, blobSASSignatureValues.ipRange, blobSASSignatureValues.identifier, resource, blobSASSignatureValues.cacheControl, blobSASSignatureValues.contentDisposition, blobSASSignatureValues.contentEncoding, blobSASSignatureValues.contentLanguage, blobSASSignatureValues.contentType, userDelegationKeyCredential.userDelegationKey, blobSASSignatureValues.preauthorizedAgentObjectId, blobSASSignatureValues.correlationId, blobSASSignatureValues.encryptionScope); -} -function getCanonicalName(accountName, containerName, blobName) { - // Container: "/blob/account/containerName" - // Blob: "/blob/account/containerName/blobName" - const elements = [`/blob/${accountName}/${containerName}`]; - if (blobName) { - elements.push(`/${blobName}`); + static fromStringSchema(schema) { + switch (schema) { + case AvroPrimitive.NULL: + case AvroPrimitive.BOOLEAN: + case AvroPrimitive.INT: + case AvroPrimitive.LONG: + case AvroPrimitive.FLOAT: + case AvroPrimitive.DOUBLE: + case AvroPrimitive.BYTES: + case AvroPrimitive.STRING: + return new AvroPrimitiveType(schema); + default: + throw new Error(`Unexpected Avro type ${schema}`); + } } - return elements.join(""); -} -function SASSignatureValuesSanityCheckAndAutofill(blobSASSignatureValues) { - const version = blobSASSignatureValues.version ? 
blobSASSignatureValues.version : SERVICE_VERSION; - if (blobSASSignatureValues.snapshotTime && version < "2018-11-09") { - throw RangeError("'version' must be >= '2018-11-09' when providing 'snapshotTime'."); + static fromArraySchema(schema) { + return new AvroUnionType(schema.map(AvroType.fromSchema)); } - if (blobSASSignatureValues.blobName === undefined && blobSASSignatureValues.snapshotTime) { - throw RangeError("Must provide 'blobName' when providing 'snapshotTime'."); + static fromObjectSchema(schema) { + const type = schema.type; + // Primitives can be defined as strings or objects + try { + return AvroType.fromStringSchema(type); + } + catch (err) { + // eslint-disable-line no-empty + } + switch (type) { + case AvroComplex.RECORD: + if (schema.aliases) { + throw new Error(`aliases currently is not supported, schema: ${schema}`); + } + if (!schema.name) { + throw new Error(`Required attribute 'name' doesn't exist on schema: ${schema}`); + } + // eslint-disable-next-line no-case-declarations + const fields = {}; + if (!schema.fields) { + throw new Error(`Required attribute 'fields' doesn't exist on schema: ${schema}`); + } + for (const field of schema.fields) { + fields[field.name] = AvroType.fromSchema(field.type); + } + return new AvroRecordType(fields, schema.name); + case AvroComplex.ENUM: + if (schema.aliases) { + throw new Error(`aliases currently is not supported, schema: ${schema}`); + } + if (!schema.symbols) { + throw new Error(`Required attribute 'symbols' doesn't exist on schema: ${schema}`); + } + return new AvroEnumType(schema.symbols); + case AvroComplex.MAP: + if (!schema.values) { + throw new Error(`Required attribute 'values' doesn't exist on schema: ${schema}`); + } + return new AvroMapType(AvroType.fromSchema(schema.values)); + case AvroComplex.ARRAY: // Unused today + case AvroComplex.FIXED: // Unused today + default: + throw new Error(`Unexpected Avro type ${type} in ${schema}`); + } } - if (blobSASSignatureValues.versionId && version < "2019-10-10") { - throw RangeError("'version' must be >= '2019-10-10' when providing 'versionId'."); +} +class AvroPrimitiveType extends AvroType { + constructor(primitive) { + super(); + this._primitive = primitive; } - if (blobSASSignatureValues.blobName === undefined && blobSASSignatureValues.versionId) { - throw RangeError("Must provide 'blobName' when providing 'versionId'."); + read(stream, options = {}) { + switch (this._primitive) { + case AvroPrimitive.NULL: + return AvroParser.readNull(); + case AvroPrimitive.BOOLEAN: + return AvroParser.readBoolean(stream, options); + case AvroPrimitive.INT: + return AvroParser.readInt(stream, options); + case AvroPrimitive.LONG: + return AvroParser.readLong(stream, options); + case AvroPrimitive.FLOAT: + return AvroParser.readFloat(stream, options); + case AvroPrimitive.DOUBLE: + return AvroParser.readDouble(stream, options); + case AvroPrimitive.BYTES: + return AvroParser.readBytes(stream, options); + case AvroPrimitive.STRING: + return AvroParser.readString(stream, options); + default: + throw new Error("Unknown Avro Primitive"); + } } - if (blobSASSignatureValues.permissions && - blobSASSignatureValues.permissions.setImmutabilityPolicy && - version < "2020-08-04") { - throw RangeError("'version' must be >= '2020-08-04' when provided 'i' permission."); +} +class AvroEnumType extends AvroType { + constructor(symbols) { + super(); + this._symbols = symbols; } - if (blobSASSignatureValues.permissions && - blobSASSignatureValues.permissions.deleteVersion && - version < "2019-10-10") { - 
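fromObjectSchema() above first retries the primitive path (a primitive may be spelled as {"type": "int"} as well as "int"), then dispatches on the complex type, supporting record, enum, and map while rejecting array, fixed, and aliases. A hypothetical schema touching the three shapes fromSchema() accepts; the names are illustrative, not from this file:

    const pointSchema = {
        type: "record",
        name: "com.example.Point",
        fields: [
            { name: "x", type: "double" },                             // string -> AvroPrimitiveType
            { name: "label", type: ["null", "string"] },               // array  -> AvroUnionType
            { name: "tags", type: { type: "map", values: "string" } }, // object -> AvroMapType
        ],
    };
    // Per AvroRecordType.read() above, one decoded Point comes back as
    // { $schema: "com.example.Point", x: ..., label: ..., tags: { ... } }.
    console.log(pointSchema.name);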
throw RangeError("'version' must be >= '2019-10-10' when providing 'x' permission."); + async read(stream, options = {}) { + const value = await AvroParser.readInt(stream, options); + return this._symbols[value]; } - if (blobSASSignatureValues.permissions && - blobSASSignatureValues.permissions.permanentDelete && - version < "2019-10-10") { - throw RangeError("'version' must be >= '2019-10-10' when providing 'y' permission."); +} +class AvroUnionType extends AvroType { + constructor(types) { + super(); + this._types = types; } - if (blobSASSignatureValues.permissions && - blobSASSignatureValues.permissions.tag && - version < "2019-12-12") { - throw RangeError("'version' must be >= '2019-12-12' when providing 't' permission."); + async read(stream, options = {}) { + const typeIndex = await AvroParser.readInt(stream, options); + return this._types[typeIndex].read(stream, options); } - if (version < "2020-02-10" && - blobSASSignatureValues.permissions && - (blobSASSignatureValues.permissions.move || blobSASSignatureValues.permissions.execute)) { - throw RangeError("'version' must be >= '2020-02-10' when providing the 'm' or 'e' permission."); +} +class AvroMapType extends AvroType { + constructor(itemType) { + super(); + this._itemType = itemType; } - if (version < "2021-04-10" && - blobSASSignatureValues.permissions && - blobSASSignatureValues.permissions.filterByTags) { - throw RangeError("'version' must be >= '2021-04-10' when providing the 'f' permission."); + read(stream, options = {}) { + const readItemMethod = (s, opts) => { + return this._itemType.read(s, opts); + }; + return AvroParser.readMap(stream, readItemMethod, options); } - if (version < "2020-02-10" && - (blobSASSignatureValues.preauthorizedAgentObjectId || blobSASSignatureValues.correlationId)) { - throw RangeError("'version' must be >= '2020-02-10' when providing 'preauthorizedAgentObjectId' or 'correlationId'."); +} +class AvroRecordType extends AvroType { + constructor(fields, name) { + super(); + this._fields = fields; + this._name = name; } - if (blobSASSignatureValues.encryptionScope && version < "2020-12-06") { - throw RangeError("'version' must be >= '2020-12-06' when provided 'encryptionScope' in SAS."); + async read(stream, options = {}) { + const record = {}; + record["$schema"] = this._name; + for (const key in this._fields) { + if (Object.prototype.hasOwnProperty.call(this._fields, key)) { + record[key] = await this._fields[key].read(stream, options); + } + } + return record; } - blobSASSignatureValues.version = version; - return blobSASSignatureValues; } // Copyright (c) Microsoft Corporation. -/** - * A client that manages leases for a {@link ContainerClient} or a {@link BlobClient}. - */ -class BlobLeaseClient { - /** - * Creates an instance of BlobLeaseClient. - * @param client - The client to make the lease operation requests. - * @param leaseId - Initial proposed lease id. - */ - constructor(client, leaseId) { - const clientContext = new StorageClientContext(client.url, client.pipeline.toServiceClientOptions()); - this._url = client.url; - if (client.name === undefined) { - this._isContainer = true; - this._containerOrBlobOperation = new Container(clientContext); - } - else { - this._isContainer = false; - this._containerOrBlobOperation = new Blob$1(clientContext); - } - if (!leaseId) { - leaseId = coreHttp.generateUuid(); - } - this._leaseId = leaseId; +// Licensed under the MIT license. 
+function arraysEqual(a, b) { + if (a === b) + return true; + // eslint-disable-next-line eqeqeq + if (a == null || b == null) + return false; + if (a.length !== b.length) + return false; + for (let i = 0; i < a.length; ++i) { + if (a[i] !== b[i]) + return false; } - /** - * Gets the lease Id. - * - * @readonly - */ - get leaseId() { - return this._leaseId; + return true; +} + +// Copyright (c) Microsoft Corporation. +class AvroReader { + constructor(dataStream, headerStream, currentBlockOffset, indexWithinCurrentBlock) { + this._dataStream = dataStream; + this._headerStream = headerStream || dataStream; + this._initialized = false; + this._blockOffset = currentBlockOffset || 0; + this._objectIndex = indexWithinCurrentBlock || 0; + this._initialBlockOffset = currentBlockOffset || 0; + } + get blockOffset() { + return this._blockOffset; } - /** - * Gets the url. - * - * @readonly - */ - get url() { - return this._url; + get objectIndex() { + return this._objectIndex; } - /** - * Establishes and manages a lock on a container for delete operations, or on a blob - * for write and delete operations. - * The lock duration can be 15 to 60 seconds, or can be infinite. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-container - * and - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-blob - * - * @param duration - Must be between 15 to 60 seconds, or infinite (-1) - * @param options - option to configure lease management operations. - * @returns Response data for acquire lease operation. - */ - async acquireLease(duration, options = {}) { - var _a, _b, _c, _d, _e, _f; - const { span, updatedOptions } = createSpan("BlobLeaseClient-acquireLease", options); - if (this._isContainer && - ((((_a = options.conditions) === null || _a === void 0 ? void 0 : _a.ifMatch) && ((_b = options.conditions) === null || _b === void 0 ? void 0 : _b.ifMatch) !== ETagNone) || - (((_c = options.conditions) === null || _c === void 0 ? void 0 : _c.ifNoneMatch) && ((_d = options.conditions) === null || _d === void 0 ? void 0 : _d.ifNoneMatch) !== ETagNone) || - ((_e = options.conditions) === null || _e === void 0 ? void 0 : _e.tagConditions))) { - throw new RangeError("The IfMatch, IfNoneMatch and tags access conditions are ignored by the service. Values other than undefined or their default values are not acceptable."); + async initialize(options = {}) { + const header = await AvroParser.readFixedBytes(this._headerStream, AVRO_INIT_BYTES.length, { + abortSignal: options.abortSignal, + }); + if (!arraysEqual(header, AVRO_INIT_BYTES)) { + throw new Error("Stream is not an Avro file."); } - try { - return await this._containerOrBlobOperation.acquireLease(Object.assign({ abortSignal: options.abortSignal, duration, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_f = options.conditions) === null || _f === void 0 ? 
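initialize() above walks the standard Avro object-container header: four magic bytes, a metadata map carrying the JSON schema and optional codec, and a random 16-byte sync marker that must reappear after every data block. A byte-level sketch of that layout, inferred from the reader rather than restated from a spec:

    // Container layout consumed by initialize() and parseObjects():
    //
    //   bytes 0-3   magic "O" "b" "j" 0x01   (AVRO_INIT_BYTES = [79, 98, 106, 1])
    //   map         file metadata            ("avro.schema": JSON, "avro.codec": "null" or absent)
    //   16 bytes    sync marker              (random per file)
    //
    // then repeated data blocks, each:
    //
    //   long        object count in the block
    //   long        block size in bytes
    //   ...         the serialized objects
    //   16 bytes    sync marker (must equal the header's, else the stream is corrupt)
    const AVRO_MAGIC = Uint8Array.from([0x4f, 0x62, 0x6a, 0x01]);
    console.log(AVRO_MAGIC); // Uint8Array(4) [ 79, 98, 106, 1 ]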
void 0 : _f.tagConditions }), proposedLeaseId: this._leaseId }, convertTracingToRequestOptionsBase(updatedOptions))); + // File metadata is written as if defined by the following map schema: + // { "type": "map", "values": "bytes"} + this._metadata = await AvroParser.readMap(this._headerStream, AvroParser.readString, { + abortSignal: options.abortSignal, + }); + // Validate codec + const codec = this._metadata[AVRO_CODEC_KEY]; + if (!(codec === undefined || codec === null || codec === "null")) { + throw new Error("Codecs are not supported"); } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; + // The 16-byte, randomly-generated sync marker for this file. + this._syncMarker = await AvroParser.readFixedBytes(this._headerStream, AVRO_SYNC_MARKER_SIZE, { + abortSignal: options.abortSignal, + }); + // Parse the schema + const schema = JSON.parse(this._metadata[AVRO_SCHEMA_KEY]); + this._itemType = AvroType.fromSchema(schema); + if (this._blockOffset === 0) { + this._blockOffset = this._initialBlockOffset + this._dataStream.position; } - finally { - span.end(); + this._itemsRemainingInBlock = await AvroParser.readLong(this._dataStream, { + abortSignal: options.abortSignal, + }); + // skip block length + await AvroParser.readLong(this._dataStream, { abortSignal: options.abortSignal }); + this._initialized = true; + if (this._objectIndex && this._objectIndex > 0) { + for (let i = 0; i < this._objectIndex; i++) { + await this._itemType.read(this._dataStream, { abortSignal: options.abortSignal }); + this._itemsRemainingInBlock--; + } } } - /** - * To change the ID of the lease. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-container - * and - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-blob - * - * @param proposedLeaseId - the proposed new lease Id. - * @param options - option to configure lease management operations. - * @returns Response data for change lease operation. - */ - async changeLease(proposedLeaseId, options = {}) { - var _a, _b, _c, _d, _e, _f; - const { span, updatedOptions } = createSpan("BlobLeaseClient-changeLease", options); - if (this._isContainer && - ((((_a = options.conditions) === null || _a === void 0 ? void 0 : _a.ifMatch) && ((_b = options.conditions) === null || _b === void 0 ? void 0 : _b.ifMatch) !== ETagNone) || - (((_c = options.conditions) === null || _c === void 0 ? void 0 : _c.ifNoneMatch) && ((_d = options.conditions) === null || _d === void 0 ? void 0 : _d.ifNoneMatch) !== ETagNone) || - ((_e = options.conditions) === null || _e === void 0 ? void 0 : _e.tagConditions))) { - throw new RangeError("The IfMatch, IfNoneMatch and tags access conditions are ignored by the service. Values other than undefined or their default values are not acceptable."); - } - try { - const response = await this._containerOrBlobOperation.changeLease(this._leaseId, proposedLeaseId, Object.assign({ abortSignal: options.abortSignal, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_f = options.conditions) === null || _f === void 0 ? 
void 0 : _f.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); - this._leaseId = proposedLeaseId; - return response; - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } + hasNext() { + return !this._initialized || this._itemsRemainingInBlock > 0; } - /** - * To free the lease if it is no longer needed so that another client may - * immediately acquire a lease against the container or the blob. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-container - * and - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-blob - * - * @param options - option to configure lease management operations. - * @returns Response data for release lease operation. - */ - async releaseLease(options = {}) { - var _a, _b, _c, _d, _e, _f; - const { span, updatedOptions } = createSpan("BlobLeaseClient-releaseLease", options); - if (this._isContainer && - ((((_a = options.conditions) === null || _a === void 0 ? void 0 : _a.ifMatch) && ((_b = options.conditions) === null || _b === void 0 ? void 0 : _b.ifMatch) !== ETagNone) || - (((_c = options.conditions) === null || _c === void 0 ? void 0 : _c.ifNoneMatch) && ((_d = options.conditions) === null || _d === void 0 ? void 0 : _d.ifNoneMatch) !== ETagNone) || - ((_e = options.conditions) === null || _e === void 0 ? void 0 : _e.tagConditions))) { - throw new RangeError("The IfMatch, IfNoneMatch and tags access conditions are ignored by the service. Values other than undefined or their default values are not acceptable."); - } - try { - return await this._containerOrBlobOperation.releaseLease(this._leaseId, Object.assign({ abortSignal: options.abortSignal, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_f = options.conditions) === null || _f === void 0 ? void 0 : _f.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } + parseObjects(options = {}) { + return tslib.__asyncGenerator(this, arguments, function* parseObjects_1() { + if (!this._initialized) { + yield tslib.__await(this.initialize(options)); + } + while (this.hasNext()) { + const result = yield tslib.__await(this._itemType.read(this._dataStream, { + abortSignal: options.abortSignal, + })); + this._itemsRemainingInBlock--; + this._objectIndex++; + if (this._itemsRemainingInBlock === 0) { + const marker = yield tslib.__await(AvroParser.readFixedBytes(this._dataStream, AVRO_SYNC_MARKER_SIZE, { + abortSignal: options.abortSignal, + })); + this._blockOffset = this._initialBlockOffset + this._dataStream.position; + this._objectIndex = 0; + if (!arraysEqual(this._syncMarker, marker)) { + throw new Error("Stream is not a valid Avro file."); + } + try { + this._itemsRemainingInBlock = yield tslib.__await(AvroParser.readLong(this._dataStream, { + abortSignal: options.abortSignal, + })); + } + catch (err) { + // We hit the end of the stream. + this._itemsRemainingInBlock = 0; + } + if (this._itemsRemainingInBlock > 0) { + // Ignore block size + yield tslib.__await(AvroParser.readLong(this._dataStream, { abortSignal: options.abortSignal })); + } + } + yield yield tslib.__await(result); + } + }); } - /** - * To renew the lease. 
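parseObjects() above is an async generator, so a caller can drain a container file with for-await while the reader handles block boundaries and sync-marker checks internally. A usage sketch, assuming a Node.js file stream wrapped in the AvroReadableFromStream class defined just below (neither class is a public export of the package, and the file path is hypothetical):

    import * as fs from "node:fs";

    async function dumpAvroRecords(path: string): Promise<number> {
        const readable = new AvroReadableFromStream(fs.createReadStream(path));
        const reader = new AvroReader(readable);
        let count = 0;
        for await (const record of reader.parseObjects()) {
            console.log(record); // one decoded object per iteration
            count++;
        }
        return count;
    }

    // dumpAvroRecords("./example.avro").then((n) => console.log(`${n} records`));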
- * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-container - * and - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-blob - * - * @param options - Optional option to configure lease management operations. - * @returns Response data for renew lease operation. - */ - async renewLease(options = {}) { - var _a, _b, _c, _d, _e, _f; - const { span, updatedOptions } = createSpan("BlobLeaseClient-renewLease", options); - if (this._isContainer && - ((((_a = options.conditions) === null || _a === void 0 ? void 0 : _a.ifMatch) && ((_b = options.conditions) === null || _b === void 0 ? void 0 : _b.ifMatch) !== ETagNone) || - (((_c = options.conditions) === null || _c === void 0 ? void 0 : _c.ifNoneMatch) && ((_d = options.conditions) === null || _d === void 0 ? void 0 : _d.ifNoneMatch) !== ETagNone) || - ((_e = options.conditions) === null || _e === void 0 ? void 0 : _e.tagConditions))) { - throw new RangeError("The IfMatch, IfNoneMatch and tags access conditions are ignored by the service. Values other than undefined or their default values are not acceptable."); +} + +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +class AvroReadable { +} + +// Copyright (c) Microsoft Corporation. +const ABORT_ERROR = new abortController.AbortError("Reading from the avro stream was aborted."); +class AvroReadableFromStream extends AvroReadable { + constructor(readable) { + super(); + this._readable = readable; + this._position = 0; + } + toUint8Array(data) { + if (typeof data === "string") { + return Buffer.from(data); } - try { - return await this._containerOrBlobOperation.renewLease(this._leaseId, Object.assign({ abortSignal: options.abortSignal, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_f = options.conditions) === null || _f === void 0 ? void 0 : _f.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); + return data; + } + get position() { + return this._position; + } + async read(size, options = {}) { + var _a; + if ((_a = options.abortSignal) === null || _a === void 0 ? void 0 : _a.aborted) { + throw ABORT_ERROR; } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; + if (size < 0) { + throw new Error(`size parameter should be positive: ${size}`); } - finally { - span.end(); + if (size === 0) { + return new Uint8Array(); } - } - /** - * To end the lease but ensure that another client cannot acquire a new lease - * until the current lease period has expired. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-container - * and - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-blob - * - * @param breakPeriod - Break period - * @param options - Optional options to configure lease management operations. - * @returns Response data for break lease operation. - */ - async breakLease(breakPeriod, options = {}) { - var _a, _b, _c, _d, _e, _f; - const { span, updatedOptions } = createSpan("BlobLeaseClient-breakLease", options); - if (this._isContainer && - ((((_a = options.conditions) === null || _a === void 0 ? void 0 : _a.ifMatch) && ((_b = options.conditions) === null || _b === void 0 ? void 0 : _b.ifMatch) !== ETagNone) || - (((_c = options.conditions) === null || _c === void 0 ? void 0 : _c.ifNoneMatch) && ((_d = options.conditions) === null || _d === void 0 ? void 0 : _d.ifNoneMatch) !== ETagNone) || - ((_e = options.conditions) === null || _e === void 0 ? 
void 0 : _e.tagConditions))) { - throw new RangeError("The IfMatch, IfNoneMatch and tags access conditions are ignored by the service. Values other than undefined or their default values are not acceptable."); + if (!this._readable.readable) { + throw new Error("Stream no longer readable."); } - try { - const operationOptions = Object.assign({ abortSignal: options.abortSignal, breakPeriod, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_f = options.conditions) === null || _f === void 0 ? void 0 : _f.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions)); - return await this._containerOrBlobOperation.breakLease(operationOptions); + // See if there is already enough data. + const chunk = this._readable.read(size); + if (chunk) { + this._position += chunk.length; + // chunk.length maybe less than desired size if the stream ends. + return this.toUint8Array(chunk); } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, + else { + // register callback to wait for enough data to read + return new Promise((resolve, reject) => { + /* eslint-disable @typescript-eslint/no-use-before-define */ + const cleanUp = () => { + this._readable.removeListener("readable", readableCallback); + this._readable.removeListener("error", rejectCallback); + this._readable.removeListener("end", rejectCallback); + this._readable.removeListener("close", rejectCallback); + if (options.abortSignal) { + options.abortSignal.removeEventListener("abort", abortHandler); + } + }; + const readableCallback = () => { + const callbackChunk = this._readable.read(size); + if (callbackChunk) { + this._position += callbackChunk.length; + cleanUp(); + // callbackChunk.length maybe less than desired size if the stream ends. + resolve(this.toUint8Array(callbackChunk)); + } + }; + const rejectCallback = () => { + cleanUp(); + reject(); + }; + const abortHandler = () => { + cleanUp(); + reject(ABORT_ERROR); + }; + this._readable.on("readable", readableCallback); + this._readable.once("error", rejectCallback); + this._readable.once("end", rejectCallback); + this._readable.once("close", rejectCallback); + if (options.abortSignal) { + options.abortSignal.addEventListener("abort", abortHandler); + } + /* eslint-enable @typescript-eslint/no-use-before-define */ }); - throw e; - } - finally { - span.end(); } } } @@ -39188,108 +37789,107 @@ class BlobLeaseClient { /** * ONLY AVAILABLE IN NODE.JS RUNTIME. * - * A Node.js ReadableStream will internally retry when internal ReadableStream unexpected ends. + * A Node.js BlobQuickQueryStream will internally parse avro data stream for blob query. */ -class RetriableReadableStream extends stream.Readable { +class BlobQuickQueryStream extends stream.Readable { /** - * Creates an instance of RetriableReadableStream. + * Creates an instance of BlobQuickQueryStream. 
* * @param source - The current ReadableStream returned from getter - * @param getter - A method calling downloading request returning - * a new ReadableStream from specified offset - * @param offset - Offset position in original data source to read - * @param count - How much data in original data source to read * @param options - */ - constructor(source, getter, offset, count, options = {}) { - super({ highWaterMark: options.highWaterMark }); - this.retries = 0; - this.sourceDataHandler = (data) => { - if (this.options.doInjectErrorOnce) { - this.options.doInjectErrorOnce = undefined; - this.source.pause(); - this.source.removeAllListeners("data"); - this.source.emit("end"); - return; - } - // console.log( - // `Offset: ${this.offset}, Received ${data.length} from internal stream` - // ); - this.offset += data.length; - if (this.onProgress) { - this.onProgress({ loadedBytes: this.offset - this.start }); - } - if (!this.push(data)) { - this.source.pause(); - } - }; - this.sourceErrorOrEndHandler = (err) => { - if (err && err.name === "AbortError") { - this.destroy(err); - return; - } - // console.log( - // `Source stream emits end or error, offset: ${ - // this.offset - // }, dest end : ${this.end}` - // ); - this.removeSourceEventHandlers(); - if (this.offset - 1 === this.end) { - this.push(null); - } - else if (this.offset <= this.end) { - // console.log( - // `retries: ${this.retries}, max retries: ${this.maxRetries}` - // ); - if (this.retries < this.maxRetryRequests) { - this.retries += 1; - this.getter(this.offset) - .then((newSource) => { - this.source = newSource; - this.setSourceEventHandlers(); - return; - }) - .catch((error) => { - this.destroy(error); - }); - } - else { - this.destroy(new Error(`Data corruption failure: received less data than required and reached maxRetires limitation. Received data offset: ${this.offset - 1}, data needed offset: ${this.end}, retries: ${this.retries}, max retries: ${this.maxRetryRequests}`)); - } - } - else { - this.destroy(new Error(`Data corruption failure: Received more data than original request, data needed offset is ${this.end}, received offset: ${this.offset - 1}`)); - } - }; - this.getter = getter; + constructor(source, options = {}) { + super(); + this.avroPaused = true; this.source = source; - this.start = offset; - this.offset = offset; - this.end = offset + count - 1; - this.maxRetryRequests = - options.maxRetryRequests && options.maxRetryRequests >= 0 ? options.maxRetryRequests : 0; this.onProgress = options.onProgress; - this.options = options; - this.setSourceEventHandlers(); + this.onError = options.onError; + this.avroReader = new AvroReader(new AvroReadableFromStream(this.source)); + this.avroIter = this.avroReader.parseObjects({ abortSignal: options.abortSignal }); } _read() { - this.source.resume(); - } - setSourceEventHandlers() { - this.source.on("data", this.sourceDataHandler); - this.source.on("end", this.sourceErrorOrEndHandler); - this.source.on("error", this.sourceErrorOrEndHandler); - } - removeSourceEventHandlers() { - this.source.removeListener("data", this.sourceDataHandler); - this.source.removeListener("end", this.sourceErrorOrEndHandler); - this.source.removeListener("error", this.sourceErrorOrEndHandler); + if (this.avroPaused) { + this.readInternal().catch((err) => { + this.emit("error", err); + }); + } } - _destroy(error, callback) { - // remove listener from source and release source - this.removeSourceEventHandlers(); - this.source.destroy(); - callback(error === null ? 
undefined : error); + async readInternal() { + this.avroPaused = false; + let avroNext; + do { + avroNext = await this.avroIter.next(); + if (avroNext.done) { + break; + } + const obj = avroNext.value; + const schema = obj.$schema; + if (typeof schema !== "string") { + throw Error("Missing schema in avro record."); + } + switch (schema) { + case "com.microsoft.azure.storage.queryBlobContents.resultData": + { + const data = obj.data; + if (data instanceof Uint8Array === false) { + throw Error("Invalid data in avro result record."); + } + if (!this.push(Buffer.from(data))) { + this.avroPaused = true; + } + } + break; + case "com.microsoft.azure.storage.queryBlobContents.progress": + { + const bytesScanned = obj.bytesScanned; + if (typeof bytesScanned !== "number") { + throw Error("Invalid bytesScanned in avro progress record."); + } + if (this.onProgress) { + this.onProgress({ loadedBytes: bytesScanned }); + } + } + break; + case "com.microsoft.azure.storage.queryBlobContents.end": + if (this.onProgress) { + const totalBytes = obj.totalBytes; + if (typeof totalBytes !== "number") { + throw Error("Invalid totalBytes in avro end record."); + } + this.onProgress({ loadedBytes: totalBytes }); + } + this.push(null); + break; + case "com.microsoft.azure.storage.queryBlobContents.error": + if (this.onError) { + const fatal = obj.fatal; + if (typeof fatal !== "boolean") { + throw Error("Invalid fatal in avro error record."); + } + const name = obj.name; + if (typeof name !== "string") { + throw Error("Invalid name in avro error record."); + } + const description = obj.description; + if (typeof description !== "string") { + throw Error("Invalid description in avro error record."); + } + const position = obj.position; + if (typeof position !== "number") { + throw Error("Invalid position in avro error record."); + } + this.onError({ + position, + name, + isFatal: fatal, + description, + }); + } + break; + default: + throw Error(`Unknown schema ${schema} in avro progress record.`); + } + } while (!avroNext.done && !this.avroPaused); } } @@ -39297,26 +37897,19 @@ class RetriableReadableStream extends stream.Readable { /** * ONLY AVAILABLE IN NODE.JS RUNTIME. * - * BlobDownloadResponse implements BlobDownloadResponseParsed interface, and in Node.js runtime it will - * automatically retry when internal read stream unexpected ends. (This kind of unexpected ends cannot - * trigger retries defined in pipeline retry policy.) - * - * The {@link readableStreamBody} stream will retry underlayer, you can just use it as a normal Node.js - * Readable stream. + * BlobQueryResponse implements BlobDownloadResponseModel interface, and in Node.js runtime it will + * parse avor data returned by blob query. */ -class BlobDownloadResponse { +class BlobQueryResponse { /** - * Creates an instance of BlobDownloadResponse. + * Creates an instance of BlobQueryResponse. 
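// readInternal() above demultiplexes the Quick Query response purely on each
// record's $schema string: resultData carries raw bytes, progress/end drive
// onProgress, and error records drive onError. The dispatch, reduced to a
// sketch with a hypothetical handlers object standing in for the
// push()/onProgress/onError plumbing:
function dispatchQueryRecord(record, handlers) {
  switch (record.$schema) {
    case "com.microsoft.azure.storage.queryBlobContents.resultData":
      return handlers.onData(Buffer.from(record.data)); // query result bytes
    case "com.microsoft.azure.storage.queryBlobContents.progress":
      return handlers.onProgress({ loadedBytes: record.bytesScanned });
    case "com.microsoft.azure.storage.queryBlobContents.end":
      return handlers.onEnd({ loadedBytes: record.totalBytes });
    case "com.microsoft.azure.storage.queryBlobContents.error":
      return handlers.onError({
        position: record.position,
        name: record.name,
        isFatal: record.fatal,
        description: record.description,
      });
    default:
      throw new Error(`Unknown schema ${record.$schema}`);
  }
}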
* * @param originalResponse - - * @param getter - - * @param offset - - * @param count - * @param options - */ - constructor(originalResponse, getter, offset, count, options = {}) { + constructor(originalResponse, options = {}) { this.originalResponse = originalResponse; - this.blobDownloadStream = new RetriableReadableStream(this.originalResponse.readableStreamBody, getter, offset, count, options); + this.blobDownloadStream = new BlobQuickQueryStream(this.originalResponse.readableStreamBody, options); } /** * Indicates that the service supports @@ -39433,7 +38026,7 @@ class BlobDownloadResponse { * @readonly */ get copyCompletedOn() { - return this.originalResponse.copyCompletedOn; + return undefined; } /** * String identifier for the last attempted Copy @@ -39477,2194 +38070,2424 @@ class BlobDownloadResponse { } /** * Only appears when - * x-ms-copy-status is failed or pending. Describes cause of fatal or - * non-fatal copy operation failure. - * - * @readonly - */ - get copyStatusDescription() { - return this.originalResponse.copyStatusDescription; - } - /** - * When a blob is leased, - * specifies whether the lease is of infinite or fixed duration. Possible - * values include: 'infinite', 'fixed'. - * - * @readonly - */ - get leaseDuration() { - return this.originalResponse.leaseDuration; - } - /** - * Lease state of the blob. Possible - * values include: 'available', 'leased', 'expired', 'breaking', 'broken'. - * - * @readonly - */ - get leaseState() { - return this.originalResponse.leaseState; - } - /** - * The current lease status of the - * blob. Possible values include: 'locked', 'unlocked'. - * - * @readonly - */ - get leaseStatus() { - return this.originalResponse.leaseStatus; - } - /** - * A UTC date/time value generated by the service that - * indicates the time at which the response was initiated. - * - * @readonly - */ - get date() { - return this.originalResponse.date; - } - /** - * The number of committed blocks - * present in the blob. This header is returned only for append blobs. - * - * @readonly - */ - get blobCommittedBlockCount() { - return this.originalResponse.blobCommittedBlockCount; - } - /** - * The ETag contains a value that you can use to - * perform operations conditionally, in quotes. - * - * @readonly - */ - get etag() { - return this.originalResponse.etag; - } - /** - * The number of tags associated with the blob - * - * @readonly - */ - get tagCount() { - return this.originalResponse.tagCount; - } - /** - * The error code. - * - * @readonly - */ - get errorCode() { - return this.originalResponse.errorCode; - } - /** - * The value of this header is set to - * true if the file data and application metadata are completely encrypted - * using the specified algorithm. Otherwise, the value is set to false (when - * the file is unencrypted, or if only parts of the file/application metadata - * are encrypted). - * - * @readonly - */ - get isServerEncrypted() { - return this.originalResponse.isServerEncrypted; - } - /** - * If the blob has a MD5 hash, and if - * request contains range header (Range or x-ms-range), this response header - * is returned with the value of the whole blob's MD5 value. This value may - * or may not be equal to the value returned in Content-MD5 header, with the - * latter calculated from the requested range. - * - * @readonly - */ - get blobContentMD5() { - return this.originalResponse.blobContentMD5; - } - /** - * Returns the date and time the file was last - * modified. 
Any operation that modifies the file or its properties updates - * the last modified time. - * - * @readonly - */ - get lastModified() { - return this.originalResponse.lastModified; - } - /** - * Returns the UTC date and time generated by the service that indicates the time at which the blob was - * last read or written to. - * - * @readonly - */ - get lastAccessed() { - return this.originalResponse.lastAccessed; - } - /** - * A name-value pair - * to associate with a file storage object. - * - * @readonly - */ - get metadata() { - return this.originalResponse.metadata; - } - /** - * This header uniquely identifies the request - * that was made and can be used for troubleshooting the request. - * - * @readonly - */ - get requestId() { - return this.originalResponse.requestId; - } - /** - * If a client request id header is sent in the request, this header will be present in the - * response with the same value. - * - * @readonly - */ - get clientRequestId() { - return this.originalResponse.clientRequestId; - } - /** - * Indicates the version of the Blob service used - * to execute the request. - * - * @readonly - */ - get version() { - return this.originalResponse.version; - } - /** - * Indicates the versionId of the downloaded blob version. - * - * @readonly - */ - get versionId() { - return this.originalResponse.versionId; - } - /** - * Indicates whether version of this blob is a current version. - * - * @readonly - */ - get isCurrentVersion() { - return this.originalResponse.isCurrentVersion; - } - /** - * The SHA-256 hash of the encryption key used to encrypt the blob. This value is only returned - * when the blob was encrypted with a customer-provided key. - * - * @readonly - */ - get encryptionKeySha256() { - return this.originalResponse.encryptionKeySha256; - } - /** - * If the request is to read a specified range and the x-ms-range-get-content-crc64 is set to - * true, then the request returns a crc64 for the range, as long as the range size is less than - * or equal to 4 MB. If both x-ms-range-get-content-crc64 & x-ms-range-get-content-md5 is - * specified in the same request, it will fail with 400(Bad Request) - */ - get contentCrc64() { - return this.originalResponse.contentCrc64; - } - /** - * Object Replication Policy Id of the destination blob. - * - * @readonly - */ - get objectReplicationDestinationPolicyId() { - return this.originalResponse.objectReplicationDestinationPolicyId; - } - /** - * Parsed Object Replication Policy Id, Rule Id(s) and status of the source blob. - * - * @readonly - */ - get objectReplicationSourceProperties() { - return this.originalResponse.objectReplicationSourceProperties; - } - /** - * If this blob has been sealed. - * - * @readonly - */ - get isSealed() { - return this.originalResponse.isSealed; - } - /** - * UTC date/time value generated by the service that indicates the time at which the blob immutability policy will expire. - * - * @readonly - */ - get immutabilityPolicyExpiresOn() { - return this.originalResponse.immutabilityPolicyExpiresOn; - } - /** - * Indicates immutability policy mode. - * - * @readonly - */ - get immutabilityPolicyMode() { - return this.originalResponse.immutabilityPolicyMode; - } - /** - * Indicates if a legal hold is present on the blob. - * - * @readonly - */ - get legalHold() { - return this.originalResponse.legalHold; - } - /** - * The response body as a browser Blob. - * Always undefined in node.js. 
- * - * @readonly - */ - get contentAsBlob() { - return this.originalResponse.blobBody; - } - /** - * The response body as a node.js Readable stream. - * Always undefined in the browser. - * - * It will automatically retry when internal read stream unexpected ends. - * - * @readonly - */ - get readableStreamBody() { - return coreHttp.isNode ? this.blobDownloadStream : undefined; - } - /** - * The HTTP response. - */ - get _response() { - return this.originalResponse._response; - } -} - -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -const AVRO_SYNC_MARKER_SIZE = 16; -const AVRO_INIT_BYTES = new Uint8Array([79, 98, 106, 1]); -const AVRO_CODEC_KEY = "avro.codec"; -const AVRO_SCHEMA_KEY = "avro.schema"; - -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -class AvroParser { - /** - * Reads a fixed number of bytes from the stream. - * - * @param stream - - * @param length - - * @param options - - */ - static async readFixedBytes(stream, length, options = {}) { - const bytes = await stream.read(length, { abortSignal: options.abortSignal }); - if (bytes.length !== length) { - throw new Error("Hit stream end."); - } - return bytes; - } - /** - * Reads a single byte from the stream. - * - * @param stream - - * @param options - - */ - static async readByte(stream, options = {}) { - const buf = await AvroParser.readFixedBytes(stream, 1, options); - return buf[0]; - } - // int and long are stored in variable-length zig-zag coding. - // variable-length: https://lucene.apache.org/core/3_5_0/fileformats.html#VInt - // zig-zag: https://developers.google.com/protocol-buffers/docs/encoding?csw=1#types - static async readZigZagLong(stream, options = {}) { - let zigZagEncoded = 0; - let significanceInBit = 0; - let byte, haveMoreByte, significanceInFloat; - do { - byte = await AvroParser.readByte(stream, options); - haveMoreByte = byte & 0x80; - zigZagEncoded |= (byte & 0x7f) << significanceInBit; - significanceInBit += 7; - } while (haveMoreByte && significanceInBit < 28); // bitwise operation only works for 32-bit integers - if (haveMoreByte) { - // Switch to float arithmetic - // eslint-disable-next-line no-self-assign - zigZagEncoded = zigZagEncoded; - significanceInFloat = 268435456; // 2 ** 28. - do { - byte = await AvroParser.readByte(stream, options); - zigZagEncoded += (byte & 0x7f) * significanceInFloat; - significanceInFloat *= 128; // 2 ** 7 - } while (byte & 0x80); - const res = (zigZagEncoded % 2 ? 
-(zigZagEncoded + 1) : zigZagEncoded) / 2; - if (res < Number.MIN_SAFE_INTEGER || res > Number.MAX_SAFE_INTEGER) { - throw new Error("Integer overflow."); - } - return res; - } - return (zigZagEncoded >> 1) ^ -(zigZagEncoded & 1); - } - static async readLong(stream, options = {}) { - return AvroParser.readZigZagLong(stream, options); - } - static async readInt(stream, options = {}) { - return AvroParser.readZigZagLong(stream, options); - } - static async readNull() { - return null; - } - static async readBoolean(stream, options = {}) { - const b = await AvroParser.readByte(stream, options); - if (b === 1) { - return true; - } - else if (b === 0) { - return false; - } - else { - throw new Error("Byte was not a boolean."); - } - } - static async readFloat(stream, options = {}) { - const u8arr = await AvroParser.readFixedBytes(stream, 4, options); - const view = new DataView(u8arr.buffer, u8arr.byteOffset, u8arr.byteLength); - return view.getFloat32(0, true); // littleEndian = true - } - static async readDouble(stream, options = {}) { - const u8arr = await AvroParser.readFixedBytes(stream, 8, options); - const view = new DataView(u8arr.buffer, u8arr.byteOffset, u8arr.byteLength); - return view.getFloat64(0, true); // littleEndian = true - } - static async readBytes(stream, options = {}) { - const size = await AvroParser.readLong(stream, options); - if (size < 0) { - throw new Error("Bytes size was negative."); - } - return stream.read(size, { abortSignal: options.abortSignal }); - } - static async readString(stream, options = {}) { - const u8arr = await AvroParser.readBytes(stream, options); - const utf8decoder = new TextDecoder(); - return utf8decoder.decode(u8arr); - } - static async readMapPair(stream, readItemMethod, options = {}) { - const key = await AvroParser.readString(stream, options); - // FUTURE: this won't work with readFixed (currently not supported) which needs a length as the parameter. 
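// readZigZagLong() above combines two encodings: a base-128 varint (7 data bits
// per byte, high bit set while more bytes follow) and zig-zag mapping so small
// negative numbers stay small on the wire. A worked sketch for the 32-bit fast
// path (the bundled code switches to float arithmetic past 28 bits of
// significance):
const zigZagEncode = (n) => (n << 1) ^ (n >> 31); // 3 -> 6, -3 -> 5, -1 -> 1
const zigZagDecode = (z) => (z >>> 1) ^ -(z & 1); // 6 -> 3,  5 -> -3, 1 -> -1
function toVarint(z) {
  // little-endian base-128 with a continuation bit, as read byte-by-byte above
  const bytes = [];
  do {
    bytes.push((z & 0x7f) | (z > 0x7f ? 0x80 : 0));
    z >>>= 7;
  } while (z);
  return Uint8Array.from(bytes);
}
console.log(zigZagDecode(zigZagEncode(-3))); // -3
console.log(toVarint(zigZagEncode(-300)));   // Uint8Array [ 0xd7, 0x04 ]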
- const value = await readItemMethod(stream, options); - return { key, value }; - } - static async readMap(stream, readItemMethod, options = {}) { - const readPairMethod = (s, opts = {}) => { - return AvroParser.readMapPair(s, readItemMethod, opts); - }; - const pairs = await AvroParser.readArray(stream, readPairMethod, options); - const dict = {}; - for (const pair of pairs) { - dict[pair.key] = pair.value; - } - return dict; - } - static async readArray(stream, readItemMethod, options = {}) { - const items = []; - for (let count = await AvroParser.readLong(stream, options); count !== 0; count = await AvroParser.readLong(stream, options)) { - if (count < 0) { - // Ignore block sizes - await AvroParser.readLong(stream, options); - count = -count; - } - while (count--) { - const item = await readItemMethod(stream, options); - items.push(item); - } - } - return items; - } -} -var AvroComplex; -(function (AvroComplex) { - AvroComplex["RECORD"] = "record"; - AvroComplex["ENUM"] = "enum"; - AvroComplex["ARRAY"] = "array"; - AvroComplex["MAP"] = "map"; - AvroComplex["UNION"] = "union"; - AvroComplex["FIXED"] = "fixed"; -})(AvroComplex || (AvroComplex = {})); -var AvroPrimitive; -(function (AvroPrimitive) { - AvroPrimitive["NULL"] = "null"; - AvroPrimitive["BOOLEAN"] = "boolean"; - AvroPrimitive["INT"] = "int"; - AvroPrimitive["LONG"] = "long"; - AvroPrimitive["FLOAT"] = "float"; - AvroPrimitive["DOUBLE"] = "double"; - AvroPrimitive["BYTES"] = "bytes"; - AvroPrimitive["STRING"] = "string"; -})(AvroPrimitive || (AvroPrimitive = {})); -class AvroType { - /** - * Determines the AvroType from the Avro Schema. - */ - static fromSchema(schema) { - if (typeof schema === "string") { - return AvroType.fromStringSchema(schema); - } - else if (Array.isArray(schema)) { - return AvroType.fromArraySchema(schema); - } - else { - return AvroType.fromObjectSchema(schema); - } - } - static fromStringSchema(schema) { - switch (schema) { - case AvroPrimitive.NULL: - case AvroPrimitive.BOOLEAN: - case AvroPrimitive.INT: - case AvroPrimitive.LONG: - case AvroPrimitive.FLOAT: - case AvroPrimitive.DOUBLE: - case AvroPrimitive.BYTES: - case AvroPrimitive.STRING: - return new AvroPrimitiveType(schema); - default: - throw new Error(`Unexpected Avro type ${schema}`); - } - } - static fromArraySchema(schema) { - return new AvroUnionType(schema.map(AvroType.fromSchema)); - } - static fromObjectSchema(schema) { - const type = schema.type; - // Primitives can be defined as strings or objects - try { - return AvroType.fromStringSchema(type); - } - catch (err) { - // eslint-disable-line no-empty - } - switch (type) { - case AvroComplex.RECORD: - if (schema.aliases) { - throw new Error(`aliases currently is not supported, schema: ${schema}`); - } - if (!schema.name) { - throw new Error(`Required attribute 'name' doesn't exist on schema: ${schema}`); - } - // eslint-disable-next-line no-case-declarations - const fields = {}; - if (!schema.fields) { - throw new Error(`Required attribute 'fields' doesn't exist on schema: ${schema}`); - } - for (const field of schema.fields) { - fields[field.name] = AvroType.fromSchema(field.type); - } - return new AvroRecordType(fields, schema.name); - case AvroComplex.ENUM: - if (schema.aliases) { - throw new Error(`aliases currently is not supported, schema: ${schema}`); - } - if (!schema.symbols) { - throw new Error(`Required attribute 'symbols' doesn't exist on schema: ${schema}`); - } - return new AvroEnumType(schema.symbols); - case AvroComplex.MAP: - if (!schema.values) { - throw new 
Error(`Required attribute 'values' doesn't exist on schema: ${schema}`); - } - return new AvroMapType(AvroType.fromSchema(schema.values)); - case AvroComplex.ARRAY: // Unused today - case AvroComplex.FIXED: // Unused today - default: - throw new Error(`Unexpected Avro type ${type} in ${schema}`); - } + * x-ms-copy-status is failed or pending. Describes cause of fatal or + * non-fatal copy operation failure. + * + * @readonly + */ + get copyStatusDescription() { + return this.originalResponse.copyStatusDescription; } -} -class AvroPrimitiveType extends AvroType { - constructor(primitive) { - super(); - this._primitive = primitive; + /** + * When a blob is leased, + * specifies whether the lease is of infinite or fixed duration. Possible + * values include: 'infinite', 'fixed'. + * + * @readonly + */ + get leaseDuration() { + return this.originalResponse.leaseDuration; } - read(stream, options = {}) { - switch (this._primitive) { - case AvroPrimitive.NULL: - return AvroParser.readNull(); - case AvroPrimitive.BOOLEAN: - return AvroParser.readBoolean(stream, options); - case AvroPrimitive.INT: - return AvroParser.readInt(stream, options); - case AvroPrimitive.LONG: - return AvroParser.readLong(stream, options); - case AvroPrimitive.FLOAT: - return AvroParser.readFloat(stream, options); - case AvroPrimitive.DOUBLE: - return AvroParser.readDouble(stream, options); - case AvroPrimitive.BYTES: - return AvroParser.readBytes(stream, options); - case AvroPrimitive.STRING: - return AvroParser.readString(stream, options); - default: - throw new Error("Unknown Avro Primitive"); - } + /** + * Lease state of the blob. Possible + * values include: 'available', 'leased', 'expired', 'breaking', 'broken'. + * + * @readonly + */ + get leaseState() { + return this.originalResponse.leaseState; } -} -class AvroEnumType extends AvroType { - constructor(symbols) { - super(); - this._symbols = symbols; + /** + * The current lease status of the + * blob. Possible values include: 'locked', 'unlocked'. + * + * @readonly + */ + get leaseStatus() { + return this.originalResponse.leaseStatus; } - async read(stream, options = {}) { - const value = await AvroParser.readInt(stream, options); - return this._symbols[value]; + /** + * A UTC date/time value generated by the service that + * indicates the time at which the response was initiated. + * + * @readonly + */ + get date() { + return this.originalResponse.date; } -} -class AvroUnionType extends AvroType { - constructor(types) { - super(); - this._types = types; + /** + * The number of committed blocks + * present in the blob. This header is returned only for append blobs. + * + * @readonly + */ + get blobCommittedBlockCount() { + return this.originalResponse.blobCommittedBlockCount; } - async read(stream, options = {}) { - const typeIndex = await AvroParser.readInt(stream, options); - return this._types[typeIndex].read(stream, options); + /** + * The ETag contains a value that you can use to + * perform operations conditionally, in quotes. + * + * @readonly + */ + get etag() { + return this.originalResponse.etag; } -} -class AvroMapType extends AvroType { - constructor(itemType) { - super(); - this._itemType = itemType; + /** + * The error code. 
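// AvroType.fromSchema() above resolves a parsed JSON schema recursively:
// strings become primitive readers, arrays become unions, and objects become
// record/enum/map readers. The shape of that resolution, compressed into a
// sketch that builds plain descriptor objects instead of the bundle's reader
// classes:
function resolveSchema(schema) {
  if (typeof schema === "string") return { kind: "primitive", name: schema };
  if (Array.isArray(schema)) return { kind: "union", branches: schema.map(resolveSchema) };
  switch (schema.type) {
    case "record":
      return {
        kind: "record",
        name: schema.name,
        fields: Object.fromEntries(schema.fields.map((f) => [f.name, resolveSchema(f.type)])),
      };
    case "enum":
      return { kind: "enum", symbols: schema.symbols };
    case "map":
      return { kind: "map", values: resolveSchema(schema.values) };
    default:
      throw new Error(`Unexpected Avro type ${schema.type}`);
  }
}
// resolveSchema({ type: "record", name: "r", fields: [{ name: "x", type: "long" }] })
//   -> { kind: "record", name: "r", fields: { x: { kind: "primitive", name: "long" } } }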
+ * + * @readonly + */ + get errorCode() { + return this.originalResponse.errorCode; } - read(stream, options = {}) { - const readItemMethod = (s, opts) => { - return this._itemType.read(s, opts); - }; - return AvroParser.readMap(stream, readItemMethod, options); + /** + * The value of this header is set to + * true if the file data and application metadata are completely encrypted + * using the specified algorithm. Otherwise, the value is set to false (when + * the file is unencrypted, or if only parts of the file/application metadata + * are encrypted). + * + * @readonly + */ + get isServerEncrypted() { + return this.originalResponse.isServerEncrypted; } -} -class AvroRecordType extends AvroType { - constructor(fields, name) { - super(); - this._fields = fields; - this._name = name; + /** + * If the blob has a MD5 hash, and if + * request contains range header (Range or x-ms-range), this response header + * is returned with the value of the whole blob's MD5 value. This value may + * or may not be equal to the value returned in Content-MD5 header, with the + * latter calculated from the requested range. + * + * @readonly + */ + get blobContentMD5() { + return this.originalResponse.blobContentMD5; } - async read(stream, options = {}) { - const record = {}; - record["$schema"] = this._name; - for (const key in this._fields) { - if (Object.prototype.hasOwnProperty.call(this._fields, key)) { - record[key] = await this._fields[key].read(stream, options); - } - } - return record; + /** + * Returns the date and time the file was last + * modified. Any operation that modifies the file or its properties updates + * the last modified time. + * + * @readonly + */ + get lastModified() { + return this.originalResponse.lastModified; } -} - -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -function arraysEqual(a, b) { - if (a === b) - return true; - // eslint-disable-next-line eqeqeq - if (a == null || b == null) - return false; - if (a.length !== b.length) - return false; - for (let i = 0; i < a.length; ++i) { - if (a[i] !== b[i]) - return false; + /** + * A name-value pair + * to associate with a file storage object. + * + * @readonly + */ + get metadata() { + return this.originalResponse.metadata; } - return true; -} - -// Copyright (c) Microsoft Corporation. -class AvroReader { - constructor(dataStream, headerStream, currentBlockOffset, indexWithinCurrentBlock) { - this._dataStream = dataStream; - this._headerStream = headerStream || dataStream; - this._initialized = false; - this._blockOffset = currentBlockOffset || 0; - this._objectIndex = indexWithinCurrentBlock || 0; - this._initialBlockOffset = currentBlockOffset || 0; + /** + * This header uniquely identifies the request + * that was made and can be used for troubleshooting the request. + * + * @readonly + */ + get requestId() { + return this.originalResponse.requestId; } - get blockOffset() { - return this._blockOffset; + /** + * If a client request id header is sent in the request, this header will be present in the + * response with the same value. + * + * @readonly + */ + get clientRequestId() { + return this.originalResponse.clientRequestId; } - get objectIndex() { - return this._objectIndex; + /** + * Indicates the version of the File service used + * to execute the request. 
+ * + * @readonly + */ + get version() { + return this.originalResponse.version; } - async initialize(options = {}) { - const header = await AvroParser.readFixedBytes(this._headerStream, AVRO_INIT_BYTES.length, { - abortSignal: options.abortSignal, - }); - if (!arraysEqual(header, AVRO_INIT_BYTES)) { - throw new Error("Stream is not an Avro file."); - } - // File metadata is written as if defined by the following map schema: - // { "type": "map", "values": "bytes"} - this._metadata = await AvroParser.readMap(this._headerStream, AvroParser.readString, { - abortSignal: options.abortSignal, - }); - // Validate codec - const codec = this._metadata[AVRO_CODEC_KEY]; - if (!(codec === undefined || codec === null || codec === "null")) { - throw new Error("Codecs are not supported"); - } - // The 16-byte, randomly-generated sync marker for this file. - this._syncMarker = await AvroParser.readFixedBytes(this._headerStream, AVRO_SYNC_MARKER_SIZE, { - abortSignal: options.abortSignal, - }); - // Parse the schema - const schema = JSON.parse(this._metadata[AVRO_SCHEMA_KEY]); - this._itemType = AvroType.fromSchema(schema); - if (this._blockOffset === 0) { - this._blockOffset = this._initialBlockOffset + this._dataStream.position; - } - this._itemsRemainingInBlock = await AvroParser.readLong(this._dataStream, { - abortSignal: options.abortSignal, - }); - // skip block length - await AvroParser.readLong(this._dataStream, { abortSignal: options.abortSignal }); - this._initialized = true; - if (this._objectIndex && this._objectIndex > 0) { - for (let i = 0; i < this._objectIndex; i++) { - await this._itemType.read(this._dataStream, { abortSignal: options.abortSignal }); - this._itemsRemainingInBlock--; - } - } + /** + * The SHA-256 hash of the encryption key used to encrypt the blob. This value is only returned + * when the blob was encrypted with a customer-provided key. + * + * @readonly + */ + get encryptionKeySha256() { + return this.originalResponse.encryptionKeySha256; } - hasNext() { - return !this._initialized || this._itemsRemainingInBlock > 0; + /** + * If the request is to read a specified range and the x-ms-range-get-content-crc64 is set to + * true, then the request returns a crc64 for the range, as long as the range size is less than + * or equal to 4 MB. If both x-ms-range-get-content-crc64 & x-ms-range-get-content-md5 is + * specified in the same request, it will fail with 400(Bad Request) + */ + get contentCrc64() { + return this.originalResponse.contentCrc64; } - parseObjects(options = {}) { - return tslib.__asyncGenerator(this, arguments, function* parseObjects_1() { - if (!this._initialized) { - yield tslib.__await(this.initialize(options)); - } - while (this.hasNext()) { - const result = yield tslib.__await(this._itemType.read(this._dataStream, { - abortSignal: options.abortSignal, - })); - this._itemsRemainingInBlock--; - this._objectIndex++; - if (this._itemsRemainingInBlock === 0) { - const marker = yield tslib.__await(AvroParser.readFixedBytes(this._dataStream, AVRO_SYNC_MARKER_SIZE, { - abortSignal: options.abortSignal, - })); - this._blockOffset = this._initialBlockOffset + this._dataStream.position; - this._objectIndex = 0; - if (!arraysEqual(this._syncMarker, marker)) { - throw new Error("Stream is not a valid Avro file."); - } - try { - this._itemsRemainingInBlock = yield tslib.__await(AvroParser.readLong(this._dataStream, { - abortSignal: options.abortSignal, - })); - } - catch (err) { - // We hit the end of the stream. 
- this._itemsRemainingInBlock = 0; - } - if (this._itemsRemainingInBlock > 0) { - // Ignore block size - yield tslib.__await(AvroParser.readLong(this._dataStream, { abortSignal: options.abortSignal })); - } - } - yield yield tslib.__await(result); - } - }); + /** + * The response body as a browser Blob. + * Always undefined in node.js. + * + * @readonly + */ + get blobBody() { + return undefined; + } + /** + * The response body as a node.js Readable stream. + * Always undefined in the browser. + * + * It will parse avor data returned by blob query. + * + * @readonly + */ + get readableStreamBody() { + return coreHttp.isNode ? this.blobDownloadStream : undefined; + } + /** + * The HTTP response. + */ + get _response() { + return this.originalResponse._response; } } // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -class AvroReadable { -} - -// Copyright (c) Microsoft Corporation. -const ABORT_ERROR = new abortController.AbortError("Reading from the avro stream was aborted."); -class AvroReadableFromStream extends AvroReadable { - constructor(readable) { - super(); - this._readable = readable; - this._position = 0; - } - toUint8Array(data) { - if (typeof data === "string") { - return Buffer.from(data); - } - return data; +/** + * Represents the access tier on a blob. + * For detailed information about block blob level tiering see {@link https://docs.microsoft.com/azure/storage/blobs/storage-blob-storage-tiers|Hot, cool and archive storage tiers.} + */ +exports.BlockBlobTier = void 0; +(function (BlockBlobTier) { + /** + * Optimized for storing data that is accessed frequently. + */ + BlockBlobTier["Hot"] = "Hot"; + /** + * Optimized for storing data that is infrequently accessed and stored for at least 30 days. + */ + BlockBlobTier["Cool"] = "Cool"; + /** + * Optimized for storing data that is rarely accessed and stored for at least 180 days + * with flexible latency requirements (on the order of hours). + */ + BlockBlobTier["Archive"] = "Archive"; +})(exports.BlockBlobTier || (exports.BlockBlobTier = {})); +/** + * Specifies the page blob tier to set the blob to. This is only applicable to page blobs on premium storage accounts. + * Please see {@link https://docs.microsoft.com/azure/storage/storage-premium-storage#scalability-and-performance-targets|here} + * for detailed information on the corresponding IOPS and throughput per PageBlobTier. + */ +exports.PremiumPageBlobTier = void 0; +(function (PremiumPageBlobTier) { + /** + * P4 Tier. + */ + PremiumPageBlobTier["P4"] = "P4"; + /** + * P6 Tier. + */ + PremiumPageBlobTier["P6"] = "P6"; + /** + * P10 Tier. + */ + PremiumPageBlobTier["P10"] = "P10"; + /** + * P15 Tier. + */ + PremiumPageBlobTier["P15"] = "P15"; + /** + * P20 Tier. + */ + PremiumPageBlobTier["P20"] = "P20"; + /** + * P30 Tier. + */ + PremiumPageBlobTier["P30"] = "P30"; + /** + * P40 Tier. + */ + PremiumPageBlobTier["P40"] = "P40"; + /** + * P50 Tier. + */ + PremiumPageBlobTier["P50"] = "P50"; + /** + * P60 Tier. + */ + PremiumPageBlobTier["P60"] = "P60"; + /** + * P70 Tier. + */ + PremiumPageBlobTier["P70"] = "P70"; + /** + * P80 Tier. + */ + PremiumPageBlobTier["P80"] = "P80"; +})(exports.PremiumPageBlobTier || (exports.PremiumPageBlobTier = {})); +function toAccessTier(tier) { + if (tier === undefined) { + return undefined; } - get position() { - return this._position; + return tier; // No more check if string is a valid AccessTier, and left this to underlay logic to decide(service). 
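// AvroReader.initialize() above walks the Avro object container header in
// order: the 4-byte magic "Obj\x01" (AVRO_INIT_BYTES), a map<string, bytes> of
// file metadata (only the "null" codec is accepted), a 16-byte random sync
// marker, then per block an item count and a byte length, with the sync marker
// checked again after every block. Consuming the records is plain async
// iteration; a sketch, assuming an AvroReadable `source` is in scope:
async function printRecords(source, abortSignal) {
  const reader = new AvroReader(source);
  for await (const record of reader.parseObjects({ abortSignal })) {
    // $schema is injected by AvroRecordType.read with the record's type name
    console.log(record.$schema, record);
  }
}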
+} +function ensureCpkIfSpecified(cpk, isHttps) { + if (cpk && !isHttps) { + throw new RangeError("Customer-provided encryption key must be used over HTTPS."); } - async read(size, options = {}) { - var _a; - if ((_a = options.abortSignal) === null || _a === void 0 ? void 0 : _a.aborted) { - throw ABORT_ERROR; - } - if (size < 0) { - throw new Error(`size parameter should be positive: ${size}`); - } - if (size === 0) { - return new Uint8Array(); - } - if (!this._readable.readable) { - throw new Error("Stream no longer readable."); - } - // See if there is already enough data. - const chunk = this._readable.read(size); - if (chunk) { - this._position += chunk.length; - // chunk.length maybe less than desired size if the stream ends. - return this.toUint8Array(chunk); - } - else { - // register callback to wait for enough data to read - return new Promise((resolve, reject) => { - /* eslint-disable @typescript-eslint/no-use-before-define */ - const cleanUp = () => { - this._readable.removeListener("readable", readableCallback); - this._readable.removeListener("error", rejectCallback); - this._readable.removeListener("end", rejectCallback); - this._readable.removeListener("close", rejectCallback); - if (options.abortSignal) { - options.abortSignal.removeEventListener("abort", abortHandler); - } - }; - const readableCallback = () => { - const callbackChunk = this._readable.read(size); - if (callbackChunk) { - this._position += callbackChunk.length; - cleanUp(); - // callbackChunk.length maybe less than desired size if the stream ends. - resolve(this.toUint8Array(callbackChunk)); - } - }; - const rejectCallback = () => { - cleanUp(); - reject(); - }; - const abortHandler = () => { - cleanUp(); - reject(ABORT_ERROR); - }; - this._readable.on("readable", readableCallback); - this._readable.once("error", rejectCallback); - this._readable.once("end", rejectCallback); - this._readable.once("close", rejectCallback); - if (options.abortSignal) { - options.abortSignal.addEventListener("abort", abortHandler); - } - /* eslint-enable @typescript-eslint/no-use-before-define */ - }); - } + if (cpk && !cpk.encryptionAlgorithm) { + cpk.encryptionAlgorithm = EncryptionAlgorithmAES25; } } +/** + * Defines the known cloud audiences for Storage. + */ +exports.StorageBlobAudience = void 0; +(function (StorageBlobAudience) { + /** + * The OAuth scope to use to retrieve an AAD token for Azure Storage. + */ + StorageBlobAudience["StorageOAuthScopes"] = "https://storage.azure.com/.default"; + /** + * The OAuth scope to use to retrieve an AAD token for Azure Disk. + */ + StorageBlobAudience["DiskComputeOAuthScopes"] = "https://disk.compute.azure.com/.default"; +})(exports.StorageBlobAudience || (exports.StorageBlobAudience = {})); + +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +/** + * Function that converts PageRange and ClearRange to a common Range object. + * PageRange and ClearRange have start and end while Range offset and count + * this function normalizes to Range. 
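// rangeResponseFromModel(), documented here and implemented just below, maps
// the service's { start, end } pairs onto the SDK's { offset, count } ranges
// for both pageRange and clearRange. The arithmetic it applies, as written:
const toRange = ({ start, end }) => ({ offset: start, count: end - start });
// toRange({ start: 0, end: 511 }) -> { offset: 0, count: 511 }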
+ * @param response - Model PageBlob Range response + */ +function rangeResponseFromModel(response) { + const pageRange = (response._response.parsedBody.pageRange || []).map((x) => ({ + offset: x.start, + count: x.end - x.start, + })); + const clearRange = (response._response.parsedBody.clearRange || []).map((x) => ({ + offset: x.start, + count: x.end - x.start, + })); + return Object.assign(Object.assign({}, response), { pageRange, + clearRange, _response: Object.assign(Object.assign({}, response._response), { parsedBody: { + pageRange, + clearRange, + } }) }); +} // Copyright (c) Microsoft Corporation. /** - * ONLY AVAILABLE IN NODE.JS RUNTIME. + * This is the poller returned by {@link BlobClient.beginCopyFromURL}. + * This can not be instantiated directly outside of this package. * - * A Node.js BlobQuickQueryStream will internally parse avro data stream for blob query. + * @hidden */ -class BlobQuickQueryStream extends stream.Readable { - /** - * Creates an instance of BlobQuickQueryStream. - * - * @param source - The current ReadableStream returned from getter - * @param options - - */ - constructor(source, options = {}) { - super(); - this.avroPaused = true; - this.source = source; - this.onProgress = options.onProgress; - this.onError = options.onError; - this.avroReader = new AvroReader(new AvroReadableFromStream(this.source)); - this.avroIter = this.avroReader.parseObjects({ abortSignal: options.abortSignal }); +class BlobBeginCopyFromUrlPoller extends coreLro.Poller { + constructor(options) { + const { blobClient, copySource, intervalInMs = 15000, onProgress, resumeFrom, startCopyFromURLOptions, } = options; + let state; + if (resumeFrom) { + state = JSON.parse(resumeFrom).state; + } + const operation = makeBlobBeginCopyFromURLPollOperation(Object.assign(Object.assign({}, state), { blobClient, + copySource, + startCopyFromURLOptions })); + super(operation); + if (typeof onProgress === "function") { + this.onProgress(onProgress); + } + this.intervalInMs = intervalInMs; } - _read() { - if (this.avroPaused) { - this.readInternal().catch((err) => { - this.emit("error", err); - }); + delay() { + return coreHttp.delay(this.intervalInMs); + } +} +/** + * Note: Intentionally using function expression over arrow function expression + * so that the function can be invoked with a different context. + * This affects what `this` refers to. + * @hidden + */ +const cancel = async function cancel(options = {}) { + const state = this.state; + const { copyId } = state; + if (state.isCompleted) { + return makeBlobBeginCopyFromURLPollOperation(state); + } + if (!copyId) { + state.isCancelled = true; + return makeBlobBeginCopyFromURLPollOperation(state); + } + // if abortCopyFromURL throws, it will bubble up to user's poller.cancelOperation call + await state.blobClient.abortCopyFromURL(copyId, { + abortSignal: options.abortSignal, + }); + state.isCancelled = true; + return makeBlobBeginCopyFromURLPollOperation(state); +}; +/** + * Note: Intentionally using function expression over arrow function expression + * so that the function can be invoked with a different context. + * This affects what `this` refers to. 
+ * @hidden + */ +const update = async function update(options = {}) { + const state = this.state; + const { blobClient, copySource, startCopyFromURLOptions } = state; + if (!state.isStarted) { + state.isStarted = true; + const result = await blobClient.startCopyFromURL(copySource, startCopyFromURLOptions); + // copyId is needed to abort + state.copyId = result.copyId; + if (result.copyStatus === "success") { + state.result = result; + state.isCompleted = true; } } - async readInternal() { - this.avroPaused = false; - let avroNext; - do { - avroNext = await this.avroIter.next(); - if (avroNext.done) { - break; + else if (!state.isCompleted) { + try { + const result = await state.blobClient.getProperties({ abortSignal: options.abortSignal }); + const { copyStatus, copyProgress } = result; + const prevCopyProgress = state.copyProgress; + if (copyProgress) { + state.copyProgress = copyProgress; } - const obj = avroNext.value; - const schema = obj.$schema; - if (typeof schema !== "string") { - throw Error("Missing schema in avro record."); + if (copyStatus === "pending" && + copyProgress !== prevCopyProgress && + typeof options.fireProgress === "function") { + // trigger in setTimeout, or swallow error? + options.fireProgress(state); } - switch (schema) { - case "com.microsoft.azure.storage.queryBlobContents.resultData": - { - const data = obj.data; - if (data instanceof Uint8Array === false) { - throw Error("Invalid data in avro result record."); - } - if (!this.push(Buffer.from(data))) { - this.avroPaused = true; - } - } - break; - case "com.microsoft.azure.storage.queryBlobContents.progress": - { - const bytesScanned = obj.bytesScanned; - if (typeof bytesScanned !== "number") { - throw Error("Invalid bytesScanned in avro progress record."); - } - if (this.onProgress) { - this.onProgress({ loadedBytes: bytesScanned }); - } - } - break; - case "com.microsoft.azure.storage.queryBlobContents.end": - if (this.onProgress) { - const totalBytes = obj.totalBytes; - if (typeof totalBytes !== "number") { - throw Error("Invalid totalBytes in avro end record."); - } - this.onProgress({ loadedBytes: totalBytes }); - } - this.push(null); - break; - case "com.microsoft.azure.storage.queryBlobContents.error": - if (this.onError) { - const fatal = obj.fatal; - if (typeof fatal !== "boolean") { - throw Error("Invalid fatal in avro error record."); - } - const name = obj.name; - if (typeof name !== "string") { - throw Error("Invalid name in avro error record."); - } - const description = obj.description; - if (typeof description !== "string") { - throw Error("Invalid description in avro error record."); - } - const position = obj.position; - if (typeof position !== "number") { - throw Error("Invalid position in avro error record."); - } - this.onError({ - position, - name, - isFatal: fatal, - description, - }); - } - break; - default: - throw Error(`Unknown schema ${schema} in avro progress record.`); + else if (copyStatus === "success") { + state.result = result; + state.isCompleted = true; } - } while (!avroNext.done && !this.avroPaused); + else if (copyStatus === "failed") { + state.error = new Error(`Blob copy failed with reason: "${result.copyStatusDescription || "unknown"}"`); + state.isCompleted = true; + } + } + catch (err) { + state.error = err; + state.isCompleted = true; + } } + return makeBlobBeginCopyFromURLPollOperation(state); +}; +/** + * Note: Intentionally using function expression over arrow function expression + * so that the function can be invoked with a different context. 
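// update() above is the entire copy state machine: the first poll issues
// startCopyFromURL and records copyId, and every later poll reads copyStatus
// from getProperties, completing on "success"/"failed" and surfacing progress
// in between. From the caller's side this is hidden behind the poller; a usage
// sketch (blobClient and sourceUrl are assumed to be in scope):
async function copyWithProgress(blobClient, sourceUrl) {
  const poller = await blobClient.beginCopyFromURL(sourceUrl, {
    intervalInMs: 15000, // matches the default delay between polls above
    onProgress: (state) => console.log(`copied so far: ${state.copyProgress}`),
  });
  return poller.pollUntilDone(); // resolves once copyStatus leaves "pending"
}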
+ * This affects what `this` refers to. + * @hidden + */ +const toString = function toString() { + return JSON.stringify({ state: this.state }, (key, value) => { + // remove blobClient from serialized state since a client can't be hydrated from this info. + if (key === "blobClient") { + return undefined; + } + return value; + }); +}; +/** + * Creates a poll operation given the provided state. + * @hidden + */ +function makeBlobBeginCopyFromURLPollOperation(state) { + return { + state: Object.assign({}, state), + cancel, + toString, + update, + }; } // Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. /** - * ONLY AVAILABLE IN NODE.JS RUNTIME. + * Generate a range string. For example: * - * BlobQueryResponse implements BlobDownloadResponseModel interface, and in Node.js runtime it will - * parse avor data returned by blob query. + * "bytes=255-" or "bytes=0-511" + * + * @param iRange - */ -class BlobQueryResponse { - /** - * Creates an instance of BlobQueryResponse. - * - * @param originalResponse - - * @param options - - */ - constructor(originalResponse, options = {}) { - this.originalResponse = originalResponse; - this.blobDownloadStream = new BlobQuickQueryStream(this.originalResponse.readableStreamBody, options); +function rangeToString(iRange) { + if (iRange.offset < 0) { + throw new RangeError(`Range.offset cannot be smaller than 0.`); + } + if (iRange.count && iRange.count <= 0) { + throw new RangeError(`Range.count must be larger than 0. Leave it undefined if you want a range from offset to the end.`); } + return iRange.count + ? `bytes=${iRange.offset}-${iRange.offset + iRange.count - 1}` + : `bytes=${iRange.offset}-`; +} + +// Copyright (c) Microsoft Corporation. +/** + * States for Batch. + */ +var BatchStates; +(function (BatchStates) { + BatchStates[BatchStates["Good"] = 0] = "Good"; + BatchStates[BatchStates["Error"] = 1] = "Error"; +})(BatchStates || (BatchStates = {})); +/** + * Batch provides basic parallel execution with concurrency limits. + * Will stop execute left operations when one of the executed operation throws an error. + * But Batch cannot cancel ongoing operations, you need to cancel them by yourself. + */ +class Batch { /** - * Indicates that the service supports - * requests for partial file content. - * - * @readonly + * Creates an instance of Batch. + * @param concurrency - */ - get acceptRanges() { - return this.originalResponse.acceptRanges; + constructor(concurrency = 5) { + /** + * Number of active operations under execution. + */ + this.actives = 0; + /** + * Number of completed operations under execution. + */ + this.completed = 0; + /** + * Offset of next operation to be executed. + */ + this.offset = 0; + /** + * Operation array to be executed. + */ + this.operations = []; + /** + * States of Batch. When an error happens, state will turn into error. + * Batch will stop execute left operations. + */ + this.state = BatchStates.Good; + if (concurrency < 1) { + throw new RangeError("concurrency must be larger than 0"); + } + this.concurrency = concurrency; + this.emitter = new events.EventEmitter(); } /** - * Returns if it was previously specified - * for the file. + * Add a operation into queue. 
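// rangeToString() above emits the inclusive HTTP byte-range form, so the end
// position is offset + count - 1 and omitting count means "to the end":
//   rangeToString({ offset: 0 })             -> "bytes=0-"
//   rangeToString({ offset: 0, count: 512 }) -> "bytes=0-511"
//   rangeToString({ offset: 255 })           -> "bytes=255-"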
* - * @readonly + * @param operation - */ - get cacheControl() { - return this.originalResponse.cacheControl; + addOperation(operation) { + this.operations.push(async () => { + try { + this.actives++; + await operation(); + this.actives--; + this.completed++; + this.parallelExecute(); + } + catch (error) { + this.emitter.emit("error", error); + } + }); } /** - * Returns the value that was specified - * for the 'x-ms-content-disposition' header and specifies how to process the - * response. + * Start execute operations in the queue. * - * @readonly */ - get contentDisposition() { - return this.originalResponse.contentDisposition; + async do() { + if (this.operations.length === 0) { + return Promise.resolve(); + } + this.parallelExecute(); + return new Promise((resolve, reject) => { + this.emitter.on("finish", resolve); + this.emitter.on("error", (error) => { + this.state = BatchStates.Error; + reject(error); + }); + }); } /** - * Returns the value that was specified - * for the Content-Encoding request header. + * Get next operation to be executed. Return null when reaching ends. * - * @readonly */ - get contentEncoding() { - return this.originalResponse.contentEncoding; + nextOperation() { + if (this.offset < this.operations.length) { + return this.operations[this.offset++]; + } + return null; } /** - * Returns the value that was specified - * for the Content-Language request header. + * Start execute operations. One one the most important difference between + * this method with do() is that do() wraps as an sync method. * - * @readonly */ - get contentLanguage() { - return this.originalResponse.contentLanguage; + parallelExecute() { + if (this.state === BatchStates.Error) { + return; + } + if (this.completed >= this.operations.length) { + this.emitter.emit("finish"); + return; + } + while (this.actives < this.concurrency) { + const operation = this.nextOperation(); + if (operation) { + operation(); + } + else { + return; + } + } } +} + +// Copyright (c) Microsoft Corporation. +/** + * This class generates a readable stream from the data in an array of buffers. + */ +class BuffersStream extends stream.Readable { /** - * The current sequence number for a - * page blob. This header is not returned for block blobs or append blobs. + * Creates an instance of BuffersStream that will emit the data + * contained in the array of buffers. * - * @readonly + * @param buffers - Array of buffers containing the data + * @param byteLength - The total length of data contained in the buffers */ - get blobSequenceNumber() { - return this.originalResponse.blobSequenceNumber; + constructor(buffers, byteLength, options) { + super(options); + this.buffers = buffers; + this.byteLength = byteLength; + this.byteOffsetInCurrentBuffer = 0; + this.bufferIndex = 0; + this.pushedBytesLength = 0; + // check byteLength is no larger than buffers[] total length + let buffersLength = 0; + for (const buf of this.buffers) { + buffersLength += buf.byteLength; + } + if (buffersLength < this.byteLength) { + throw new Error("Data size shouldn't be larger than the total length of buffers."); + } } /** - * The blob's type. Possible values include: - * 'BlockBlob', 'PageBlob', 'AppendBlob'. + * Internal _read() that will be called when the stream wants to pull more data in. * - * @readonly + * @param size - Optional. 
The size of data to be read */ - get blobType() { - return this.originalResponse.blobType; + _read(size) { + if (this.pushedBytesLength >= this.byteLength) { + this.push(null); + } + if (!size) { + size = this.readableHighWaterMark; + } + const outBuffers = []; + let i = 0; + while (i < size && this.pushedBytesLength < this.byteLength) { + // The last buffer may be longer than the data it contains. + const remainingDataInAllBuffers = this.byteLength - this.pushedBytesLength; + const remainingCapacityInThisBuffer = this.buffers[this.bufferIndex].byteLength - this.byteOffsetInCurrentBuffer; + const remaining = Math.min(remainingCapacityInThisBuffer, remainingDataInAllBuffers); + if (remaining > size - i) { + // chunkSize = size - i + const end = this.byteOffsetInCurrentBuffer + size - i; + outBuffers.push(this.buffers[this.bufferIndex].slice(this.byteOffsetInCurrentBuffer, end)); + this.pushedBytesLength += size - i; + this.byteOffsetInCurrentBuffer = end; + i = size; + break; + } + else { + // chunkSize = remaining + const end = this.byteOffsetInCurrentBuffer + remaining; + outBuffers.push(this.buffers[this.bufferIndex].slice(this.byteOffsetInCurrentBuffer, end)); + if (remaining === remainingCapacityInThisBuffer) { + // this.buffers[this.bufferIndex] used up, shift to next one + this.byteOffsetInCurrentBuffer = 0; + this.bufferIndex++; + } + else { + this.byteOffsetInCurrentBuffer = end; + } + this.pushedBytesLength += remaining; + i += remaining; + } + } + if (outBuffers.length > 1) { + this.push(Buffer.concat(outBuffers)); + } + else if (outBuffers.length === 1) { + this.push(outBuffers[0]); + } + } +} + +// Copyright (c) Microsoft Corporation. +/** + * maxBufferLength is max size of each buffer in the pooled buffers. + */ +// Can't use import as Typescript doesn't recognize "buffer". +const maxBufferLength = (__nccwpck_require__(4300).constants.MAX_LENGTH); +/** + * This class provides a buffer container which conceptually has no hard size limit. + * It accepts a capacity, an array of input buffers and the total length of input data. + * It will allocate an internal "buffer" of the capacity and fill the data in the input buffers + * into the internal "buffer" serially with respect to the total length. + * Then by calling PooledBuffer.getReadableStream(), you can get a readable stream + * assembled from all the data in the internal "buffer". + */ +class PooledBuffer { + constructor(capacity, buffers, totalLength) { + /** + * Internal buffers used to keep the data. + * Each buffer has a length of the maxBufferLength except last one. + */ + this.buffers = []; + this.capacity = capacity; + this._size = 0; + // allocate + const bufferNum = Math.ceil(capacity / maxBufferLength); + for (let i = 0; i < bufferNum; i++) { + let len = i === bufferNum - 1 ? capacity % maxBufferLength : maxBufferLength; + if (len === 0) { + len = maxBufferLength; + } + this.buffers.push(Buffer.allocUnsafe(len)); + } + if (buffers) { + this.fill(buffers, totalLength); + } } /** - * The number of bytes present in the - * response body. - * - * @readonly + * The size of the data contained in the pooled buffers. */ - get contentLength() { - return this.originalResponse.contentLength; + get size() { + return this._size; } /** - * If the file has an MD5 hash and the - * request is to read the full file, this response header is returned so that - * the client can check for message content integrity. 
If the request is to - * read a specified range and the 'x-ms-range-get-content-md5' is set to - * true, then the request returns an MD5 hash for the range, as long as the - * range size is less than or equal to 4 MB. If neither of these sets of - * conditions is true, then no value is returned for the 'Content-MD5' - * header. + * Fill the internal buffers with data in the input buffers serially + * with respect to the total length and the total capacity of the internal buffers. + * Data copied will be shift out of the input buffers. + * + * @param buffers - Input buffers containing the data to be filled in the pooled buffer + * @param totalLength - Total length of the data to be filled in. * - * @readonly */ - get contentMD5() { - return this.originalResponse.contentMD5; + fill(buffers, totalLength) { + this._size = Math.min(this.capacity, totalLength); + let i = 0, j = 0, targetOffset = 0, sourceOffset = 0, totalCopiedNum = 0; + while (totalCopiedNum < this._size) { + const source = buffers[i]; + const target = this.buffers[j]; + const copiedNum = source.copy(target, targetOffset, sourceOffset); + totalCopiedNum += copiedNum; + sourceOffset += copiedNum; + targetOffset += copiedNum; + if (sourceOffset === source.length) { + i++; + sourceOffset = 0; + } + if (targetOffset === target.length) { + j++; + targetOffset = 0; + } + } + // clear copied from source buffers + buffers.splice(0, i); + if (buffers.length > 0) { + buffers[0] = buffers[0].slice(sourceOffset); + } } /** - * Indicates the range of bytes returned if - * the client requested a subset of the file by setting the Range request - * header. + * Get the readable stream assembled from all the data in the internal buffers. * - * @readonly */ - get contentRange() { - return this.originalResponse.contentRange; + getReadableStream() { + return new BuffersStream(this.buffers, this.size); } +} + +// Copyright (c) Microsoft Corporation. +/** + * This class accepts a Node.js Readable stream as input, and keeps reading data + * from the stream into the internal buffer structure, until it reaches maxBuffers. + * Every available buffer will try to trigger outgoingHandler. + * + * The internal buffer structure includes an incoming buffer array, and a outgoing + * buffer array. The incoming buffer array includes the "empty" buffers can be filled + * with new incoming data. The outgoing array includes the filled buffers to be + * handled by outgoingHandler. Every above buffer size is defined by parameter bufferSize. + * + * NUM_OF_ALL_BUFFERS = BUFFERS_IN_INCOMING + BUFFERS_IN_OUTGOING + BUFFERS_UNDER_HANDLING + * + * NUM_OF_ALL_BUFFERS lesser than or equal to maxBuffers + * + * PERFORMANCE IMPROVEMENT TIPS: + * 1. Input stream highWaterMark is better to set a same value with bufferSize + * parameter, which will avoid Buffer.concat() operations. + * 2. concurrency should set a smaller value than maxBuffers, which is helpful to + * reduce the possibility when a outgoing handler waits for the stream data. + * in this situation, outgoing handlers are blocked. + * Outgoing queue shouldn't be empty. + */ +class BufferScheduler { /** - * The content type specified for the file. - * The default content type is 'application/octet-stream' + * Creates an instance of BufferScheduler. 
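// PooledBuffer above pre-allocates ceil(capacity / maxBufferLength) Node.js
// buffers, copies incoming chunks into them with fill() (which consumes the
// source array), and replays the bytes through getReadableStream(), a
// BuffersStream over the same memory. A usage sketch:
const chunks = [Buffer.from("hello "), Buffer.from("world")];
const pooled = new PooledBuffer(11, chunks, 11); // capacity, buffers, totalLength
console.log(pooled.size);                        // 11
pooled.getReadableStream().pipe(process.stdout); // prints "hello world"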
* - * @readonly + * @param readable - A Node.js Readable stream + * @param bufferSize - Buffer size of every maintained buffer + * @param maxBuffers - How many buffers can be allocated + * @param outgoingHandler - An async function scheduled to be + * triggered when a buffer fully filled + * with stream data + * @param concurrency - Concurrency of executing outgoingHandlers (>0) + * @param encoding - [Optional] Encoding of Readable stream when it's a string stream */ - get contentType() { - return this.originalResponse.contentType; + constructor(readable, bufferSize, maxBuffers, outgoingHandler, concurrency, encoding) { + /** + * An internal event emitter. + */ + this.emitter = new events.EventEmitter(); + /** + * An internal offset marker to track data offset in bytes of next outgoingHandler. + */ + this.offset = 0; + /** + * An internal marker to track whether stream is end. + */ + this.isStreamEnd = false; + /** + * An internal marker to track whether stream or outgoingHandler returns error. + */ + this.isError = false; + /** + * How many handlers are executing. + */ + this.executingOutgoingHandlers = 0; + /** + * How many buffers have been allocated. + */ + this.numBuffers = 0; + /** + * Because this class doesn't know how much data every time stream pops, which + * is defined by highWaterMarker of the stream. So BufferScheduler will cache + * data received from the stream, when data in unresolvedDataArray exceeds the + * blockSize defined, it will try to concat a blockSize of buffer, fill into available + * buffers from incoming and push to outgoing array. + */ + this.unresolvedDataArray = []; + /** + * How much data consisted in unresolvedDataArray. + */ + this.unresolvedLength = 0; + /** + * The array includes all the available buffers can be used to fill data from stream. + */ + this.incoming = []; + /** + * The array (queue) includes all the buffers filled from stream data. + */ + this.outgoing = []; + if (bufferSize <= 0) { + throw new RangeError(`bufferSize must be larger than 0, current is ${bufferSize}`); + } + if (maxBuffers <= 0) { + throw new RangeError(`maxBuffers must be larger than 0, current is ${maxBuffers}`); + } + if (concurrency <= 0) { + throw new RangeError(`concurrency must be larger than 0, current is ${concurrency}`); + } + this.bufferSize = bufferSize; + this.maxBuffers = maxBuffers; + this.readable = readable; + this.outgoingHandler = outgoingHandler; + this.concurrency = concurrency; + this.encoding = encoding; } /** - * Conclusion time of the last attempted - * Copy File operation where this file was the destination file. This value - * can specify the time of a completed, aborted, or failed copy attempt. + * Start the scheduler, will return error when stream of any of the outgoingHandlers + * returns error. * - * @readonly */ - get copyCompletedOn() { - return undefined; + async do() { + return new Promise((resolve, reject) => { + this.readable.on("data", (data) => { + data = typeof data === "string" ? 
Buffer.from(data, this.encoding) : data;
+             this.appendUnresolvedData(data);
+             if (!this.resolveData()) {
+                 this.readable.pause();
+             }
+         });
+         this.readable.on("error", (err) => {
+             this.emitter.emit("error", err);
+         });
+         this.readable.on("end", () => {
+             this.isStreamEnd = true;
+             this.emitter.emit("checkEnd");
+         });
+         this.emitter.on("error", (err) => {
+             this.isError = true;
+             this.readable.pause();
+             reject(err);
+         });
+         this.emitter.on("checkEnd", () => {
+             if (this.outgoing.length > 0) {
+                 this.triggerOutgoingHandlers();
+                 return;
+             }
+             if (this.isStreamEnd && this.executingOutgoingHandlers === 0) {
+                 if (this.unresolvedLength > 0 && this.unresolvedLength < this.bufferSize) {
+                     const buffer = this.shiftBufferFromUnresolvedDataArray();
+                     this.outgoingHandler(() => buffer.getReadableStream(), buffer.size, this.offset)
+                         .then(resolve)
+                         .catch(reject);
+                 }
+                 else if (this.unresolvedLength >= this.bufferSize) {
+                     return;
+                 }
+                 else {
+                     resolve();
+                 }
+             }
+         });
+     });
 }
 /**
- * String identifier for the last attempted Copy
- * File operation where this file was the destination file.
+ * Insert new data into the unresolved array.
 *
- * @readonly
+ * @param data -
 */
- get copyId() {
-     return this.originalResponse.copyId;
+ appendUnresolvedData(data) {
+     this.unresolvedDataArray.push(data);
+     this.unresolvedLength += data.length;
 }
 /**
- * Contains the number of bytes copied and
- * the total bytes in the source in the last attempted Copy File operation
- * where this file was the destination file. Can show between 0 and
- * Content-Length bytes copied.
+ * Try to shift a buffer of bufferSize bytes. The returned buffer may be smaller
+ * when the data in unresolvedDataArray is less than bufferSize.
 *
- * @readonly
 */
- get copyProgress() {
-     return this.originalResponse.copyProgress;
+ shiftBufferFromUnresolvedDataArray(buffer) {
+     if (!buffer) {
+         buffer = new PooledBuffer(this.bufferSize, this.unresolvedDataArray, this.unresolvedLength);
+     }
+     else {
+         buffer.fill(this.unresolvedDataArray, this.unresolvedLength);
+     }
+     this.unresolvedLength -= buffer.size;
+     return buffer;
 }
 /**
- * URL up to 2KB in length that specifies the
- * source file used in the last attempted Copy File operation where this file
- * was the destination file.
+ * Resolve data in unresolvedDataArray. For every bufferSize chunk shifted out,
+ * it tries to get (or allocate) a buffer from incoming, fills it, then pushes
+ * it into outgoing to be handled by an outgoing handler.
 *
- * @readonly
+ * @returns Returns false when the available buffers in incoming are not enough; otherwise true.
 */
- get copySource() {
-     return this.originalResponse.copySource;
+ resolveData() {
+     while (this.unresolvedLength >= this.bufferSize) {
+         let buffer;
+         if (this.incoming.length > 0) {
+             buffer = this.incoming.shift();
+             this.shiftBufferFromUnresolvedDataArray(buffer);
+         }
+         else {
+             if (this.numBuffers < this.maxBuffers) {
+                 buffer = this.shiftBufferFromUnresolvedDataArray();
+                 this.numBuffers++;
+             }
+             else {
+                 // No available buffer; wait for a buffer to be returned.
+                 return false;
+             }
+         }
+         this.outgoing.push(buffer);
+         this.triggerOutgoingHandlers();
+     }
+     return true;
 }
 /**
- * State of the copy operation
- * identified by 'x-ms-copy-id'. Possible values include: 'pending',
- * 'success', 'aborted', 'failed'
- *
- * @readonly
+ * Try to trigger an outgoing handler for every buffer in outgoing. Stop when
+ * the concurrency limit is reached.
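+ *
+ * Note: handlers are not awaited here; each handler that finishes emits
+ * "checkEnd", which re-invokes this method, so draining continues without
+ * an explicit scheduling loop.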
 */
- get copyStatus() {
-     return this.originalResponse.copyStatus;
+ async triggerOutgoingHandlers() {
+     let buffer;
+     do {
+         if (this.executingOutgoingHandlers >= this.concurrency) {
+             return;
+         }
+         buffer = this.outgoing.shift();
+         if (buffer) {
+             this.triggerOutgoingHandler(buffer);
+         }
+     } while (buffer);
 }
 /**
- * Only appears when
- * x-ms-copy-status is failed or pending. Describes cause of fatal or
- * non-fatal copy operation failure.
+ * Trigger an outgoing handler for a buffer shifted from outgoing.
 *
- * @readonly
+ * @param buffer -
 */
- get copyStatusDescription() {
-     return this.originalResponse.copyStatusDescription;
+ async triggerOutgoingHandler(buffer) {
+     const bufferLength = buffer.size;
+     this.executingOutgoingHandlers++;
+     this.offset += bufferLength;
+     try {
+         await this.outgoingHandler(() => buffer.getReadableStream(), bufferLength, this.offset - bufferLength);
+     }
+     catch (err) {
+         this.emitter.emit("error", err);
+         return;
+     }
+     this.executingOutgoingHandlers--;
+     this.reuseBuffer(buffer);
+     this.emitter.emit("checkEnd");
 }
 /**
- * When a blob is leased,
- * specifies whether the lease is of infinite or fixed duration. Possible
- * values include: 'infinite', 'fixed'.
+ * Return a buffer used by an outgoing handler to incoming.
 *
- * @readonly
+ * @param buffer -
 */
- get leaseDuration() {
-     return this.originalResponse.leaseDuration;
+ reuseBuffer(buffer) {
+     this.incoming.push(buffer);
+     if (!this.isError && this.resolveData() && !this.isStreamEnd) {
+         this.readable.resume();
+     }
+ }
+}
+
+// Copyright (c) Microsoft Corporation.
+/**
+ * Reads a readable stream into a buffer. Fills the buffer from offset to end.
+ *
+ * @param stream - A Node.js Readable stream
+ * @param buffer - Buffer to be filled; its length must be greater than or equal to end
+ * @param offset - From which position in the buffer to fill, inclusive
+ * @param end - To which position in the buffer to fill, exclusive
+ * @param encoding - Encoding of the Readable stream
+ */
+async function streamToBuffer(stream, buffer, offset, end, encoding) {
+    let pos = 0; // Position in stream
+    const count = end - offset; // Total amount of data needed in stream
+    return new Promise((resolve, reject) => {
+        const timeout = setTimeout(() => reject(new Error(`The operation cannot be completed in timeout.`)), REQUEST_TIMEOUT);
+        stream.on("readable", () => {
+            if (pos >= count) {
+                clearTimeout(timeout);
+                resolve();
+                return;
+            }
+            let chunk = stream.read();
+            if (!chunk) {
+                return;
+            }
+            if (typeof chunk === "string") {
+                chunk = Buffer.from(chunk, encoding);
+            }
+            // How much of this chunk is needed
+            const chunkLength = pos + chunk.length > count ? count - pos : chunk.length;
+            buffer.fill(chunk.slice(0, chunkLength), offset + pos, offset + pos + chunkLength);
+            pos += chunkLength;
+        });
+        stream.on("end", () => {
+            clearTimeout(timeout);
+            if (pos < count) {
+                reject(new Error(`Stream drains before getting enough data needed. Data read: ${pos}, data need: ${count}`));
+            }
+            resolve();
+        });
+        stream.on("error", (msg) => {
+            clearTimeout(timeout);
+            reject(msg);
+        });
+    });
+}
+/**
+ * Reads a readable stream into a buffer entirely.
+ *
+ * @param stream - A Node.js Readable stream
+ * @param buffer - Buffer to be filled; it must be large enough to hold the entire stream
+ * @param encoding - Encoding of the Readable stream
+ * @returns The count of bytes read.
+ * @throws `Error` if the buffer is not big enough.
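+ *
+ * A minimal usage sketch (the 10 MiB capacity is an assumption for illustration):
+ *
+ * ```js
+ * const buf = Buffer.alloc(10 * 1024 * 1024);
+ * const bytesRead = await streamToBuffer2(readableStream, buf);
+ * const data = buf.slice(0, bytesRead);
+ * ```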
+ */ +async function streamToBuffer2(stream, buffer, encoding) { + let pos = 0; // Position in stream + const bufferSize = buffer.length; + return new Promise((resolve, reject) => { + stream.on("readable", () => { + let chunk = stream.read(); + if (!chunk) { + return; + } + if (typeof chunk === "string") { + chunk = Buffer.from(chunk, encoding); + } + if (pos + chunk.length > bufferSize) { + reject(new Error(`Stream exceeds buffer size. Buffer size: ${bufferSize}`)); + return; + } + buffer.fill(chunk, pos, pos + chunk.length); + pos += chunk.length; + }); + stream.on("end", () => { + resolve(pos); + }); + stream.on("error", reject); + }); +} +/** + * ONLY AVAILABLE IN NODE.JS RUNTIME. + * + * Writes the content of a readstream to a local file. Returns a Promise which is completed after the file handle is closed. + * + * @param rs - The read stream. + * @param file - Destination file path. + */ +async function readStreamToLocalFile(rs, file) { + return new Promise((resolve, reject) => { + const ws = fs__namespace.createWriteStream(file); + rs.on("error", (err) => { + reject(err); + }); + ws.on("error", (err) => { + reject(err); + }); + ws.on("close", resolve); + rs.pipe(ws); + }); +} +/** + * ONLY AVAILABLE IN NODE.JS RUNTIME. + * + * Promisified version of fs.stat(). + */ +const fsStat = util__namespace.promisify(fs__namespace.stat); +const fsCreateReadStream = fs__namespace.createReadStream; + +/** + * A BlobClient represents a URL to an Azure Storage blob; the blob may be a block blob, + * append blob, or page blob. + */ +class BlobClient extends StorageClient { + constructor(urlOrConnectionString, credentialOrPipelineOrContainerName, blobNameOrOptions, + // Legacy, no fix for eslint error without breaking. Disable it for this interface. + /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/ + options) { + options = options || {}; + let pipeline; + let url; + if (isPipelineLike(credentialOrPipelineOrContainerName)) { + // (url: string, pipeline: Pipeline) + url = urlOrConnectionString; + pipeline = credentialOrPipelineOrContainerName; + } + else if ((coreHttp.isNode && credentialOrPipelineOrContainerName instanceof StorageSharedKeyCredential) || + credentialOrPipelineOrContainerName instanceof AnonymousCredential || + coreHttp.isTokenCredential(credentialOrPipelineOrContainerName)) { + // (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions) + url = urlOrConnectionString; + options = blobNameOrOptions; + pipeline = newPipeline(credentialOrPipelineOrContainerName, options); + } + else if (!credentialOrPipelineOrContainerName && + typeof credentialOrPipelineOrContainerName !== "string") { + // (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions) + // The second parameter is undefined. Use anonymous credential. 
+ url = urlOrConnectionString; + pipeline = newPipeline(new AnonymousCredential(), options); + } + else if (credentialOrPipelineOrContainerName && + typeof credentialOrPipelineOrContainerName === "string" && + blobNameOrOptions && + typeof blobNameOrOptions === "string") { + // (connectionString: string, containerName: string, blobName: string, options?: StoragePipelineOptions) + const containerName = credentialOrPipelineOrContainerName; + const blobName = blobNameOrOptions; + const extractedCreds = extractConnectionStringParts(urlOrConnectionString); + if (extractedCreds.kind === "AccountConnString") { + if (coreHttp.isNode) { + const sharedKeyCredential = new StorageSharedKeyCredential(extractedCreds.accountName, extractedCreds.accountKey); + url = appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName)); + if (!options.proxyOptions) { + options.proxyOptions = coreHttp.getDefaultProxySettings(extractedCreds.proxyUri); + } + pipeline = newPipeline(sharedKeyCredential, options); + } + else { + throw new Error("Account connection string is only supported in Node.js environment"); + } + } + else if (extractedCreds.kind === "SASConnString") { + url = + appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName)) + + "?" + + extractedCreds.accountSas; + pipeline = newPipeline(new AnonymousCredential(), options); + } + else { + throw new Error("Connection string must be either an Account connection string or a SAS connection string"); + } + } + else { + throw new Error("Expecting non-empty strings for containerName and blobName parameters"); + } + super(url, pipeline); + ({ blobName: this._name, containerName: this._containerName } = + this.getBlobAndContainerNamesFromUrl()); + this.blobContext = new Blob$1(this.storageClientContext); + this._snapshot = getURLParameter(this.url, URLConstants.Parameters.SNAPSHOT); + this._versionId = getURLParameter(this.url, URLConstants.Parameters.VERSIONID); } /** - * Lease state of the blob. Possible - * values include: 'available', 'leased', 'expired', 'breaking', 'broken'. - * - * @readonly + * The name of the blob. */ - get leaseState() { - return this.originalResponse.leaseState; + get name() { + return this._name; } /** - * The current lease status of the - * blob. Possible values include: 'locked', 'unlocked'. - * - * @readonly + * The name of the storage container the blob is associated with. */ - get leaseStatus() { - return this.originalResponse.leaseStatus; + get containerName() { + return this._containerName; } /** - * A UTC date/time value generated by the service that - * indicates the time at which the response was initiated. + * Creates a new BlobClient object identical to the source but with the specified snapshot timestamp. + * Provide "" will remove the snapshot and return a Client to the base blob. * - * @readonly + * @param snapshot - The snapshot timestamp. + * @returns A new BlobClient object identical to the source but with the specified snapshot timestamp */ - get date() { - return this.originalResponse.date; + withSnapshot(snapshot) { + return new BlobClient(setURLParameter(this.url, URLConstants.Parameters.SNAPSHOT, snapshot.length === 0 ? undefined : snapshot), this.pipeline); } /** - * The number of committed blocks - * present in the blob. This header is returned only for append blobs. + * Creates a new BlobClient object pointing to a version of this blob. 
+ * Provide "" will remove the versionId and return a Client to the base blob. * - * @readonly + * @param versionId - The versionId. + * @returns A new BlobClient object pointing to the version of this blob. */ - get blobCommittedBlockCount() { - return this.originalResponse.blobCommittedBlockCount; + withVersion(versionId) { + return new BlobClient(setURLParameter(this.url, URLConstants.Parameters.VERSIONID, versionId.length === 0 ? undefined : versionId), this.pipeline); } /** - * The ETag contains a value that you can use to - * perform operations conditionally, in quotes. + * Creates a AppendBlobClient object. * - * @readonly */ - get etag() { - return this.originalResponse.etag; + getAppendBlobClient() { + return new AppendBlobClient(this.url, this.pipeline); } /** - * The error code. + * Creates a BlockBlobClient object. * - * @readonly */ - get errorCode() { - return this.originalResponse.errorCode; + getBlockBlobClient() { + return new BlockBlobClient(this.url, this.pipeline); } /** - * The value of this header is set to - * true if the file data and application metadata are completely encrypted - * using the specified algorithm. Otherwise, the value is set to false (when - * the file is unencrypted, or if only parts of the file/application metadata - * are encrypted). + * Creates a PageBlobClient object. * - * @readonly */ - get isServerEncrypted() { - return this.originalResponse.isServerEncrypted; + getPageBlobClient() { + return new PageBlobClient(this.url, this.pipeline); } /** - * If the blob has a MD5 hash, and if - * request contains range header (Range or x-ms-range), this response header - * is returned with the value of the whole blob's MD5 value. This value may - * or may not be equal to the value returned in Content-MD5 header, with the - * latter calculated from the requested range. + * Reads or downloads a blob from the system, including its metadata and properties. + * You can also call Get Blob to read a snapshot. * - * @readonly - */ - get blobContentMD5() { - return this.originalResponse.blobContentMD5; - } - /** - * Returns the date and time the file was last - * modified. Any operation that modifies the file or its properties updates - * the last modified time. + * * In Node.js, data returns in a Readable stream readableStreamBody + * * In browsers, data returns in a promise blobBody * - * @readonly - */ - get lastModified() { - return this.originalResponse.lastModified; - } - /** - * A name-value pair - * to associate with a file storage object. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob * - * @readonly - */ - get metadata() { - return this.originalResponse.metadata; - } - /** - * This header uniquely identifies the request - * that was made and can be used for troubleshooting the request. + * @param offset - From which position of the blob to download, greater than or equal to 0 + * @param count - How much data to be downloaded, greater than 0. Will download to the end when undefined + * @param options - Optional options to Blob Download operation. 
* - * @readonly + * + * Example usage (Node.js): + * + * ```js + * // Download and convert a blob to a string + * const downloadBlockBlobResponse = await blobClient.download(); + * const downloaded = await streamToBuffer(downloadBlockBlobResponse.readableStreamBody); + * console.log("Downloaded blob content:", downloaded.toString()); + * + * async function streamToBuffer(readableStream) { + * return new Promise((resolve, reject) => { + * const chunks = []; + * readableStream.on("data", (data) => { + * chunks.push(data instanceof Buffer ? data : Buffer.from(data)); + * }); + * readableStream.on("end", () => { + * resolve(Buffer.concat(chunks)); + * }); + * readableStream.on("error", reject); + * }); + * } + * ``` + * + * Example usage (browser): + * + * ```js + * // Download and convert a blob to a string + * const downloadBlockBlobResponse = await blobClient.download(); + * const downloaded = await blobToString(await downloadBlockBlobResponse.blobBody); + * console.log( + * "Downloaded blob content", + * downloaded + * ); + * + * async function blobToString(blob: Blob): Promise { + * const fileReader = new FileReader(); + * return new Promise((resolve, reject) => { + * fileReader.onloadend = (ev: any) => { + * resolve(ev.target!.result); + * }; + * fileReader.onerror = reject; + * fileReader.readAsText(blob); + * }); + * } + * ``` */ - get requestId() { - return this.originalResponse.requestId; + async download(offset = 0, count, options = {}) { + var _a; + options.conditions = options.conditions || {}; + options.conditions = options.conditions || {}; + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + const { span, updatedOptions } = createSpan("BlobClient-download", options); + try { + const res = await this.blobContext.download(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), requestOptions: { + onDownloadProgress: coreHttp.isNode ? undefined : options.onProgress, // for Node.js, progress is reported by RetriableReadableStream + }, range: offset === 0 && !count ? undefined : rangeToString({ offset, count }), rangeGetContentMD5: options.rangeGetContentMD5, rangeGetContentCRC64: options.rangeGetContentCrc64, snapshot: options.snapshot, cpkInfo: options.customerProvidedKey }, convertTracingToRequestOptionsBase(updatedOptions))); + const wrappedRes = Object.assign(Object.assign({}, res), { _response: res._response, objectReplicationDestinationPolicyId: res.objectReplicationPolicyId, objectReplicationSourceProperties: parseObjectReplicationRecord(res.objectReplicationRules) }); + // Return browser response immediately + if (!coreHttp.isNode) { + return wrappedRes; + } + // We support retrying when download stream unexpected ends in Node.js runtime + // Following code shouldn't be bundled into browser build, however some + // bundlers may try to bundle following code and "FileReadResponse.ts". + // In this case, "FileDownloadResponse.browser.ts" will be used as a shim of "FileDownloadResponse.ts" + // The config is in package.json "browser" field + if (options.maxRetryRequests === undefined || options.maxRetryRequests < 0) { + // TODO: Default value or make it a required parameter? 
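+            // `maxRetryRequests` bounds the retriable stream wrapper constructed below:
+            // each unexpected end of the download stream triggers at most this many
+            // re-download attempts, resuming from the last byte read.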
+ options.maxRetryRequests = DEFAULT_MAX_DOWNLOAD_RETRY_REQUESTS; + } + if (res.contentLength === undefined) { + throw new RangeError(`File download response doesn't contain valid content length header`); + } + if (!res.etag) { + throw new RangeError(`File download response doesn't contain valid etag header`); + } + return new BlobDownloadResponse(wrappedRes, async (start) => { + var _a; + const updatedDownloadOptions = { + leaseAccessConditions: options.conditions, + modifiedAccessConditions: { + ifMatch: options.conditions.ifMatch || res.etag, + ifModifiedSince: options.conditions.ifModifiedSince, + ifNoneMatch: options.conditions.ifNoneMatch, + ifUnmodifiedSince: options.conditions.ifUnmodifiedSince, + ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions, + }, + range: rangeToString({ + count: offset + res.contentLength - start, + offset: start, + }), + rangeGetContentMD5: options.rangeGetContentMD5, + rangeGetContentCRC64: options.rangeGetContentCrc64, + snapshot: options.snapshot, + cpkInfo: options.customerProvidedKey, + }; + // Debug purpose only + // console.log( + // `Read from internal stream, range: ${ + // updatedOptions.range + // }, options: ${JSON.stringify(updatedOptions)}` + // ); + return (await this.blobContext.download(Object.assign({ abortSignal: options.abortSignal }, updatedDownloadOptions))).readableStreamBody; + }, offset, res.contentLength, { + maxRetryRequests: options.maxRetryRequests, + onProgress: options.onProgress, + }); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } /** - * If a client request id header is sent in the request, this header will be present in the - * response with the same value. + * Returns true if the Azure blob resource represented by this client exists; false otherwise. * - * @readonly + * NOTE: use this function with care since an existing blob might be deleted by other clients or + * applications. Vice versa new blobs might be added by other clients or applications after this + * function completes. + * + * @param options - options to Exists operation. */ - get clientRequestId() { - return this.originalResponse.clientRequestId; + async exists(options = {}) { + const { span, updatedOptions } = createSpan("BlobClient-exists", options); + try { + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + await this.getProperties({ + abortSignal: options.abortSignal, + customerProvidedKey: options.customerProvidedKey, + conditions: options.conditions, + tracingOptions: updatedOptions.tracingOptions, + }); + return true; + } + catch (e) { + if (e.statusCode === 404) { + // Expected exception when checking blob existence + return false; + } + else if (e.statusCode === 409 && + (e.details.errorCode === BlobUsesCustomerSpecifiedEncryptionMsg || + e.details.errorCode === BlobDoesNotUseCustomerSpecifiedEncryption)) { + // Expected exception when checking blob existence + return true; + } + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } /** - * Indicates the version of the File service used - * to execute the request. + * Returns all user-defined metadata, standard HTTP properties, and system properties + * for the blob. It does not return the content of the blob. 
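+ *
+ * A minimal usage sketch:
+ *
+ * ```js
+ * const properties = await blobClient.getProperties();
+ * console.log(`type: ${properties.contentType}, size: ${properties.contentLength}`);
+ * ```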
+ * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties * - * @readonly + * WARNING: The `metadata` object returned in the response will have its keys in lowercase, even if + * they originally contained uppercase characters. This differs from the metadata keys returned by + * the methods of {@link ContainerClient} that list blobs using the `includeMetadata` option, which + * will retain their original casing. + * + * @param options - Optional options to Get Properties operation. */ - get version() { - return this.originalResponse.version; + async getProperties(options = {}) { + var _a; + const { span, updatedOptions } = createSpan("BlobClient-getProperties", options); + try { + options.conditions = options.conditions || {}; + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + const res = await this.blobContext.getProperties(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), cpkInfo: options.customerProvidedKey }, convertTracingToRequestOptionsBase(updatedOptions))); + return Object.assign(Object.assign({}, res), { _response: res._response, objectReplicationDestinationPolicyId: res.objectReplicationPolicyId, objectReplicationSourceProperties: parseObjectReplicationRecord(res.objectReplicationRules) }); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } /** - * The SHA-256 hash of the encryption key used to encrypt the blob. This value is only returned - * when the blob was encrypted with a customer-provided key. + * Marks the specified blob or snapshot for deletion. The blob is later deleted + * during garbage collection. Note that in order to delete a blob, you must delete + * all of its snapshots. You can delete both at the same time with the Delete + * Blob operation. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob * - * @readonly + * @param options - Optional options to Blob Delete operation. */ - get encryptionKeySha256() { - return this.originalResponse.encryptionKeySha256; + async delete(options = {}) { + var _a; + const { span, updatedOptions } = createSpan("BlobClient-delete", options); + options.conditions = options.conditions || {}; + try { + return await this.blobContext.delete(Object.assign({ abortSignal: options.abortSignal, deleteSnapshots: options.deleteSnapshots, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } /** - * If the request is to read a specified range and the x-ms-range-get-content-crc64 is set to - * true, then the request returns a crc64 for the range, as long as the range size is less than - * or equal to 4 MB. If both x-ms-range-get-content-crc64 & x-ms-range-get-content-md5 is - * specified in the same request, it will fail with 400(Bad Request) + * Marks the specified blob or snapshot for deletion if it exists. The blob is later deleted + * during garbage collection. 
Note that in order to delete a blob, you must delete + * all of its snapshots. You can delete both at the same time with the Delete + * Blob operation. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob + * + * @param options - Optional options to Blob Delete operation. */ - get contentCrc64() { - return this.originalResponse.contentCrc64; + async deleteIfExists(options = {}) { + var _a, _b; + const { span, updatedOptions } = createSpan("BlobClient-deleteIfExists", options); + try { + const res = await this.delete(updatedOptions); + return Object.assign(Object.assign({ succeeded: true }, res), { _response: res._response }); + } + catch (e) { + if (((_a = e.details) === null || _a === void 0 ? void 0 : _a.errorCode) === "BlobNotFound") { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: "Expected exception when deleting a blob or snapshot only if it exists.", + }); + return Object.assign(Object.assign({ succeeded: false }, (_b = e.response) === null || _b === void 0 ? void 0 : _b.parsedHeaders), { _response: e.response }); + } + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } /** - * The response body as a browser Blob. - * Always undefined in node.js. + * Restores the contents and metadata of soft deleted blob and any associated + * soft deleted snapshots. Undelete Blob is supported only on version 2017-07-29 + * or later. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/undelete-blob * - * @readonly + * @param options - Optional options to Blob Undelete operation. */ - get blobBody() { - return undefined; + async undelete(options = {}) { + const { span, updatedOptions } = createSpan("BlobClient-undelete", options); + try { + return await this.blobContext.undelete(Object.assign({ abortSignal: options.abortSignal }, convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } /** - * The response body as a node.js Readable stream. - * Always undefined in the browser. + * Sets system properties on the blob. * - * It will parse avor data returned by blob query. + * If no value provided, or no value provided for the specified blob HTTP headers, + * these blob HTTP headers without a value will be cleared. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties * - * @readonly - */ - get readableStreamBody() { - return coreHttp.isNode ? this.blobDownloadStream : undefined; - } - /** - * The HTTP response. + * @param blobHTTPHeaders - If no value provided, or no value provided for + * the specified blob HTTP headers, these blob HTTP + * headers without a value will be cleared. + * A common header to set is `blobContentType` + * enabling the browser to provide functionality + * based on file type. + * @param options - Optional options to Blob Set HTTP Headers operation. 
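+ *
+ * For example, to set just the content type (a sketch; headers omitted from the
+ * parameter are cleared, per the note above):
+ *
+ * ```js
+ * await blobClient.setHTTPHeaders({ blobContentType: "application/json" });
+ * ```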
*/ - get _response() { - return this.originalResponse._response; + async setHTTPHeaders(blobHTTPHeaders, options = {}) { + var _a; + const { span, updatedOptions } = createSpan("BlobClient-setHTTPHeaders", options); + options.conditions = options.conditions || {}; + try { + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + return await this.blobContext.setHttpHeaders(Object.assign({ abortSignal: options.abortSignal, blobHttpHeaders: blobHTTPHeaders, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } -} - -// Copyright (c) Microsoft Corporation. -/** - * Represents the access tier on a blob. - * For detailed information about block blob level tiering see {@link https://docs.microsoft.com/azure/storage/blobs/storage-blob-storage-tiers|Hot, cool and archive storage tiers.} - */ -exports.BlockBlobTier = void 0; -(function (BlockBlobTier) { - /** - * Optimized for storing data that is accessed frequently. - */ - BlockBlobTier["Hot"] = "Hot"; - /** - * Optimized for storing data that is infrequently accessed and stored for at least 30 days. - */ - BlockBlobTier["Cool"] = "Cool"; /** - * Optimized for storing data that is rarely accessed and stored for at least 180 days - * with flexible latency requirements (on the order of hours). - */ - BlockBlobTier["Archive"] = "Archive"; -})(exports.BlockBlobTier || (exports.BlockBlobTier = {})); -/** - * Specifies the page blob tier to set the blob to. This is only applicable to page blobs on premium storage accounts. - * Please see {@link https://docs.microsoft.com/azure/storage/storage-premium-storage#scalability-and-performance-targets|here} - * for detailed information on the corresponding IOPS and throughput per PageBlobTier. - */ -exports.PremiumPageBlobTier = void 0; -(function (PremiumPageBlobTier) { - /** - * P4 Tier. - */ - PremiumPageBlobTier["P4"] = "P4"; - /** - * P6 Tier. - */ - PremiumPageBlobTier["P6"] = "P6"; - /** - * P10 Tier. - */ - PremiumPageBlobTier["P10"] = "P10"; - /** - * P15 Tier. - */ - PremiumPageBlobTier["P15"] = "P15"; - /** - * P20 Tier. - */ - PremiumPageBlobTier["P20"] = "P20"; - /** - * P30 Tier. - */ - PremiumPageBlobTier["P30"] = "P30"; - /** - * P40 Tier. + * Sets user-defined metadata for the specified blob as one or more name-value pairs. + * + * If no option provided, or no metadata defined in the parameter, the blob + * metadata will be removed. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata + * + * @param metadata - Replace existing metadata with this value. + * If no value provided the existing metadata will be removed. + * @param options - Optional options to Set Metadata operation. 
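+ *
+ * A minimal usage sketch (the metadata key/value pair is illustrative):
+ *
+ * ```js
+ * await blobClient.setMetadata({ reviewedby: "alice" });
+ * ```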
*/ - PremiumPageBlobTier["P40"] = "P40"; + async setMetadata(metadata, options = {}) { + var _a; + const { span, updatedOptions } = createSpan("BlobClient-setMetadata", options); + options.conditions = options.conditions || {}; + try { + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + return await this.blobContext.setMetadata(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, metadata, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope }, convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } + } /** - * P50 Tier. + * Sets tags on the underlying blob. + * A blob can have up to 10 tags. Tag keys must be between 1 and 128 characters. Tag values must be between 0 and 256 characters. + * Valid tag key and value characters include lower and upper case letters, digits (0-9), + * space (' '), plus ('+'), minus ('-'), period ('.'), foward slash ('/'), colon (':'), equals ('='), and underscore ('_'). + * + * @param tags - + * @param options - */ - PremiumPageBlobTier["P50"] = "P50"; + async setTags(tags, options = {}) { + var _a; + const { span, updatedOptions } = createSpan("BlobClient-setTags", options); + try { + return await this.blobContext.setTags(Object.assign(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions)), { tags: toBlobTags(tags) })); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } + } /** - * P60 Tier. + * Gets the tags associated with the underlying blob. + * + * @param options - */ - PremiumPageBlobTier["P60"] = "P60"; + async getTags(options = {}) { + var _a; + const { span, updatedOptions } = createSpan("BlobClient-getTags", options); + try { + const response = await this.blobContext.getTags(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); + const wrappedResponse = Object.assign(Object.assign({}, response), { _response: response._response, tags: toTags({ blobTagSet: response.blobTagSet }) || {} }); + return wrappedResponse; + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } + } /** - * P70 Tier. + * Get a {@link BlobLeaseClient} that manages leases on the blob. + * + * @param proposeLeaseId - Initial proposed lease Id. + * @returns A new BlobLeaseClient object for managing leases on the blob. */ - PremiumPageBlobTier["P70"] = "P70"; + getBlobLeaseClient(proposeLeaseId) { + return new BlobLeaseClient(this, proposeLeaseId); + } /** - * P80 Tier. + * Creates a read-only snapshot of a blob. 
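+ *
+ * A minimal usage sketch:
+ *
+ * ```js
+ * const snapshotResponse = await blobClient.createSnapshot();
+ * const snapshotClient = blobClient.withSnapshot(snapshotResponse.snapshot);
+ * ```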
+ * @see https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-blob + * + * @param options - Optional options to the Blob Create Snapshot operation. */ - PremiumPageBlobTier["P80"] = "P80"; -})(exports.PremiumPageBlobTier || (exports.PremiumPageBlobTier = {})); -function toAccessTier(tier) { - if (tier === undefined) { - return undefined; - } - return tier; // No more check if string is a valid AccessTier, and left this to underlay logic to decide(service). -} -function ensureCpkIfSpecified(cpk, isHttps) { - if (cpk && !isHttps) { - throw new RangeError("Customer-provided encryption key must be used over HTTPS."); - } - if (cpk && !cpk.encryptionAlgorithm) { - cpk.encryptionAlgorithm = EncryptionAlgorithmAES25; + async createSnapshot(options = {}) { + var _a; + const { span, updatedOptions } = createSpan("BlobClient-createSnapshot", options); + options.conditions = options.conditions || {}; + try { + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + return await this.blobContext.createSnapshot(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, metadata: options.metadata, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope }, convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } -} -/** - * Defines the known cloud audiences for Storage. - */ -exports.StorageBlobAudience = void 0; -(function (StorageBlobAudience) { /** - * The OAuth scope to use to retrieve an AAD token for Azure Storage. + * Asynchronously copies a blob to a destination within the storage account. + * This method returns a long running operation poller that allows you to wait + * indefinitely until the copy is completed. + * You can also cancel a copy before it is completed by calling `cancelOperation` on the poller. + * Note that the onProgress callback will not be invoked if the operation completes in the first + * request, and attempting to cancel a completed copy will result in an error being thrown. + * + * In version 2012-02-12 and later, the source for a Copy Blob operation can be + * a committed blob in any Azure storage account. + * Beginning with version 2015-02-21, the source for a Copy Blob operation can be + * an Azure file in any Azure storage account. + * Only storage accounts created on or after June 7th, 2012 allow the Copy Blob + * operation to copy from another storage account. 
+ * @see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob + * + * Example using automatic polling: + * + * ```js + * const copyPoller = await blobClient.beginCopyFromURL('url'); + * const result = await copyPoller.pollUntilDone(); + * ``` + * + * Example using manual polling: + * + * ```js + * const copyPoller = await blobClient.beginCopyFromURL('url'); + * while (!poller.isDone()) { + * await poller.poll(); + * } + * const result = copyPoller.getResult(); + * ``` + * + * Example using progress updates: + * + * ```js + * const copyPoller = await blobClient.beginCopyFromURL('url', { + * onProgress(state) { + * console.log(`Progress: ${state.copyProgress}`); + * } + * }); + * const result = await copyPoller.pollUntilDone(); + * ``` + * + * Example using a changing polling interval (default 15 seconds): + * + * ```js + * const copyPoller = await blobClient.beginCopyFromURL('url', { + * intervalInMs: 1000 // poll blob every 1 second for copy progress + * }); + * const result = await copyPoller.pollUntilDone(); + * ``` + * + * Example using copy cancellation: + * + * ```js + * const copyPoller = await blobClient.beginCopyFromURL('url'); + * // cancel operation after starting it. + * try { + * await copyPoller.cancelOperation(); + * // calls to get the result now throw PollerCancelledError + * await copyPoller.getResult(); + * } catch (err) { + * if (err.name === 'PollerCancelledError') { + * console.log('The copy was cancelled.'); + * } + * } + * ``` + * + * @param copySource - url to the source Azure Blob/File. + * @param options - Optional options to the Blob Start Copy From URL operation. */ - StorageBlobAudience["StorageOAuthScopes"] = "https://storage.azure.com/.default"; + async beginCopyFromURL(copySource, options = {}) { + const client = { + abortCopyFromURL: (...args) => this.abortCopyFromURL(...args), + getProperties: (...args) => this.getProperties(...args), + startCopyFromURL: (...args) => this.startCopyFromURL(...args), + }; + const poller = new BlobBeginCopyFromUrlPoller({ + blobClient: client, + copySource, + intervalInMs: options.intervalInMs, + onProgress: options.onProgress, + resumeFrom: options.resumeFrom, + startCopyFromURLOptions: options, + }); + // Trigger the startCopyFromURL call by calling poll. + // Any errors from this method should be surfaced to the user. + await poller.poll(); + return poller; + } /** - * The OAuth scope to use to retrieve an AAD token for Azure Disk. + * Aborts a pending asynchronous Copy Blob operation, and leaves a destination blob with zero + * length and full metadata. Version 2012-02-12 and newer. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/abort-copy-blob + * + * @param copyId - Id of the Copy From URL operation. + * @param options - Optional options to the Blob Abort Copy From URL operation. */ - StorageBlobAudience["DiskComputeOAuthScopes"] = "https://disk.compute.azure.com/.default"; -})(exports.StorageBlobAudience || (exports.StorageBlobAudience = {})); - -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -/** - * Function that converts PageRange and ClearRange to a common Range object. - * PageRange and ClearRange have start and end while Range offset and count - * this function normalizes to Range. 
- * @param response - Model PageBlob Range response - */ -function rangeResponseFromModel(response) { - const pageRange = (response._response.parsedBody.pageRange || []).map((x) => ({ - offset: x.start, - count: x.end - x.start, - })); - const clearRange = (response._response.parsedBody.clearRange || []).map((x) => ({ - offset: x.start, - count: x.end - x.start, - })); - return Object.assign(Object.assign({}, response), { pageRange, - clearRange, _response: Object.assign(Object.assign({}, response._response), { parsedBody: { - pageRange, - clearRange, - } }) }); -} - -// Copyright (c) Microsoft Corporation. -/** - * This is the poller returned by {@link BlobClient.beginCopyFromURL}. - * This can not be instantiated directly outside of this package. - * - * @hidden - */ -class BlobBeginCopyFromUrlPoller extends coreLro.Poller { - constructor(options) { - const { blobClient, copySource, intervalInMs = 15000, onProgress, resumeFrom, startCopyFromURLOptions, } = options; - let state; - if (resumeFrom) { - state = JSON.parse(resumeFrom).state; + async abortCopyFromURL(copyId, options = {}) { + const { span, updatedOptions } = createSpan("BlobClient-abortCopyFromURL", options); + try { + return await this.blobContext.abortCopyFromURL(copyId, Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions }, convertTracingToRequestOptionsBase(updatedOptions))); } - const operation = makeBlobBeginCopyFromURLPollOperation(Object.assign(Object.assign({}, state), { blobClient, - copySource, - startCopyFromURLOptions })); - super(operation); - if (typeof onProgress === "function") { - this.onProgress(onProgress); + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; } - this.intervalInMs = intervalInMs; - } - delay() { - return coreHttp.delay(this.intervalInMs); - } -} -/** - * Note: Intentionally using function expression over arrow function expression - * so that the function can be invoked with a different context. - * This affects what `this` refers to. - * @hidden - */ -const cancel = async function cancel(options = {}) { - const state = this.state; - const { copyId } = state; - if (state.isCompleted) { - return makeBlobBeginCopyFromURLPollOperation(state); - } - if (!copyId) { - state.isCancelled = true; - return makeBlobBeginCopyFromURLPollOperation(state); - } - // if abortCopyFromURL throws, it will bubble up to user's poller.cancelOperation call - await state.blobClient.abortCopyFromURL(copyId, { - abortSignal: options.abortSignal, - }); - state.isCancelled = true; - return makeBlobBeginCopyFromURLPollOperation(state); -}; -/** - * Note: Intentionally using function expression over arrow function expression - * so that the function can be invoked with a different context. - * This affects what `this` refers to. - * @hidden - */ -const update = async function update(options = {}) { - const state = this.state; - const { blobClient, copySource, startCopyFromURLOptions } = state; - if (!state.isStarted) { - state.isStarted = true; - const result = await blobClient.startCopyFromURL(copySource, startCopyFromURLOptions); - // copyId is needed to abort - state.copyId = result.copyId; - if (result.copyStatus === "success") { - state.result = result; - state.isCompleted = true; + finally { + span.end(); } } - else if (!state.isCompleted) { + /** + * The synchronous Copy From URL operation copies a blob or an internet resource to a new blob. It will not + * return a response until the copy is complete. 
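+ *
+ * A minimal usage sketch (the source URL is a placeholder and may need a SAS):
+ *
+ * ```js
+ * const result = await blobClient.syncCopyFromURL("<source blob url>");
+ * console.log(`copy status: ${result.copyStatus}`);
+ * ```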
+ * @see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url + * + * @param copySource - The source URL to copy from, Shared Access Signature(SAS) maybe needed for authentication + * @param options - + */ + async syncCopyFromURL(copySource, options = {}) { + var _a, _b, _c; + const { span, updatedOptions } = createSpan("BlobClient-syncCopyFromURL", options); + options.conditions = options.conditions || {}; + options.sourceConditions = options.sourceConditions || {}; try { - const result = await state.blobClient.getProperties({ abortSignal: options.abortSignal }); - const { copyStatus, copyProgress } = result; - const prevCopyProgress = state.copyProgress; - if (copyProgress) { - state.copyProgress = copyProgress; - } - if (copyStatus === "pending" && - copyProgress !== prevCopyProgress && - typeof options.fireProgress === "function") { - // trigger in setTimeout, or swallow error? - options.fireProgress(state); - } - else if (copyStatus === "success") { - state.result = result; - state.isCompleted = true; - } - else if (copyStatus === "failed") { - state.error = new Error(`Blob copy failed with reason: "${result.copyStatusDescription || "unknown"}"`); - state.isCompleted = true; - } + return await this.blobContext.copyFromURL(copySource, Object.assign({ abortSignal: options.abortSignal, metadata: options.metadata, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), sourceModifiedAccessConditions: { + sourceIfMatch: options.sourceConditions.ifMatch, + sourceIfModifiedSince: options.sourceConditions.ifModifiedSince, + sourceIfNoneMatch: options.sourceConditions.ifNoneMatch, + sourceIfUnmodifiedSince: options.sourceConditions.ifUnmodifiedSince, + }, sourceContentMD5: options.sourceContentMD5, copySourceAuthorization: httpAuthorizationToString(options.sourceAuthorization), blobTagsString: toBlobTagsString(options.tags), immutabilityPolicyExpiry: (_b = options.immutabilityPolicy) === null || _b === void 0 ? void 0 : _b.expiriesOn, immutabilityPolicyMode: (_c = options.immutabilityPolicy) === null || _c === void 0 ? void 0 : _c.policyMode, legalHold: options.legalHold, encryptionScope: options.encryptionScope, copySourceTags: options.copySourceTags }, convertTracingToRequestOptionsBase(updatedOptions))); } - catch (err) { - state.error = err; - state.isCompleted = true; + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; } - } - return makeBlobBeginCopyFromURLPollOperation(state); -}; -/** - * Note: Intentionally using function expression over arrow function expression - * so that the function can be invoked with a different context. - * This affects what `this` refers to. - * @hidden - */ -const toString = function toString() { - return JSON.stringify({ state: this.state }, (key, value) => { - // remove blobClient from serialized state since a client can't be hydrated from this info. - if (key === "blobClient") { - return undefined; + finally { + span.end(); } - return value; - }); -}; -/** - * Creates a poll operation given the provided state. - * @hidden - */ -function makeBlobBeginCopyFromURLPollOperation(state) { - return { - state: Object.assign({}, state), - cancel, - toString, - update, - }; -} - -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -/** - * Generate a range string. 
For example: - * - * "bytes=255-" or "bytes=0-511" - * - * @param iRange - - */ -function rangeToString(iRange) { - if (iRange.offset < 0) { - throw new RangeError(`Range.offset cannot be smaller than 0.`); } - if (iRange.count && iRange.count <= 0) { - throw new RangeError(`Range.count must be larger than 0. Leave it undefined if you want a range from offset to the end.`); - } - return iRange.count - ? `bytes=${iRange.offset}-${iRange.offset + iRange.count - 1}` - : `bytes=${iRange.offset}-`; -} - -// Copyright (c) Microsoft Corporation. -/** - * States for Batch. - */ -var BatchStates; -(function (BatchStates) { - BatchStates[BatchStates["Good"] = 0] = "Good"; - BatchStates[BatchStates["Error"] = 1] = "Error"; -})(BatchStates || (BatchStates = {})); -/** - * Batch provides basic parallel execution with concurrency limits. - * Will stop execute left operations when one of the executed operation throws an error. - * But Batch cannot cancel ongoing operations, you need to cancel them by yourself. - */ -class Batch { /** - * Creates an instance of Batch. - * @param concurrency - + * Sets the tier on a blob. The operation is allowed on a page blob in a premium + * storage account and on a block blob in a blob storage account (locally redundant + * storage only). A premium page blob's tier determines the allowed size, IOPS, + * and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive + * storage type. This operation does not update the blob's ETag. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier + * + * @param tier - The tier to be set on the blob. Valid values are Hot, Cool, or Archive. + * @param options - Optional options to the Blob Set Tier operation. */ - constructor(concurrency = 5) { - /** - * Number of active operations under execution. - */ - this.actives = 0; - /** - * Number of completed operations under execution. - */ - this.completed = 0; - /** - * Offset of next operation to be executed. - */ - this.offset = 0; - /** - * Operation array to be executed. - */ - this.operations = []; - /** - * States of Batch. When an error happens, state will turn into error. - * Batch will stop execute left operations. - */ - this.state = BatchStates.Good; - if (concurrency < 1) { - throw new RangeError("concurrency must be larger than 0"); + async setAccessTier(tier, options = {}) { + var _a; + const { span, updatedOptions } = createSpan("BlobClient-setAccessTier", options); + try { + return await this.blobContext.setTier(toAccessTier(tier), Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), rehydratePriority: options.rehydratePriority }, convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); } - this.concurrency = concurrency; - this.emitter = new events.EventEmitter(); } - /** - * Add a operation into queue. 
- * - * @param operation - - */ - addOperation(operation) { - this.operations.push(async () => { - try { - this.actives++; - await operation(); - this.actives--; - this.completed++; - this.parallelExecute(); + async downloadToBuffer(param1, param2, param3, param4 = {}) { + let buffer; + let offset = 0; + let count = 0; + let options = param4; + if (param1 instanceof Buffer) { + buffer = param1; + offset = param2 || 0; + count = typeof param3 === "number" ? param3 : 0; + } + else { + offset = typeof param1 === "number" ? param1 : 0; + count = typeof param2 === "number" ? param2 : 0; + options = param3 || {}; + } + const { span, updatedOptions } = createSpan("BlobClient-downloadToBuffer", options); + try { + if (!options.blockSize) { + options.blockSize = 0; + } + if (options.blockSize < 0) { + throw new RangeError("blockSize option must be >= 0"); + } + if (options.blockSize === 0) { + options.blockSize = DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES; } - catch (error) { - this.emitter.emit("error", error); + if (offset < 0) { + throw new RangeError("offset option must be >= 0"); } - }); - } - /** - * Start execute operations in the queue. - * - */ - async do() { - if (this.operations.length === 0) { - return Promise.resolve(); + if (count && count <= 0) { + throw new RangeError("count option must be greater than 0"); + } + if (!options.conditions) { + options.conditions = {}; + } + // Customer doesn't specify length, get it + if (!count) { + const response = await this.getProperties(Object.assign(Object.assign({}, options), { tracingOptions: Object.assign(Object.assign({}, options.tracingOptions), convertTracingToRequestOptionsBase(updatedOptions)) })); + count = response.contentLength - offset; + if (count < 0) { + throw new RangeError(`offset ${offset} shouldn't be larger than blob size ${response.contentLength}`); + } + } + // Allocate the buffer of size = count if the buffer is not provided + if (!buffer) { + try { + buffer = Buffer.alloc(count); + } + catch (error) { + throw new Error(`Unable to allocate the buffer of size: ${count}(in bytes). 
Please try passing your own buffer to the "downloadToBuffer" method or try using other methods like "download" or "downloadToFile".\t ${error.message}`); + } + } + if (buffer.length < count) { + throw new RangeError(`The buffer's size should be equal to or larger than the request count of bytes: ${count}`); + } + let transferProgress = 0; + const batch = new Batch(options.concurrency); + for (let off = offset; off < offset + count; off = off + options.blockSize) { + batch.addOperation(async () => { + // Exclusive chunk end position + let chunkEnd = offset + count; + if (off + options.blockSize < chunkEnd) { + chunkEnd = off + options.blockSize; + } + const response = await this.download(off, chunkEnd - off, { + abortSignal: options.abortSignal, + conditions: options.conditions, + maxRetryRequests: options.maxRetryRequestsPerBlock, + customerProvidedKey: options.customerProvidedKey, + tracingOptions: Object.assign(Object.assign({}, options.tracingOptions), convertTracingToRequestOptionsBase(updatedOptions)), + }); + const stream = response.readableStreamBody; + await streamToBuffer(stream, buffer, off - offset, chunkEnd - offset); + // Update progress after block is downloaded, in case of block trying + // Could provide finer grained progress updating inside HTTP requests, + // only if convenience layer download try is enabled + transferProgress += chunkEnd - off; + if (options.onProgress) { + options.onProgress({ loadedBytes: transferProgress }); + } + }); + } + await batch.do(); + return buffer; } - this.parallelExecute(); - return new Promise((resolve, reject) => { - this.emitter.on("finish", resolve); - this.emitter.on("error", (error) => { - this.state = BatchStates.Error; - reject(error); + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, }); - }); - } - /** - * Get next operation to be executed. Return null when reaching ends. - * - */ - nextOperation() { - if (this.offset < this.operations.length) { - return this.operations[this.offset++]; + throw e; + } + finally { + span.end(); } - return null; } /** - * Start execute operations. One one the most important difference between - * this method with do() is that do() wraps as an sync method. + * ONLY AVAILABLE IN NODE.JS RUNTIME. * + * Downloads an Azure Blob to a local file. + * Fails if the the given file path already exits. + * Offset and count are optional, pass 0 and undefined respectively to download the entire blob. + * + * @param filePath - + * @param offset - From which position of the block blob to download. + * @param count - How much data to be downloaded. Will download to the end when passing undefined. + * @param options - Options to Blob download options. + * @returns The response data for blob download operation, + * but with readableStreamBody set to undefined since its + * content is already read and written into a local file + * at the specified path. 
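+ *
+ * A minimal usage sketch (the file path is illustrative):
+ *
+ * ```js
+ * // Downloads the whole blob to the given local path.
+ * await blobClient.downloadToFile("/tmp/blob-copy.bin");
+ * ```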
*/ - parallelExecute() { - if (this.state === BatchStates.Error) { - return; + async downloadToFile(filePath, offset = 0, count, options = {}) { + const { span, updatedOptions } = createSpan("BlobClient-downloadToFile", options); + try { + const response = await this.download(offset, count, Object.assign(Object.assign({}, options), { tracingOptions: Object.assign(Object.assign({}, options.tracingOptions), convertTracingToRequestOptionsBase(updatedOptions)) })); + if (response.readableStreamBody) { + await readStreamToLocalFile(response.readableStreamBody, filePath); + } + // The stream is no longer accessible so setting it to undefined. + response.blobDownloadStream = undefined; + return response; } - if (this.completed >= this.operations.length) { - this.emitter.emit("finish"); - return; + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; } - while (this.actives < this.concurrency) { - const operation = this.nextOperation(); - if (operation) { - operation(); + finally { + span.end(); + } + } + getBlobAndContainerNamesFromUrl() { + let containerName; + let blobName; + try { + // URL may look like the following + // "https://myaccount.blob.core.windows.net/mycontainer/blob?sasString"; + // "https://myaccount.blob.core.windows.net/mycontainer/blob"; + // "https://myaccount.blob.core.windows.net/mycontainer/blob/a.txt?sasString"; + // "https://myaccount.blob.core.windows.net/mycontainer/blob/a.txt"; + // IPv4/IPv6 address hosts, Endpoints - `http://127.0.0.1:10000/devstoreaccount1/containername/blob` + // http://localhost:10001/devstoreaccount1/containername/blob + const parsedUrl = coreHttp.URLBuilder.parse(this.url); + if (parsedUrl.getHost().split(".")[1] === "blob") { + // "https://myaccount.blob.core.windows.net/containername/blob". + // .getPath() -> /containername/blob + const pathComponents = parsedUrl.getPath().match("/([^/]*)(/(.*))?"); + containerName = pathComponents[1]; + blobName = pathComponents[3]; + } + else if (isIpEndpointStyle(parsedUrl)) { + // IPv4/IPv6 address hosts... Example - http://192.0.0.10:10001/devstoreaccount1/containername/blob + // Single word domain without a [dot] in the endpoint... Example - http://localhost:10001/devstoreaccount1/containername/blob + // .getPath() -> /devstoreaccount1/containername/blob + const pathComponents = parsedUrl.getPath().match("/([^/]*)/([^/]*)(/(.*))?"); + containerName = pathComponents[2]; + blobName = pathComponents[4]; } else { - return; + // "https://customdomain.com/containername/blob". + // .getPath() -> /containername/blob + const pathComponents = parsedUrl.getPath().match("/([^/]*)(/(.*))?"); + containerName = pathComponents[1]; + blobName = pathComponents[3]; + } + // decode the encoded blobName, containerName - to get all the special characters that might be present in them + containerName = decodeURIComponent(containerName); + blobName = decodeURIComponent(blobName); + // Azure Storage Server will replace "\" with "/" in the blob names + // doing the same in the SDK side so that the user doesn't have to replace "\" instances in the blobName + blobName = blobName.replace(/\\/g, "/"); + if (!containerName) { + throw new Error("Provided containerName is invalid."); } + return { blobName, containerName }; + } + catch (error) { + throw new Error("Unable to extract blobName and containerName with provided information."); } } -} - -// Copyright (c) Microsoft Corporation. -/** - * This class generates a readable stream from the data in an array of buffers. 
- */ -class BuffersStream extends stream.Readable { /** - * Creates an instance of BuffersStream that will emit the data - * contained in the array of buffers. + * Asynchronously copies a blob to a destination within the storage account. + * In version 2012-02-12 and later, the source for a Copy Blob operation can be + * a committed blob in any Azure storage account. + * Beginning with version 2015-02-21, the source for a Copy Blob operation can be + * an Azure file in any Azure storage account. + * Only storage accounts created on or after June 7th, 2012 allow the Copy Blob + * operation to copy from another storage account. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob * - * @param buffers - Array of buffers containing the data - * @param byteLength - The total length of data contained in the buffers + * @param copySource - url to the source Azure Blob/File. + * @param options - Optional options to the Blob Start Copy From URL operation. */ - constructor(buffers, byteLength, options) { - super(options); - this.buffers = buffers; - this.byteLength = byteLength; - this.byteOffsetInCurrentBuffer = 0; - this.bufferIndex = 0; - this.pushedBytesLength = 0; - // check byteLength is no larger than buffers[] total length - let buffersLength = 0; - for (const buf of this.buffers) { - buffersLength += buf.byteLength; + async startCopyFromURL(copySource, options = {}) { + var _a, _b, _c; + const { span, updatedOptions } = createSpan("BlobClient-startCopyFromURL", options); + options.conditions = options.conditions || {}; + options.sourceConditions = options.sourceConditions || {}; + try { + return await this.blobContext.startCopyFromURL(copySource, Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, metadata: options.metadata, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), sourceModifiedAccessConditions: { + sourceIfMatch: options.sourceConditions.ifMatch, + sourceIfModifiedSince: options.sourceConditions.ifModifiedSince, + sourceIfNoneMatch: options.sourceConditions.ifNoneMatch, + sourceIfUnmodifiedSince: options.sourceConditions.ifUnmodifiedSince, + sourceIfTags: options.sourceConditions.tagConditions, + }, immutabilityPolicyExpiry: (_b = options.immutabilityPolicy) === null || _b === void 0 ? void 0 : _b.expiriesOn, immutabilityPolicyMode: (_c = options.immutabilityPolicy) === null || _c === void 0 ? void 0 : _c.policyMode, legalHold: options.legalHold, rehydratePriority: options.rehydratePriority, tier: toAccessTier(options.tier), blobTagsString: toBlobTagsString(options.tags), sealBlob: options.sealBlob }, convertTracingToRequestOptionsBase(updatedOptions))); } - if (buffersLength < this.byteLength) { - throw new Error("Data size shouldn't be larger than the total length of buffers."); + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); } } /** - * Internal _read() that will be called when the stream wants to pull more data in. + * Only available for BlobClient constructed with a shared key credential. * - * @param size - Optional. The size of data to be read + * Generates a Blob Service Shared Access Signature (SAS) URI based on the client properties + * and parameters passed in. The SAS is signed by the shared key credential of the client. 
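+ *
+ * Example usage (an illustrative sketch; assumes the client was constructed with a
+ * StorageSharedKeyCredential and that BlobSASPermissions is imported from the SDK):
+ *
+ * ```js
+ * const sasUrl = await blobClient.generateSasUrl({
+ *   permissions: BlobSASPermissions.parse("r"), // read-only
+ *   expiresOn: new Date(Date.now() + 3600 * 1000), // valid for one hour
+ * });
+ * ```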
+ * + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas + * + * @param options - Optional parameters. + * @returns The SAS URI consisting of the URI to the resource represented by this client, followed by the generated SAS token. */ - _read(size) { - if (this.pushedBytesLength >= this.byteLength) { - this.push(null); - } - if (!size) { - size = this.readableHighWaterMark; - } - const outBuffers = []; - let i = 0; - while (i < size && this.pushedBytesLength < this.byteLength) { - // The last buffer may be longer than the data it contains. - const remainingDataInAllBuffers = this.byteLength - this.pushedBytesLength; - const remainingCapacityInThisBuffer = this.buffers[this.bufferIndex].byteLength - this.byteOffsetInCurrentBuffer; - const remaining = Math.min(remainingCapacityInThisBuffer, remainingDataInAllBuffers); - if (remaining > size - i) { - // chunkSize = size - i - const end = this.byteOffsetInCurrentBuffer + size - i; - outBuffers.push(this.buffers[this.bufferIndex].slice(this.byteOffsetInCurrentBuffer, end)); - this.pushedBytesLength += size - i; - this.byteOffsetInCurrentBuffer = end; - i = size; - break; - } - else { - // chunkSize = remaining - const end = this.byteOffsetInCurrentBuffer + remaining; - outBuffers.push(this.buffers[this.bufferIndex].slice(this.byteOffsetInCurrentBuffer, end)); - if (remaining === remainingCapacityInThisBuffer) { - // this.buffers[this.bufferIndex] used up, shift to next one - this.byteOffsetInCurrentBuffer = 0; - this.bufferIndex++; - } - else { - this.byteOffsetInCurrentBuffer = end; - } - this.pushedBytesLength += remaining; - i += remaining; - } - } - if (outBuffers.length > 1) { - this.push(Buffer.concat(outBuffers)); - } - else if (outBuffers.length === 1) { - this.push(outBuffers[0]); - } - } -} - -// Copyright (c) Microsoft Corporation. -/** - * maxBufferLength is max size of each buffer in the pooled buffers. - */ -// Can't use import as Typescript doesn't recognize "buffer". -const maxBufferLength = (__nccwpck_require__(4300).constants.MAX_LENGTH); -/** - * This class provides a buffer container which conceptually has no hard size limit. - * It accepts a capacity, an array of input buffers and the total length of input data. - * It will allocate an internal "buffer" of the capacity and fill the data in the input buffers - * into the internal "buffer" serially with respect to the total length. - * Then by calling PooledBuffer.getReadableStream(), you can get a readable stream - * assembled from all the data in the internal "buffer". - */ -class PooledBuffer { - constructor(capacity, buffers, totalLength) { - /** - * Internal buffers used to keep the data. - * Each buffer has a length of the maxBufferLength except last one. - */ - this.buffers = []; - this.capacity = capacity; - this._size = 0; - // allocate - const bufferNum = Math.ceil(capacity / maxBufferLength); - for (let i = 0; i < bufferNum; i++) { - let len = i === bufferNum - 1 ? 
capacity % maxBufferLength : maxBufferLength; - if (len === 0) { - len = maxBufferLength; + generateSasUrl(options) { + return new Promise((resolve) => { + if (!(this.credential instanceof StorageSharedKeyCredential)) { + throw new RangeError("Can only generate the SAS when the client is initialized with a shared key credential"); } - this.buffers.push(Buffer.allocUnsafe(len)); - } - if (buffers) { - this.fill(buffers, totalLength); - } + const sas = generateBlobSASQueryParameters(Object.assign({ containerName: this._containerName, blobName: this._name, snapshotTime: this._snapshot, versionId: this._versionId }, options), this.credential).toString(); + resolve(appendToURLQuery(this.url, sas)); + }); } /** - * The size of the data contained in the pooled buffers. + * Delete the immutability policy on the blob. + * + * @param options - Optional options to delete immutability policy on the blob. */ - get size() { - return this._size; + async deleteImmutabilityPolicy(options) { + const { span, updatedOptions } = createSpan("BlobClient-deleteImmutabilityPolicy", options); + try { + return await this.blobContext.deleteImmutabilityPolicy(Object.assign({ abortSignal: options === null || options === void 0 ? void 0 : options.abortSignal }, convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } /** - * Fill the internal buffers with data in the input buffers serially - * with respect to the total length and the total capacity of the internal buffers. - * Data copied will be shift out of the input buffers. - * - * @param buffers - Input buffers containing the data to be filled in the pooled buffer - * @param totalLength - Total length of the data to be filled in. + * Set immutability policy on the blob. * + * @param options - Optional options to set immutability policy on the blob. */ - fill(buffers, totalLength) { - this._size = Math.min(this.capacity, totalLength); - let i = 0, j = 0, targetOffset = 0, sourceOffset = 0, totalCopiedNum = 0; - while (totalCopiedNum < this._size) { - const source = buffers[i]; - const target = this.buffers[j]; - const copiedNum = source.copy(target, targetOffset, sourceOffset); - totalCopiedNum += copiedNum; - sourceOffset += copiedNum; - targetOffset += copiedNum; - if (sourceOffset === source.length) { - i++; - sourceOffset = 0; - } - if (targetOffset === target.length) { - j++; - targetOffset = 0; - } + async setImmutabilityPolicy(immutabilityPolicy, options) { + const { span, updatedOptions } = createSpan("BlobClient-setImmutabilityPolicy", options); + try { + return await this.blobContext.setImmutabilityPolicy(Object.assign({ abortSignal: options === null || options === void 0 ? void 0 : options.abortSignal, immutabilityPolicyExpiry: immutabilityPolicy.expiriesOn, immutabilityPolicyMode: immutabilityPolicy.policyMode, modifiedAccessConditions: options === null || options === void 0 ? void 0 : options.modifiedAccessCondition }, convertTracingToRequestOptionsBase(updatedOptions))); } - // clear copied from source buffers - buffers.splice(0, i); - if (buffers.length > 0) { - buffers[0] = buffers[0].slice(sourceOffset); + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); } } /** - * Get the readable stream assembled from all the data in the internal buffers. + * Set legal hold on the blob.
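+ * While a legal hold is in effect, the blob cannot be overwritten or deleted; pass `false` to clear an existing hold.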
* + * @param options - Optional options to set legal hold on the blob. */ - getReadableStream() { - return new BuffersStream(this.buffers, this.size); + async setLegalHold(legalHoldEnabled, options) { + const { span, updatedOptions } = createSpan("BlobClient-setLegalHold", options); + try { + return await this.blobContext.setLegalHold(legalHoldEnabled, Object.assign({ abortSignal: options === null || options === void 0 ? void 0 : options.abortSignal }, convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } } - -// Copyright (c) Microsoft Corporation. /** - * This class accepts a Node.js Readable stream as input, and keeps reading data - * from the stream into the internal buffer structure, until it reaches maxBuffers. - * Every available buffer will try to trigger outgoingHandler. - * - * The internal buffer structure includes an incoming buffer array, and a outgoing - * buffer array. The incoming buffer array includes the "empty" buffers can be filled - * with new incoming data. The outgoing array includes the filled buffers to be - * handled by outgoingHandler. Every above buffer size is defined by parameter bufferSize. - * - * NUM_OF_ALL_BUFFERS = BUFFERS_IN_INCOMING + BUFFERS_IN_OUTGOING + BUFFERS_UNDER_HANDLING - * - * NUM_OF_ALL_BUFFERS lesser than or equal to maxBuffers - * - * PERFORMANCE IMPROVEMENT TIPS: - * 1. Input stream highWaterMark is better to set a same value with bufferSize - * parameter, which will avoid Buffer.concat() operations. - * 2. concurrency should set a smaller value than maxBuffers, which is helpful to - * reduce the possibility when a outgoing handler waits for the stream data. - * in this situation, outgoing handlers are blocked. - * Outgoing queue shouldn't be empty. + * AppendBlobClient defines a set of operations applicable to append blobs. */ -class BufferScheduler { - /** - * Creates an instance of BufferScheduler. - * - * @param readable - A Node.js Readable stream - * @param bufferSize - Buffer size of every maintained buffer - * @param maxBuffers - How many buffers can be allocated - * @param outgoingHandler - An async function scheduled to be - * triggered when a buffer fully filled - * with stream data - * @param concurrency - Concurrency of executing outgoingHandlers (>0) - * @param encoding - [Optional] Encoding of Readable stream when it's a string stream - */ - constructor(readable, bufferSize, maxBuffers, outgoingHandler, concurrency, encoding) { - /** - * An internal event emitter. - */ - this.emitter = new events.EventEmitter(); - /** - * An internal offset marker to track data offset in bytes of next outgoingHandler. - */ - this.offset = 0; - /** - * An internal marker to track whether stream is end. - */ - this.isStreamEnd = false; - /** - * An internal marker to track whether stream or outgoingHandler returns error. - */ - this.isError = false; - /** - * How many handlers are executing. - */ - this.executingOutgoingHandlers = 0; - /** - * How many buffers have been allocated. - */ - this.numBuffers = 0; - /** - * Because this class doesn't know how much data every time stream pops, which - * is defined by highWaterMarker of the stream. So BufferScheduler will cache - * data received from the stream, when data in unresolvedDataArray exceeds the - * blockSize defined, it will try to concat a blockSize of buffer, fill into available - * buffers from incoming and push to outgoing array. 
- */ - this.unresolvedDataArray = []; - /** - * How much data consisted in unresolvedDataArray. - */ - this.unresolvedLength = 0; - /** - * The array includes all the available buffers can be used to fill data from stream. - */ - this.incoming = []; - /** - * The array (queue) includes all the buffers filled from stream data. - */ - this.outgoing = []; - if (bufferSize <= 0) { - throw new RangeError(`bufferSize must be larger than 0, current is ${bufferSize}`); +class AppendBlobClient extends BlobClient { + constructor(urlOrConnectionString, credentialOrPipelineOrContainerName, blobNameOrOptions, + // Legacy, no fix for eslint error without breaking. Disable it for this interface. + /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/ + options) { + // In TypeScript we cannot simply pass all parameters to super() like below so have to duplicate the code instead. + // super(s, credentialOrPipelineOrContainerNameOrOptions, blobNameOrOptions, options); + let pipeline; + let url; + options = options || {}; + if (isPipelineLike(credentialOrPipelineOrContainerName)) { + // (url: string, pipeline: Pipeline) + url = urlOrConnectionString; + pipeline = credentialOrPipelineOrContainerName; } - if (maxBuffers <= 0) { - throw new RangeError(`maxBuffers must be larger than 0, current is ${maxBuffers}`); + else if ((coreHttp.isNode && credentialOrPipelineOrContainerName instanceof StorageSharedKeyCredential) || + credentialOrPipelineOrContainerName instanceof AnonymousCredential || + coreHttp.isTokenCredential(credentialOrPipelineOrContainerName)) { + // (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions) + url = urlOrConnectionString; + options = blobNameOrOptions; + pipeline = newPipeline(credentialOrPipelineOrContainerName, options); } - if (concurrency <= 0) { - throw new RangeError(`concurrency must be larger than 0, current is ${concurrency}`); + else if (!credentialOrPipelineOrContainerName && + typeof credentialOrPipelineOrContainerName !== "string") { + // (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions) + url = urlOrConnectionString; + // The second parameter is undefined. Use anonymous credential. + pipeline = newPipeline(new AnonymousCredential(), options); } - this.bufferSize = bufferSize; - this.maxBuffers = maxBuffers; - this.readable = readable; - this.outgoingHandler = outgoingHandler; - this.concurrency = concurrency; - this.encoding = encoding; - } - /** - * Start the scheduler, will return error when stream of any of the outgoingHandlers - * returns error. - * - */ - async do() { - return new Promise((resolve, reject) => { - this.readable.on("data", (data) => { - data = typeof data === "string" ?
Buffer.from(data, this.encoding) : data; - this.appendUnresolvedData(data); - if (!this.resolveData()) { - this.readable.pause(); - } - }); - this.readable.on("error", (err) => { - this.emitter.emit("error", err); - }); - this.readable.on("end", () => { - this.isStreamEnd = true; - this.emitter.emit("checkEnd"); - }); - this.emitter.on("error", (err) => { - this.isError = true; - this.readable.pause(); - reject(err); - }); - this.emitter.on("checkEnd", () => { - if (this.outgoing.length > 0) { - this.triggerOutgoingHandlers(); - return; - } - if (this.isStreamEnd && this.executingOutgoingHandlers === 0) { - if (this.unresolvedLength > 0 && this.unresolvedLength < this.bufferSize) { - const buffer = this.shiftBufferFromUnresolvedDataArray(); - this.outgoingHandler(() => buffer.getReadableStream(), buffer.size, this.offset) - .then(resolve) - .catch(reject); - } - else if (this.unresolvedLength >= this.bufferSize) { - return; - } - else { - resolve(); + else if (credentialOrPipelineOrContainerName && + typeof credentialOrPipelineOrContainerName === "string" && + blobNameOrOptions && + typeof blobNameOrOptions === "string") { + // (connectionString: string, containerName: string, blobName: string, options?: StoragePipelineOptions) + const containerName = credentialOrPipelineOrContainerName; + const blobName = blobNameOrOptions; + const extractedCreds = extractConnectionStringParts(urlOrConnectionString); + if (extractedCreds.kind === "AccountConnString") { + if (coreHttp.isNode) { + const sharedKeyCredential = new StorageSharedKeyCredential(extractedCreds.accountName, extractedCreds.accountKey); + url = appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName)); + if (!options.proxyOptions) { + options.proxyOptions = coreHttp.getDefaultProxySettings(extractedCreds.proxyUri); } + pipeline = newPipeline(sharedKeyCredential, options); } - }); - }); + else { + throw new Error("Account connection string is only supported in Node.js environment"); + } + } + else if (extractedCreds.kind === "SASConnString") { + url = + appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName)) + + "?" + + extractedCreds.accountSas; + pipeline = newPipeline(new AnonymousCredential(), options); + } + else { + throw new Error("Connection string must be either an Account connection string or a SAS connection string"); + } + } + else { + throw new Error("Expecting non-empty strings for containerName and blobName parameters"); + } + super(url, pipeline); + this.appendBlobContext = new AppendBlob(this.storageClientContext); } /** - * Insert a new data into unresolved array. + * Creates a new AppendBlobClient object identical to the source but with the + * specified snapshot timestamp. + * Provide "" will remove the snapshot and return a Client to the base blob. * - * @param data - + * @param snapshot - The snapshot timestamp. + * @returns A new AppendBlobClient object identical to the source but with the specified snapshot timestamp. */ - appendUnresolvedData(data) { - this.unresolvedDataArray.push(data); - this.unresolvedLength += data.length; + withSnapshot(snapshot) { + return new AppendBlobClient(setURLParameter(this.url, URLConstants.Parameters.SNAPSHOT, snapshot.length === 0 ? undefined : snapshot), this.pipeline); } /** - * Try to shift a buffer with size in blockSize. The buffer returned may be less - * than blockSize when data in unresolvedDataArray is less than bufferSize. 
+ * Creates a 0-length append blob. Call AppendBlock to append data to an append blob. + * @see https://docs.microsoft.com/rest/api/storageservices/put-blob + * + * @param options - Options to the Append Block Create operation. + * + * + * Example usage: * + * ```js + * const appendBlobClient = containerClient.getAppendBlobClient("<blob name>"); + * await appendBlobClient.create(); + * ``` */ - shiftBufferFromUnresolvedDataArray(buffer) { - if (!buffer) { - buffer = new PooledBuffer(this.bufferSize, this.unresolvedDataArray, this.unresolvedLength); + async create(options = {}) { + var _a, _b, _c; + const { span, updatedOptions } = createSpan("AppendBlobClient-create", options); + options.conditions = options.conditions || {}; + try { + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + return await this.appendBlobContext.create(0, Object.assign({ abortSignal: options.abortSignal, blobHttpHeaders: options.blobHTTPHeaders, leaseAccessConditions: options.conditions, metadata: options.metadata, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope, immutabilityPolicyExpiry: (_b = options.immutabilityPolicy) === null || _b === void 0 ? void 0 : _b.expiriesOn, immutabilityPolicyMode: (_c = options.immutabilityPolicy) === null || _c === void 0 ? void 0 : _c.policyMode, legalHold: options.legalHold, blobTagsString: toBlobTagsString(options.tags) }, convertTracingToRequestOptionsBase(updatedOptions))); } - else { - buffer.fill(this.unresolvedDataArray, this.unresolvedLength); + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); } - this.unresolvedLength -= buffer.size; - return buffer; } /** - * Resolve data in unresolvedDataArray. For every buffer with size in blockSize - * shifted, it will try to get (or allocate a buffer) from incoming, and fill it, - * then push it into outgoing to be handled by outgoing handler. - * - * Return false when available buffers in incoming are not enough, else true. + * Creates a 0-length append blob. Call AppendBlock to append data to an append blob. + * If the blob with the same name already exists, the content of the existing blob will remain unchanged. + * @see https://docs.microsoft.com/rest/api/storageservices/put-blob * - * @returns Return false when buffers in incoming are not enough, else true. + * @param options - */ - resolveData() { - while (this.unresolvedLength >= this.bufferSize) { - let buffer; - if (this.incoming.length > 0) { - buffer = this.incoming.shift(); - this.shiftBufferFromUnresolvedDataArray(buffer); - } - else { - if (this.numBuffers < this.maxBuffers) { - buffer = this.shiftBufferFromUnresolvedDataArray(); - this.numBuffers++; - } - else { - // No available buffer, wait for buffer returned - return false; - } + async createIfNotExists(options = {}) { + var _a, _b; + const { span, updatedOptions } = createSpan("AppendBlobClient-createIfNotExists", options); + const conditions = { ifNoneMatch: ETagAny }; + try { + const res = await this.create(Object.assign(Object.assign({}, updatedOptions), { conditions })); + return Object.assign(Object.assign({ succeeded: true }, res), { _response: res._response }); + } + catch (e) { + if (((_a = e.details) === null || _a === void 0 ?
void 0 : _a.errorCode) === "BlobAlreadyExists") { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: "Expected exception when creating a blob only if it does not already exist.", + }); + return Object.assign(Object.assign({ succeeded: false }, (_b = e.response) === null || _b === void 0 ? void 0 : _b.parsedHeaders), { _response: e.response }); } - this.outgoing.push(buffer); - this.triggerOutgoingHandlers(); + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); } - return true; } /** - * Try to trigger a outgoing handler for every buffer in outgoing. Stop when - * concurrency reaches. + * Seals the append blob, making it read only. + * + * @param options - */ - async triggerOutgoingHandlers() { - let buffer; - do { - if (this.executingOutgoingHandlers >= this.concurrency) { - return; - } - buffer = this.outgoing.shift(); - if (buffer) { - this.triggerOutgoingHandler(buffer); - } - } while (buffer); + async seal(options = {}) { + var _a; + const { span, updatedOptions } = createSpan("AppendBlobClient-seal", options); + options.conditions = options.conditions || {}; + try { + return await this.appendBlobContext.seal(Object.assign({ abortSignal: options.abortSignal, appendPositionAccessConditions: options.conditions, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } /** - * Trigger a outgoing handler for a buffer shifted from outgoing. + * Commits a new block of data to the end of the existing append blob. + * @see https://docs.microsoft.com/rest/api/storageservices/append-block * - * @param buffer - + * @param body - Data to be appended. + * @param contentLength - Length of the body in bytes. + * @param options - Options to the Append Block operation. + * + * + * Example usage: + * + * ```js + * const content = "Hello World!"; + * + * // Create a new append blob and append data to the blob. + * const newAppendBlobClient = containerClient.getAppendBlobClient("<blob name>"); + * await newAppendBlobClient.create(); + * await newAppendBlobClient.appendBlock(content, content.length); + * + * // Append data to an existing append blob.
+ * const existingAppendBlobClient = containerClient.getAppendBlobClient("<blob name>"); + * await existingAppendBlobClient.appendBlock(content, content.length); + * ``` */ - async triggerOutgoingHandler(buffer) { - const bufferLength = buffer.size; - this.executingOutgoingHandlers++; - this.offset += bufferLength; + async appendBlock(body, contentLength, options = {}) { + var _a; + const { span, updatedOptions } = createSpan("AppendBlobClient-appendBlock", options); + options.conditions = options.conditions || {}; try { - await this.outgoingHandler(() => buffer.getReadableStream(), bufferLength, this.offset - bufferLength); + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + return await this.appendBlobContext.appendBlock(contentLength, body, Object.assign({ abortSignal: options.abortSignal, appendPositionAccessConditions: options.conditions, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), requestOptions: { + onUploadProgress: options.onProgress, + }, transactionalContentMD5: options.transactionalContentMD5, transactionalContentCrc64: options.transactionalContentCrc64, cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope }, convertTracingToRequestOptionsBase(updatedOptions))); } - catch (err) { - this.emitter.emit("error", err); - return; + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); } - this.executingOutgoingHandlers--; - this.reuseBuffer(buffer); - this.emitter.emit("checkEnd"); } /** - * Return buffer used by outgoing handler into incoming. + * The Append Block operation commits a new block of data to the end of an existing append blob + * where the contents are read from a source url. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/append-block-from-url * - * @param buffer - + * @param sourceURL - + * The url to the blob that will be the source of the copy. A source blob in the same storage account can + * be authenticated via Shared Key. However, if the source is a blob in another account, the source blob + * must either be public or must be authenticated via a shared access signature. If the source blob is + * public, no authentication is required to perform the operation.
+ * @param sourceOffset - Offset in source to be appended + * @param count - Number of bytes to be appended as a block + * @param options - */ - reuseBuffer(buffer) { - this.incoming.push(buffer); - if (!this.isError && this.resolveData() && !this.isStreamEnd) { - this.readable.resume(); + async appendBlockFromURL(sourceURL, sourceOffset, count, options = {}) { + var _a; + const { span, updatedOptions } = createSpan("AppendBlobClient-appendBlockFromURL", options); + options.conditions = options.conditions || {}; + options.sourceConditions = options.sourceConditions || {}; + try { + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + return await this.appendBlobContext.appendBlockFromUrl(sourceURL, 0, Object.assign({ abortSignal: options.abortSignal, sourceRange: rangeToString({ offset: sourceOffset, count }), sourceContentMD5: options.sourceContentMD5, sourceContentCrc64: options.sourceContentCrc64, leaseAccessConditions: options.conditions, appendPositionAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), sourceModifiedAccessConditions: { + sourceIfMatch: options.sourceConditions.ifMatch, + sourceIfModifiedSince: options.sourceConditions.ifModifiedSince, + sourceIfNoneMatch: options.sourceConditions.ifNoneMatch, + sourceIfUnmodifiedSince: options.sourceConditions.ifUnmodifiedSince, + }, copySourceAuthorization: httpAuthorizationToString(options.sourceAuthorization), cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope }, convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); } } } - -// Copyright (c) Microsoft Corporation. -/** - * Reads a readable stream into buffer. Fill the buffer from offset to end. - * - * @param stream - A Node.js Readable stream - * @param buffer - Buffer to be filled, length must greater than or equal to offset - * @param offset - From which position in the buffer to be filled, inclusive - * @param end - To which position in the buffer to be filled, exclusive - * @param encoding - Encoding of the Readable stream - */ -async function streamToBuffer(stream, buffer, offset, end, encoding) { - let pos = 0; // Position in stream - const count = end - offset; // Total amount of data needed in stream - return new Promise((resolve, reject) => { - const timeout = setTimeout(() => reject(new Error(`The operation cannot be completed in timeout.`)), REQUEST_TIMEOUT); - stream.on("readable", () => { - if (pos >= count) { - clearTimeout(timeout); - resolve(); - return; - } - let chunk = stream.read(); - if (!chunk) { - return; - } - if (typeof chunk === "string") { - chunk = Buffer.from(chunk, encoding); - } - // How much data needed in this chunk - const chunkLength = pos + chunk.length > count ? count - pos : chunk.length; - buffer.fill(chunk.slice(0, chunkLength), offset + pos, offset + pos + chunkLength); - pos += chunkLength; - }); - stream.on("end", () => { - clearTimeout(timeout); - if (pos < count) { - reject(new Error(`Stream drains before getting enough data needed. Data read: ${pos}, data need: ${count}`)); - } - resolve(); - }); - stream.on("error", (msg) => { - clearTimeout(timeout); - reject(msg); - }); - }); -} -/** - * Reads a readable stream into buffer entirely. 
- * - * @param stream - A Node.js Readable stream - * @param buffer - Buffer to be filled, length must greater than or equal to offset - * @param encoding - Encoding of the Readable stream - * @returns with the count of bytes read. - * @throws `RangeError` If buffer size is not big enough. - */ -async function streamToBuffer2(stream, buffer, encoding) { - let pos = 0; // Position in stream - const bufferSize = buffer.length; - return new Promise((resolve, reject) => { - stream.on("readable", () => { - let chunk = stream.read(); - if (!chunk) { - return; - } - if (typeof chunk === "string") { - chunk = Buffer.from(chunk, encoding); - } - if (pos + chunk.length > bufferSize) { - reject(new Error(`Stream exceeds buffer size. Buffer size: ${bufferSize}`)); - return; - } - buffer.fill(chunk, pos, pos + chunk.length); - pos += chunk.length; - }); - stream.on("end", () => { - resolve(pos); - }); - stream.on("error", reject); - }); -} /** - * ONLY AVAILABLE IN NODE.JS RUNTIME. - * - * Writes the content of a readstream to a local file. Returns a Promise which is completed after the file handle is closed. - * - * @param rs - The read stream. - * @param file - Destination file path. - */ -async function readStreamToLocalFile(rs, file) { - return new Promise((resolve, reject) => { - const ws = fs__namespace.createWriteStream(file); - rs.on("error", (err) => { - reject(err); - }); - ws.on("error", (err) => { - reject(err); - }); - ws.on("close", resolve); - rs.pipe(ws); - }); -} -/** - * ONLY AVAILABLE IN NODE.JS RUNTIME. - * - * Promisified version of fs.stat(). - */ -const fsStat = util__namespace.promisify(fs__namespace.stat); -const fsCreateReadStream = fs__namespace.createReadStream; - -/** - * A BlobClient represents a URL to an Azure Storage blob; the blob may be a block blob, - * append blob, or page blob. + * BlockBlobClient defines a set of operations applicable to block blobs. */ -class BlobClient extends StorageClient { +class BlockBlobClient extends BlobClient { constructor(urlOrConnectionString, credentialOrPipelineOrContainerName, blobNameOrOptions, // Legacy, no fix for eslint error without breaking. Disable it for this interface. /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/ options) { - options = options || {}; + // In TypeScript we cannot simply pass all parameters to super() like below so have to duplicate the code instead. + // super(s, credentialOrPipelineOrContainerNameOrOptions, blobNameOrOptions, options); let pipeline; let url; + options = options || {}; if (isPipelineLike(credentialOrPipelineOrContainerName)) { // (url: string, pipeline: Pipeline) url = urlOrConnectionString; @@ -41717,189 +40540,480 @@ class BlobClient extends StorageClient { throw new Error("Connection string must be either an Account connection string or a SAS connection string"); } } - else { - throw new Error("Expecting non-empty strings for containerName and blobName parameters"); + else { + throw new Error("Expecting non-empty strings for containerName and blobName parameters"); + } + super(url, pipeline); + this.blockBlobContext = new BlockBlob(this.storageClientContext); + this._blobContext = new Blob$1(this.storageClientContext); + } + /** + * Creates a new BlockBlobClient object identical to the source but with the + * specified snapshot timestamp. + * Provide "" will remove the snapshot and return a URL to the base blob. + * + * @param snapshot - The snapshot timestamp. + * @returns A new BlockBlobClient object identical to the source but with the specified snapshot timestamp. 
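+ *
+ * Example usage (an illustrative sketch; the snapshot timestamp shown is a placeholder):
+ *
+ * ```js
+ * // Read from a specific snapshot of the blob.
+ * const snapshotClient = blockBlobClient.withSnapshot("2023-07-18T18:56:41.0000000Z");
+ * const downloadResponse = await snapshotClient.download();
+ * ```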
+ */ + withSnapshot(snapshot) { + return new BlockBlobClient(setURLParameter(this.url, URLConstants.Parameters.SNAPSHOT, snapshot.length === 0 ? undefined : snapshot), this.pipeline); + } + /** + * ONLY AVAILABLE IN NODE.JS RUNTIME. + * + * Quick query for a JSON or CSV formatted blob. + * + * Example usage (Node.js): + * + * ```js + * // Query and convert a blob to a string + * const queryBlockBlobResponse = await blockBlobClient.query("select * from BlobStorage"); + * const downloaded = (await streamToBuffer(queryBlockBlobResponse.readableStreamBody)).toString(); + * console.log("Query blob content:", downloaded); + * + * async function streamToBuffer(readableStream) { + * return new Promise((resolve, reject) => { + * const chunks = []; + * readableStream.on("data", (data) => { + * chunks.push(data instanceof Buffer ? data : Buffer.from(data)); + * }); + * readableStream.on("end", () => { + * resolve(Buffer.concat(chunks)); + * }); + * readableStream.on("error", reject); + * }); + * } + * ``` + * + * @param query - + * @param options - + */ + async query(query, options = {}) { + var _a; + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + const { span, updatedOptions } = createSpan("BlockBlobClient-query", options); + try { + if (!coreHttp.isNode) { + throw new Error("This operation currently is only supported in Node.js."); + } + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + const response = await this._blobContext.query(Object.assign({ abortSignal: options.abortSignal, queryRequest: { + queryType: "SQL", + expression: query, + inputSerialization: toQuerySerialization(options.inputTextConfiguration), + outputSerialization: toQuerySerialization(options.outputTextConfiguration), + }, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), cpkInfo: options.customerProvidedKey }, convertTracingToRequestOptionsBase(updatedOptions))); + return new BlobQueryResponse(response, { + abortSignal: options.abortSignal, + onProgress: options.onProgress, + onError: options.onError, + }); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); } - super(url, pipeline); - ({ blobName: this._name, containerName: this._containerName } = - this.getBlobAndContainerNamesFromUrl()); - this.blobContext = new Blob$1(this.storageClientContext); - this._snapshot = getURLParameter(this.url, URLConstants.Parameters.SNAPSHOT); - this._versionId = getURLParameter(this.url, URLConstants.Parameters.VERSIONID); } /** - * The name of the blob. + * Creates a new block blob, or updates the content of an existing block blob. + * Updating an existing block blob overwrites any existing metadata on the blob. + * Partial updates are not supported; the content of the existing blob is + * overwritten with the new content. To perform a partial update of a block blob's contents, + * use {@link stageBlock} and {@link commitBlockList}. + * + * This is a non-parallel uploading method, please use {@link uploadFile}, + * {@link uploadStream} or {@link uploadBrowserData} for better performance + * with concurrency uploading. + * + * @see https://docs.microsoft.com/rest/api/storageservices/put-blob + * + * @param body - Blob, string, ArrayBuffer, ArrayBufferView or a function + * which returns a new Readable stream whose offset is from data source beginning.
+ * @param contentLength - Length of body in bytes. Use Buffer.byteLength() to calculate body length for a + * string including non-Base64/Hex-encoded characters. + * @param options - Options to the Block Blob Upload operation. + * @returns Response data for the Block Blob Upload operation. + * + * Example usage: + * + * ```js + * const content = "Hello world!"; + * const uploadBlobResponse = await blockBlobClient.upload(content, content.length); + * ``` */ - get name() { - return this._name; + async upload(body, contentLength, options = {}) { + var _a, _b, _c; + options.conditions = options.conditions || {}; + const { span, updatedOptions } = createSpan("BlockBlobClient-upload", options); + try { + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + return await this.blockBlobContext.upload(contentLength, body, Object.assign({ abortSignal: options.abortSignal, blobHttpHeaders: options.blobHTTPHeaders, leaseAccessConditions: options.conditions, metadata: options.metadata, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), requestOptions: { + onUploadProgress: options.onProgress, + }, cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope, immutabilityPolicyExpiry: (_b = options.immutabilityPolicy) === null || _b === void 0 ? void 0 : _b.expiriesOn, immutabilityPolicyMode: (_c = options.immutabilityPolicy) === null || _c === void 0 ? void 0 : _c.policyMode, legalHold: options.legalHold, tier: toAccessTier(options.tier), blobTagsString: toBlobTagsString(options.tags) }, convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } /** - * The name of the storage container the blob is associated with. + * Creates a new Block Blob where the contents of the blob are read from a given URL. + * This API is supported beginning with the 2020-04-08 version. Partial updates + * are not supported with Put Blob from URL; the content of an existing blob is overwritten with + * the content of the new blob. To perform partial updates to a block blob’s contents using a + * source URL, use {@link stageBlockFromURL} and {@link commitBlockList}. + * + * @param sourceURL - Specifies the URL of the blob. The value + * may be a URL of up to 2 KB in length that specifies a blob. + * The value should be URL-encoded as it would appear + * in a request URI. The source blob must either be public + * or must be authenticated via a shared access signature. + * If the source blob is public, no authentication is required + * to perform the operation. Here are some examples of source object URLs: + * - https://myaccount.blob.core.windows.net/mycontainer/myblob + * - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= + * @param options - Optional parameters.
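+ *
+ * Example usage (an illustrative sketch; `sourceBlobURL` is an assumed variable holding a public or SAS-authenticated source URL):
+ *
+ * ```js
+ * // Overwrite this block blob with the contents read from the source URL.
+ * await blockBlobClient.syncUploadFromURL(sourceBlobURL);
+ * ```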
*/ - get containerName() { - return this._containerName; + async syncUploadFromURL(sourceURL, options = {}) { + var _a, _b, _c, _d, _e; + options.conditions = options.conditions || {}; + const { span, updatedOptions } = createSpan("BlockBlobClient-syncUploadFromURL", options); + try { + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + return await this.blockBlobContext.putBlobFromUrl(0, sourceURL, Object.assign(Object.assign(Object.assign({}, options), { blobHttpHeaders: options.blobHTTPHeaders, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: options.conditions.tagConditions }), sourceModifiedAccessConditions: { + sourceIfMatch: (_a = options.sourceConditions) === null || _a === void 0 ? void 0 : _a.ifMatch, + sourceIfModifiedSince: (_b = options.sourceConditions) === null || _b === void 0 ? void 0 : _b.ifModifiedSince, + sourceIfNoneMatch: (_c = options.sourceConditions) === null || _c === void 0 ? void 0 : _c.ifNoneMatch, + sourceIfUnmodifiedSince: (_d = options.sourceConditions) === null || _d === void 0 ? void 0 : _d.ifUnmodifiedSince, + sourceIfTags: (_e = options.sourceConditions) === null || _e === void 0 ? void 0 : _e.tagConditions, + }, cpkInfo: options.customerProvidedKey, copySourceAuthorization: httpAuthorizationToString(options.sourceAuthorization), tier: toAccessTier(options.tier), blobTagsString: toBlobTagsString(options.tags), copySourceTags: options.copySourceTags }), convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } /** - * Creates a new BlobClient object identical to the source but with the specified snapshot timestamp. - * Provide "" will remove the snapshot and return a Client to the base blob. + * Uploads the specified block to the block blob's "staging area" to be later + * committed by a call to commitBlockList. + * @see https://docs.microsoft.com/rest/api/storageservices/put-block * - * @param snapshot - The snapshot timestamp. - * @returns A new BlobClient object identical to the source but with the specified snapshot timestamp + * @param blockId - A 64-byte value that is base64-encoded + * @param body - Data to upload to the staging area. + * @param contentLength - Number of bytes to upload. + * @param options - Options to the Block Blob Stage Block operation. + * @returns Response data for the Block Blob Stage Block operation. */ - withSnapshot(snapshot) { - return new BlobClient(setURLParameter(this.url, URLConstants.Parameters.SNAPSHOT, snapshot.length === 0 ? 
undefined : snapshot), this.pipeline); + async stageBlock(blockId, body, contentLength, options = {}) { + const { span, updatedOptions } = createSpan("BlockBlobClient-stageBlock", options); + try { + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + return await this.blockBlobContext.stageBlock(blockId, contentLength, body, Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, requestOptions: { + onUploadProgress: options.onProgress, + }, transactionalContentMD5: options.transactionalContentMD5, transactionalContentCrc64: options.transactionalContentCrc64, cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope }, convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } /** - * Creates a new BlobClient object pointing to a version of this blob. - * Provide "" will remove the versionId and return a Client to the base blob. + * The Stage Block From URL operation creates a new block to be committed as part + * of a blob where the contents are read from a URL. + * This API is available starting in version 2018-03-28. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url * - * @param versionId - The versionId. - * @returns A new BlobClient object pointing to the version of this blob. + * @param blockId - A 64-byte value that is base64-encoded + * @param sourceURL - Specifies the URL of the blob. The value + * may be a URL of up to 2 KB in length that specifies a blob. + * The value should be URL-encoded as it would appear + * in a request URI. The source blob must either be public + * or must be authenticated via a shared access signature. + * If the source blob is public, no authentication is required + * to perform the operation. Here are some examples of source object URLs: + * - https://myaccount.blob.core.windows.net/mycontainer/myblob + * - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= + * @param offset - From which position of the blob to download, greater than or equal to 0 + * @param count - How much data to be downloaded, greater than 0. Will download to the end when undefined + * @param options - Options to the Block Blob Stage Block From URL operation. + * @returns Response data for the Block Blob Stage Block From URL operation. */ - withVersion(versionId) { - return new BlobClient(setURLParameter(this.url, URLConstants.Parameters.VERSIONID, versionId.length === 0 ? undefined : versionId), this.pipeline); + async stageBlockFromURL(blockId, sourceURL, offset = 0, count, options = {}) { + const { span, updatedOptions } = createSpan("BlockBlobClient-stageBlockFromURL", options); + try { + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + return await this.blockBlobContext.stageBlockFromURL(blockId, 0, sourceURL, Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, sourceContentMD5: options.sourceContentMD5, sourceContentCrc64: options.sourceContentCrc64, sourceRange: offset === 0 && !count ? 
undefined : rangeToString({ offset, count }), cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope, copySourceAuthorization: httpAuthorizationToString(options.sourceAuthorization) }, convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } /** - * Creates a AppendBlobClient object. + * Writes a blob by specifying the list of block IDs that make up the blob. + * In order to be written as part of a blob, a block must have been successfully written + * to the server in a prior {@link stageBlock} operation. You can call {@link commitBlockList} to + * update a blob by uploading only those blocks that have changed, then committing the new and existing + * blocks together. Any blocks not specified in the block list are permanently deleted. + * @see https://docs.microsoft.com/rest/api/storageservices/put-block-list * + * @param blocks - Array of 64-byte value that is base64-encoded + * @param options - Options to the Block Blob Commit Block List operation. + * @returns Response data for the Block Blob Commit Block List operation. */ - getAppendBlobClient() { - return new AppendBlobClient(this.url, this.pipeline); + async commitBlockList(blocks, options = {}) { + var _a, _b, _c; + options.conditions = options.conditions || {}; + const { span, updatedOptions } = createSpan("BlockBlobClient-commitBlockList", options); + try { + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + return await this.blockBlobContext.commitBlockList({ latest: blocks }, Object.assign({ abortSignal: options.abortSignal, blobHttpHeaders: options.blobHTTPHeaders, leaseAccessConditions: options.conditions, metadata: options.metadata, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope, immutabilityPolicyExpiry: (_b = options.immutabilityPolicy) === null || _b === void 0 ? void 0 : _b.expiriesOn, immutabilityPolicyMode: (_c = options.immutabilityPolicy) === null || _c === void 0 ? void 0 : _c.policyMode, legalHold: options.legalHold, tier: toAccessTier(options.tier), blobTagsString: toBlobTagsString(options.tags) }, convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } /** - * Creates a BlockBlobClient object. + * Returns the list of blocks that have been uploaded as part of a block blob + * using the specified block list filter. + * @see https://docs.microsoft.com/rest/api/storageservices/get-block-list * + * @param listType - Specifies whether to return the list of committed blocks, + * the list of uncommitted blocks, or both lists together. + * @param options - Options to the Block Blob Get Block List operation. + * @returns Response data for the Block Blob Get Block List operation.
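+ *
+ * Example usage (an illustrative sketch; assumes blocks were previously staged with {@link stageBlock}):
+ *
+ * ```js
+ * const blockList = await blockBlobClient.getBlockList("committed");
+ * console.log(`committed block count: ${blockList.committedBlocks.length}`);
+ * ```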
 */ - getBlockBlobClient() { - return new BlockBlobClient(this.url, this.pipeline); + async getBlockList(listType, options = {}) { + var _a; + const { span, updatedOptions } = createSpan("BlockBlobClient-getBlockList", options); + try { + const res = await this.blockBlobContext.getBlockList(listType, Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); + if (!res.committedBlocks) { + res.committedBlocks = []; + } + if (!res.uncommittedBlocks) { + res.uncommittedBlocks = []; + } + return res; + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } + // High level functions /** - * Creates a PageBlobClient object. + * Uploads a Buffer(Node.js)/Blob(browsers)/ArrayBuffer/ArrayBufferView object to a BlockBlob. + * + * When data length is no more than the specified {@link BlockBlobParallelUploadOptions.maxSingleShotSize} (default is + * {@link BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES}), this method will use 1 {@link upload} call to finish the upload. + * Otherwise, this method will call {@link stageBlock} to upload blocks, and finally call {@link commitBlockList} + * to commit the block list. + * + * A common {@link BlockBlobParallelUploadOptions.blobHTTPHeaders} option to set is + * `blobContentType`, enabling the browser to provide + * functionality based on file type. * + * @param data - Buffer(Node.js), Blob, ArrayBuffer or ArrayBufferView + * @param options - */ - getPageBlobClient() { - return new PageBlobClient(this.url, this.pipeline); + async uploadData(data, options = {}) { + const { span, updatedOptions } = createSpan("BlockBlobClient-uploadData", options); + try { + if (coreHttp.isNode) { + let buffer; + if (data instanceof Buffer) { + buffer = data; + } + else if (data instanceof ArrayBuffer) { + buffer = Buffer.from(data); + } + else { + data = data; + buffer = Buffer.from(data.buffer, data.byteOffset, data.byteLength); + } + return this.uploadSeekableInternal((offset, size) => buffer.slice(offset, offset + size), buffer.byteLength, updatedOptions); + } + else { + const browserBlob = new Blob([data]); + return this.uploadSeekableInternal((offset, size) => browserBlob.slice(offset, offset + size), browserBlob.size, updatedOptions); + } + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } /** - * Reads or downloads a blob from the system, including its metadata and properties. - * You can also call Get Blob to read a snapshot. - * - * * In Node.js, data returns in a Readable stream readableStreamBody - * * In browsers, data returns in a promise blobBody - * - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob + * ONLY AVAILABLE IN BROWSERS. * - * @param offset - From which position of the blob to download, greater than or equal to 0 - * @param count - How much data to be downloaded, greater than 0. Will download to the end when undefined - * @param options - Optional options to Blob Download operation. + * Uploads a browser Blob/File/ArrayBuffer/ArrayBufferView object to block blob. * + * When buffer length is less than or equal to 256MB, this method will use 1 upload call to finish the upload.
+ * Otherwise, this method will call {@link stageBlock} to upload blocks, and finally call + * {@link commitBlockList} to commit the block list. * - * Example usage (Node.js): + * A common {@link BlockBlobParallelUploadOptions.blobHTTPHeaders} option to set is + * `blobContentType`, enabling the browser to provide + * functionality based on file type. * - * ```js - * // Download and convert a blob to a string - * const downloadBlockBlobResponse = await blobClient.download(); - * const downloaded = await streamToBuffer(downloadBlockBlobResponse.readableStreamBody); - * console.log("Downloaded blob content:", downloaded.toString()); + * @deprecated Use {@link uploadData} instead. * - * async function streamToBuffer(readableStream) { - * return new Promise((resolve, reject) => { - * const chunks = []; - * readableStream.on("data", (data) => { - * chunks.push(data instanceof Buffer ? data : Buffer.from(data)); - * }); - * readableStream.on("end", () => { - * resolve(Buffer.concat(chunks)); - * }); - * readableStream.on("error", reject); - * }); - * } - * ``` + * @param browserData - Blob, File, ArrayBuffer or ArrayBufferView + * @param options - Options to upload browser data. + * @returns Response data for the Blob Upload operation. + */ + async uploadBrowserData(browserData, options = {}) { + const { span, updatedOptions } = createSpan("BlockBlobClient-uploadBrowserData", options); + try { + const browserBlob = new Blob([browserData]); + return await this.uploadSeekableInternal((offset, size) => browserBlob.slice(offset, offset + size), browserBlob.size, updatedOptions); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } + } + /** * - * Example usage (browser): + * Uploads data to block blob. Requires a bodyFactory as the data source, + * which needs to return a {@link HttpRequestBody} object with the offset and size provided. * - * ```js - * // Download and convert a blob to a string - * const downloadBlockBlobResponse = await blobClient.download(); - * const downloaded = await blobToString(await downloadBlockBlobResponse.blobBody); - * console.log( - * "Downloaded blob content", - * downloaded - * ); + * When data length is no more than the specified {@link BlockBlobParallelUploadOptions.maxSingleShotSize} (default is + * {@link BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES}), this method will use 1 {@link upload} call to finish the upload. + * Otherwise, this method will call {@link stageBlock} to upload blocks, and finally call {@link commitBlockList} + * to commit the block list. * - * async function blobToString(blob: Blob): Promise { - * const fileReader = new FileReader(); - * return new Promise((resolve, reject) => { - * fileReader.onloadend = (ev: any) => { - * resolve(ev.target!.result); - * }; - * fileReader.onerror = reject; - * fileReader.readAsText(blob); - * }); - * } - * ``` + * @param bodyFactory - + * @param size - size of the data to upload. + * @param options - Options to Upload to Block Blob operation. + * @returns Response data for the Blob Upload operation.
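+ *
+ * A minimal sketch of a `bodyFactory`, assuming the source data is a Node.js Buffer named `data`:
+ *
+ * ```js
+ * // Each invocation returns the slice of the source that backs one block.
+ * const bodyFactory = (offset, size) => data.slice(offset, offset + size);
+ * ```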
*/ - async download(offset = 0, count, options = {}) { - var _a; - options.conditions = options.conditions || {}; - options.conditions = options.conditions || {}; - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - const { span, updatedOptions } = createSpan("BlobClient-download", options); - try { - const res = await this.blobContext.download(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), requestOptions: { - onDownloadProgress: coreHttp.isNode ? undefined : options.onProgress, // for Node.js, progress is reported by RetriableReadableStream - }, range: offset === 0 && !count ? undefined : rangeToString({ offset, count }), rangeGetContentMD5: options.rangeGetContentMD5, rangeGetContentCRC64: options.rangeGetContentCrc64, snapshot: options.snapshot, cpkInfo: options.customerProvidedKey }, convertTracingToRequestOptionsBase(updatedOptions))); - const wrappedRes = Object.assign(Object.assign({}, res), { _response: res._response, objectReplicationDestinationPolicyId: res.objectReplicationPolicyId, objectReplicationSourceProperties: parseObjectReplicationRecord(res.objectReplicationRules) }); - // Return browser response immediately - if (!coreHttp.isNode) { - return wrappedRes; + async uploadSeekableInternal(bodyFactory, size, options = {}) { + if (!options.blockSize) { + options.blockSize = 0; + } + if (options.blockSize < 0 || options.blockSize > BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES) { + throw new RangeError(`blockSize option must be >= 0 and <= ${BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES}`); + } + if (options.maxSingleShotSize !== 0 && !options.maxSingleShotSize) { + options.maxSingleShotSize = BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES; + } + if (options.maxSingleShotSize < 0 || + options.maxSingleShotSize > BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES) { + throw new RangeError(`maxSingleShotSize option must be >= 0 and <= ${BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES}`); + } + if (options.blockSize === 0) { + if (size > BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES * BLOCK_BLOB_MAX_BLOCKS) { + throw new RangeError(`${size} is too large to upload to a block blob.`); } - // We support retrying when download stream unexpected ends in Node.js runtime - // Following code shouldn't be bundled into browser build, however some - // bundlers may try to bundle following code and "FileReadResponse.ts". - // In this case, "FileDownloadResponse.browser.ts" will be used as a shim of "FileDownloadResponse.ts" - // The config is in package.json "browser" field - if (options.maxRetryRequests === undefined || options.maxRetryRequests < 0) { - // TODO: Default value or make it a required parameter?
- options.maxRetryRequests = DEFAULT_MAX_DOWNLOAD_RETRY_REQUESTS; + if (size > options.maxSingleShotSize) { + options.blockSize = Math.ceil(size / BLOCK_BLOB_MAX_BLOCKS); + if (options.blockSize < DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES) { + options.blockSize = DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES; + } } - if (res.contentLength === undefined) { - throw new RangeError(`File download response doesn't contain valid content length header`); + } + if (!options.blobHTTPHeaders) { + options.blobHTTPHeaders = {}; + } + if (!options.conditions) { + options.conditions = {}; + } + const { span, updatedOptions } = createSpan("BlockBlobClient-uploadSeekableInternal", options); + try { + if (size <= options.maxSingleShotSize) { + return await this.upload(bodyFactory(0, size), size, updatedOptions); } - if (!res.etag) { - throw new RangeError(`File download response doesn't contain valid etag header`); + const numBlocks = Math.floor((size - 1) / options.blockSize) + 1; + if (numBlocks > BLOCK_BLOB_MAX_BLOCKS) { + throw new RangeError(`The buffer's size is too big or the BlockSize is too small;` + + `the number of blocks must be <= ${BLOCK_BLOB_MAX_BLOCKS}`); } - return new BlobDownloadResponse(wrappedRes, async (start) => { - var _a; - const updatedDownloadOptions = { - leaseAccessConditions: options.conditions, - modifiedAccessConditions: { - ifMatch: options.conditions.ifMatch || res.etag, - ifModifiedSince: options.conditions.ifModifiedSince, - ifNoneMatch: options.conditions.ifNoneMatch, - ifUnmodifiedSince: options.conditions.ifUnmodifiedSince, - ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions, - }, - range: rangeToString({ - count: offset + res.contentLength - start, - offset: start, - }), - rangeGetContentMD5: options.rangeGetContentMD5, - rangeGetContentCRC64: options.rangeGetContentCrc64, - snapshot: options.snapshot, - cpkInfo: options.customerProvidedKey, - }; - // Debug purpose only - // console.log( - // `Read from internal stream, range: ${ - // updatedOptions.range - // }, options: ${JSON.stringify(updatedOptions)}` - // ); - return (await this.blobContext.download(Object.assign({ abortSignal: options.abortSignal }, updatedDownloadOptions))).readableStreamBody; - }, offset, res.contentLength, { - maxRetryRequests: options.maxRetryRequests, - onProgress: options.onProgress, - }); + const blockList = []; + const blockIDPrefix = coreHttp.generateUuid(); + let transferProgress = 0; + const batch = new Batch(options.concurrency); + for (let i = 0; i < numBlocks; i++) { + batch.addOperation(async () => { + const blockID = generateBlockID(blockIDPrefix, i); + const start = options.blockSize * i; + const end = i === numBlocks - 1 ? 
size : start + options.blockSize; + const contentLength = end - start; + blockList.push(blockID); + await this.stageBlock(blockID, bodyFactory(start, contentLength), contentLength, { + abortSignal: options.abortSignal, + conditions: options.conditions, + encryptionScope: options.encryptionScope, + tracingOptions: updatedOptions.tracingOptions, + }); + // Update progress after block is successfully uploaded to server, in case the block needs to be retried + // TODO: Hook with convenience layer progress event in finer level + transferProgress += contentLength; + if (options.onProgress) { + options.onProgress({ + loadedBytes: transferProgress, + }); + } + }); + } + await batch.do(); + return this.commitBlockList(blockList, updatedOptions); } catch (e) { span.setStatus({ @@ -41913,37 +41027,31 @@ class BlobClient extends StorageClient { } } /** - * Returns true if the Azure blob resource represented by this client exists; false otherwise. + * ONLY AVAILABLE IN NODE.JS RUNTIME. * - * NOTE: use this function with care since an existing blob might be deleted by other clients or - * applications. Vice versa new blobs might be added by other clients or applications after this - * function completes. + * Uploads a local file in blocks to a block blob. * - * @param options - options to Exists operation. + * When file size is less than or equal to 256MB, this method will use 1 upload call to finish the upload. + * Otherwise, this method will call stageBlock to upload blocks, and finally call commitBlockList + * to commit the block list. + * + * @param filePath - Full path of local file + * @param options - Options to Upload to Block Blob operation. + * @returns Response data for the Blob Upload operation. */ - async exists(options = {}) { - const { span, updatedOptions } = createSpan("BlobClient-exists", options); + async uploadFile(filePath, options = {}) { + const { span, updatedOptions } = createSpan("BlockBlobClient-uploadFile", options); try { - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - await this.getProperties({ - abortSignal: options.abortSignal, - customerProvidedKey: options.customerProvidedKey, - conditions: options.conditions, - tracingOptions: updatedOptions.tracingOptions, - }); - return true; + const size = (await fsStat(filePath)).size; + return await this.uploadSeekableInternal((offset, count) => { + return () => fsCreateReadStream(filePath, { + autoClose: true, + end: count ? offset + count - 1 : Infinity, + start: offset, + }); + }, size, Object.assign(Object.assign({}, options), { tracingOptions: Object.assign(Object.assign({}, options.tracingOptions), convertTracingToRequestOptionsBase(updatedOptions)) })); } catch (e) { - if (e.statusCode === 404) { - // Expected exception when checking blob existence - return false; - } - else if (e.statusCode === 409 && - (e.details.errorCode === BlobUsesCustomerSpecifiedEncryptionMsg || - e.details.errorCode === BlobDoesNotUseCustomerSpecifiedEncryption)) { - // Expected exception when checking blob existence - return true; - } span.setStatus({ code: coreTracing.SpanStatusCode.ERROR, message: e.message, @@ -41955,25 +41063,56 @@ class BlobClient extends StorageClient { } } /** - * Returns all user-defined metadata, standard HTTP properties, and system properties - * for the blob. It does not return the content of the blob. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties + * ONLY AVAILABLE IN NODE.JS RUNTIME.
* - * WARNING: The `metadata` object returned in the response will have its keys in lowercase, even if - * they originally contained uppercase characters. This differs from the metadata keys returned by - * the methods of {@link ContainerClient} that list blobs using the `includeMetadata` option, which - * will retain their original casing. + * Uploads a Node.js Readable stream into block blob. * - * @param options - Optional options to Get Properties operation. + * PERFORMANCE IMPROVEMENT TIPS: + * * Set the input stream's highWaterMark to the same value as the bufferSize + * parameter, which will avoid Buffer.concat() operations. + * + * @param stream - Node.js Readable stream + * @param bufferSize - Size of every buffer allocated, also the block size in the uploaded block blob. Default value is 8MB + * @param maxConcurrency - Max concurrency indicates the max number of buffers that can be allocated, + * positive correlation with max uploading concurrency. Default value is 5 + * @param options - Options to Upload Stream to Block Blob operation. + * @returns Response data for the Blob Upload operation. */ - async getProperties(options = {}) { - var _a; - const { span, updatedOptions } = createSpan("BlobClient-getProperties", options); + async uploadStream(stream, bufferSize = DEFAULT_BLOCK_BUFFER_SIZE_BYTES, maxConcurrency = 5, options = {}) { + if (!options.blobHTTPHeaders) { + options.blobHTTPHeaders = {}; + } + if (!options.conditions) { + options.conditions = {}; + } + const { span, updatedOptions } = createSpan("BlockBlobClient-uploadStream", options); try { - options.conditions = options.conditions || {}; - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - const res = await this.blobContext.getProperties(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), cpkInfo: options.customerProvidedKey }, convertTracingToRequestOptionsBase(updatedOptions))); - return Object.assign(Object.assign({}, res), { _response: res._response, objectReplicationDestinationPolicyId: res.objectReplicationPolicyId, objectReplicationSourceProperties: parseObjectReplicationRecord(res.objectReplicationRules) }); + let blockNum = 0; + const blockIDPrefix = coreHttp.generateUuid(); + let transferProgress = 0; + const blockList = []; + const scheduler = new BufferScheduler(stream, bufferSize, maxConcurrency, async (body, length) => { + const blockID = generateBlockID(blockIDPrefix, blockNum); + blockList.push(blockID); + blockNum++; + await this.stageBlock(blockID, body, length, { + conditions: options.conditions, + encryptionScope: options.encryptionScope, + tracingOptions: updatedOptions.tracingOptions, + }); + // Update progress after block is successfully uploaded to server, in case the block needs to be retried + transferProgress += length; + if (options.onProgress) { + options.onProgress({ loadedBytes: transferProgress }); + } + }, + // concurrency should be set to a value smaller than maxConcurrency, which helps + // reduce the chance that an outgoing handler ends up waiting for stream data, + // blocking the other outgoing handlers. + // The outgoing queue shouldn't be empty.
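+ // For example, with the default maxConcurrency of 5 this evaluates to
+ // Math.ceil((5 / 4) * 3) = 4 concurrent outgoing handlers, keeping at least
+ // one buffer free for incoming stream data.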
+ Math.ceil((maxConcurrency / 4) * 3)); + await scheduler.do(); + return await this.commitBlockList(blockList, Object.assign(Object.assign({}, options), { tracingOptions: Object.assign(Object.assign({}, options.tracingOptions), convertTracingToRequestOptionsBase(updatedOptions)) })); } catch (e) { span.setStatus({ @@ -41986,21 +41125,105 @@ class BlobClient extends StorageClient { span.end(); } } +} +/** + * PageBlobClient defines a set of operations applicable to page blobs. + */ +class PageBlobClient extends BlobClient { + constructor(urlOrConnectionString, credentialOrPipelineOrContainerName, blobNameOrOptions, + // Legacy, no fix for eslint error without breaking. Disable it for this interface. + /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/ + options) { + // In TypeScript we cannot simply pass all parameters to super() like below so have to duplicate the code instead. + // super(s, credentialOrPipelineOrContainerNameOrOptions, blobNameOrOptions, options); + let pipeline; + let url; + options = options || {}; + if (isPipelineLike(credentialOrPipelineOrContainerName)) { + // (url: string, pipeline: Pipeline) + url = urlOrConnectionString; + pipeline = credentialOrPipelineOrContainerName; + } + else if ((coreHttp.isNode && credentialOrPipelineOrContainerName instanceof StorageSharedKeyCredential) || + credentialOrPipelineOrContainerName instanceof AnonymousCredential || + coreHttp.isTokenCredential(credentialOrPipelineOrContainerName)) { + // (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions) + url = urlOrConnectionString; + options = blobNameOrOptions; + pipeline = newPipeline(credentialOrPipelineOrContainerName, options); + } + else if (!credentialOrPipelineOrContainerName && + typeof credentialOrPipelineOrContainerName !== "string") { + // (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions) + // The second parameter is undefined. Use anonymous credential. + url = urlOrConnectionString; + pipeline = newPipeline(new AnonymousCredential(), options); + } + else if (credentialOrPipelineOrContainerName && + typeof credentialOrPipelineOrContainerName === "string" && + blobNameOrOptions && + typeof blobNameOrOptions === "string") { + // (connectionString: string, containerName: string, blobName: string, options?: StoragePipelineOptions) + const containerName = credentialOrPipelineOrContainerName; + const blobName = blobNameOrOptions; + const extractedCreds = extractConnectionStringParts(urlOrConnectionString); + if (extractedCreds.kind === "AccountConnString") { + if (coreHttp.isNode) { + const sharedKeyCredential = new StorageSharedKeyCredential(extractedCreds.accountName, extractedCreds.accountKey); + url = appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName)); + if (!options.proxyOptions) { + options.proxyOptions = coreHttp.getDefaultProxySettings(extractedCreds.proxyUri); + } + pipeline = newPipeline(sharedKeyCredential, options); + } + else { + throw new Error("Account connection string is only supported in Node.js environment"); + } + } + else if (extractedCreds.kind === "SASConnString") { + url = + appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName)) + + "?" 
+ + extractedCreds.accountSas; + pipeline = newPipeline(new AnonymousCredential(), options); + } + else { + throw new Error("Connection string must be either an Account connection string or a SAS connection string"); + } + } + else { + throw new Error("Expecting non-empty strings for containerName and blobName parameters"); + } + super(url, pipeline); + this.pageBlobContext = new PageBlob(this.storageClientContext); + } /** - * Marks the specified blob or snapshot for deletion. The blob is later deleted - * during garbage collection. Note that in order to delete a blob, you must delete - * all of its snapshots. You can delete both at the same time with the Delete - * Blob operation. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob + * Creates a new PageBlobClient object identical to the source but with the + * specified snapshot timestamp. + * Providing "" will remove the snapshot and return a Client to the base blob. * - * @param options - Optional options to Blob Delete operation. + * @param snapshot - The snapshot timestamp. + * @returns A new PageBlobClient object identical to the source but with the specified snapshot timestamp. */ - async delete(options = {}) { - var _a; - const { span, updatedOptions } = createSpan("BlobClient-delete", options); + withSnapshot(snapshot) { + return new PageBlobClient(setURLParameter(this.url, URLConstants.Parameters.SNAPSHOT, snapshot.length === 0 ? undefined : snapshot), this.pipeline); + } + /** + * Creates a page blob of the specified length. Call uploadPages to upload + * data to a page blob. + * @see https://docs.microsoft.com/rest/api/storageservices/put-blob + * + * @param size - size of the page blob. + * @param options - Options to the Page Blob Create operation. + * @returns Response data for the Page Blob Create operation. + */ + async create(size, options = {}) { + var _a, _b, _c; options.conditions = options.conditions || {}; + const { span, updatedOptions } = createSpan("PageBlobClient-create", options); try { - return await this.blobContext.delete(Object.assign({ abortSignal: options.abortSignal, deleteSnapshots: options.deleteSnapshots, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + return await this.pageBlobContext.create(0, size, Object.assign({ abortSignal: options.abortSignal, blobHttpHeaders: options.blobHTTPHeaders, blobSequenceNumber: options.blobSequenceNumber, leaseAccessConditions: options.conditions, metadata: options.metadata, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope, immutabilityPolicyExpiry: (_b = options.immutabilityPolicy) === null || _b === void 0 ? void 0 : _b.expiriesOn, immutabilityPolicyMode: (_c = options.immutabilityPolicy) === null || _c === void 0 ?
void 0 : _c.policyMode, legalHold: options.legalHold, tier: toAccessTier(options.tier), blobTagsString: toBlobTagsString(options.tags) }, convertTracingToRequestOptionsBase(updatedOptions))); } catch (e) { span.setStatus({ @@ -42014,26 +41237,27 @@ class BlobClient extends StorageClient { } } /** - * Marks the specified blob or snapshot for deletion if it exists. The blob is later deleted - * during garbage collection. Note that in order to delete a blob, you must delete - * all of its snapshots. You can delete both at the same time with the Delete - * Blob operation. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob + * Creates a page blob of the specified length. Call uploadPages to upload + * data to a page blob. If the blob with the same name already exists, the content + * of the existing blob will remain unchanged. + * @see https://docs.microsoft.com/rest/api/storageservices/put-blob * - * @param options - Optional options to Blob Delete operation. + * @param size - size of the page blob. + * @param options - */ - async deleteIfExists(options = {}) { + async createIfNotExists(size, options = {}) { var _a, _b; - const { span, updatedOptions } = createSpan("BlobClient-deleteIfExists", options); + const { span, updatedOptions } = createSpan("PageBlobClient-createIfNotExists", options); try { - const res = await this.delete(updatedOptions); + const conditions = { ifNoneMatch: ETagAny }; + const res = await this.create(size, Object.assign(Object.assign({}, options), { conditions, tracingOptions: updatedOptions.tracingOptions })); return Object.assign(Object.assign({ succeeded: true }, res), { _response: res._response }); } catch (e) { - if (((_a = e.details) === null || _a === void 0 ? void 0 : _a.errorCode) === "BlobNotFound") { + if (((_a = e.details) === null || _a === void 0 ? void 0 : _a.errorCode) === "BlobAlreadyExists") { span.setStatus({ code: coreTracing.SpanStatusCode.ERROR, - message: "Expected exception when deleting a blob or snapshot only if it exists.", + message: "Expected exception when creating a blob only if it does not already exist.", }); return Object.assign(Object.assign({ succeeded: false }, (_b = e.response) === null || _b === void 0 ? void 0 : _b.parsedHeaders), { _response: e.response }); } @@ -42048,17 +41272,24 @@ class BlobClient extends StorageClient { } } /** - * Restores the contents and metadata of soft deleted blob and any associated - * soft deleted snapshots. Undelete Blob is supported only on version 2017-07-29 - * or later. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/undelete-blob + * Writes 1 or more pages to the page blob. The start and end offsets must be a multiple of 512. + * @see https://docs.microsoft.com/rest/api/storageservices/put-page * - * @param options - Optional options to Blob Undelete operation. + * @param body - Data to upload + * @param offset - Offset of destination page blob + * @param count - Content length of the body, also number of bytes to be uploaded + * @param options - Options to the Page Blob Upload Pages operation. + * @returns Response data for the Page Blob Upload Pages operation.
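+ *
+ * Example usage (a minimal sketch, assuming an existing `pageBlobClient` whose blob was created with {@link create}):
+ *
+ * ```js
+ * // Page ranges must be 512-byte aligned.
+ * const content = Buffer.alloc(512, "a");
+ * await pageBlobClient.uploadPages(content, 0, content.length);
+ * ```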
*/ - async undelete(options = {}) { - const { span, updatedOptions } = createSpan("BlobClient-undelete", options); + async uploadPages(body, offset, count, options = {}) { + var _a; + options.conditions = options.conditions || {}; + const { span, updatedOptions } = createSpan("PageBlobClient-uploadPages", options); try { - return await this.blobContext.undelete(Object.assign({ abortSignal: options.abortSignal }, convertTracingToRequestOptionsBase(updatedOptions))); + ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); + return await this.pageBlobContext.uploadPages(count, body, Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), requestOptions: { + onUploadProgress: options.onProgress, + }, range: rangeToString({ offset, count }), sequenceNumberAccessConditions: options.conditions, transactionalContentMD5: options.transactionalContentMD5, transactionalContentCrc64: options.transactionalContentCrc64, cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope }, convertTracingToRequestOptionsBase(updatedOptions))); } catch (e) { span.setStatus({ @@ -42072,27 +41303,29 @@ class BlobClient extends StorageClient { } } /** - * Sets system properties on the blob. - * - * If no value provided, or no value provided for the specified blob HTTP headers, - * these blob HTTP headers without a value will be cleared. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties + * The Upload Pages operation writes a range of pages to a page blob where the + * contents are read from a URL. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/put-page-from-url * - * @param blobHTTPHeaders - If no value provided, or no value provided for - * the specified blob HTTP headers, these blob HTTP - * headers without a value will be cleared. - * A common header to set is `blobContentType` - * enabling the browser to provide functionality - * based on file type. - * @param options - Optional options to Blob Set HTTP Headers operation. + * @param sourceURL - Specify a URL to the copy source, a Shared Access Signature (SAS) may be needed for authentication + * @param sourceOffset - The source offset to copy from. Pass 0 to copy from the beginning of source page blob + * @param destOffset - Offset of destination page blob + * @param count - Number of bytes to be uploaded from source page blob + * @param options - */ - async setHTTPHeaders(blobHTTPHeaders, options = {}) { + async uploadPagesFromURL(sourceURL, sourceOffset, destOffset, count, options = {}) { var _a; - const { span, updatedOptions } = createSpan("BlobClient-setHTTPHeaders", options); options.conditions = options.conditions || {}; + options.sourceConditions = options.sourceConditions || {}; + const { span, updatedOptions } = createSpan("PageBlobClient-uploadPagesFromURL", options); try { ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - return await this.blobContext.setHttpHeaders(Object.assign({ abortSignal: options.abortSignal, blobHttpHeaders: blobHTTPHeaders, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ?
void 0 : _a.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); + return await this.pageBlobContext.uploadPagesFromURL(sourceURL, rangeToString({ offset: sourceOffset, count }), 0, rangeToString({ offset: destOffset, count }), Object.assign({ abortSignal: options.abortSignal, sourceContentMD5: options.sourceContentMD5, sourceContentCrc64: options.sourceContentCrc64, leaseAccessConditions: options.conditions, sequenceNumberAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), sourceModifiedAccessConditions: { + sourceIfMatch: options.sourceConditions.ifMatch, + sourceIfModifiedSince: options.sourceConditions.ifModifiedSince, + sourceIfNoneMatch: options.sourceConditions.ifNoneMatch, + sourceIfUnmodifiedSince: options.sourceConditions.ifUnmodifiedSince, + }, cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope, copySourceAuthorization: httpAuthorizationToString(options.sourceAuthorization) }, convertTracingToRequestOptionsBase(updatedOptions))); } catch (e) { span.setStatus({ @@ -42106,23 +41339,78 @@ class BlobClient extends StorageClient { } } /** - * Sets user-defined metadata for the specified blob as one or more name-value pairs. + * Frees the specified pages from the page blob. + * @see https://docs.microsoft.com/rest/api/storageservices/put-page * - * If no option provided, or no metadata defined in the parameter, the blob - * metadata will be removed. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata + * @param offset - Starting byte position of the pages to clear. + * @param count - Number of bytes to clear. + * @param options - Options to the Page Blob Clear Pages operation. + * @returns Response data for the Page Blob Clear Pages operation. + */ + async clearPages(offset = 0, count, options = {}) { + var _a; + options.conditions = options.conditions || {}; + const { span, updatedOptions } = createSpan("PageBlobClient-clearPages", options); + try { + return await this.pageBlobContext.clearPages(0, Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), range: rangeToString({ offset, count }), sequenceNumberAccessConditions: options.conditions, cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope }, convertTracingToRequestOptionsBase(updatedOptions))); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } + } + /** + * Returns the list of valid page ranges for a page blob or snapshot of a page blob. + * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges * - * @param metadata - Replace existing metadata with this value. - * If no value provided the existing metadata will be removed. - * @param options - Optional options to Set Metadata operation. + * @param offset - Starting byte position of the page ranges. + * @param count - Number of bytes to get. + * @param options - Options to the Page Blob Get Ranges operation. + * @returns Response data for the Page Blob Get Ranges operation. 
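+ *
+ * Example usage (a minimal sketch, assuming an existing `pageBlobClient`):
+ *
+ * ```js
+ * const rangeInfo = await pageBlobClient.getPageRanges(0, 1024);
+ * console.log(rangeInfo.pageRange); // valid ranges within the first 1024 bytes
+ * ```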
*/ - async setMetadata(metadata, options = {}) { + async getPageRanges(offset = 0, count, options = {}) { var _a; - const { span, updatedOptions } = createSpan("BlobClient-setMetadata", options); options.conditions = options.conditions || {}; + const { span, updatedOptions } = createSpan("PageBlobClient-getPageRanges", options); try { - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - return await this.blobContext.setMetadata(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, metadata, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope }, convertTracingToRequestOptionsBase(updatedOptions))); + return await this.pageBlobContext + .getPageRanges(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), range: rangeToString({ offset, count }) }, convertTracingToRequestOptionsBase(updatedOptions))) + .then(rangeResponseFromModel); + } + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } + } + /** + * getPageRangesSegment returns a single segment of page ranges starting from the + * specified Marker. Use an empty Marker to start enumeration from the beginning. + * After getting a segment, process it, and then call getPageRangesSegment again + * (passing the previously-returned Marker) to get the next segment. + * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges + * + * @param offset - Starting byte position of the page ranges. + * @param count - Number of bytes to get. + * @param marker - A string value that identifies the portion of the list to be returned with the next list operation. + * @param options - Options to PageBlob Get Page Ranges Segment operation. + */ + async listPageRangesSegment(offset = 0, count, marker, options = {}) { + var _a; + const { span, updatedOptions } = createSpan("PageBlobClient-getPageRangesSegment", options); + try { + return await this.pageBlobContext.getPageRanges(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), range: rangeToString({ offset, count }), marker: marker, maxPageSize: options.maxPageSize }, convertTracingToRequestOptionsBase(updatedOptions))); } catch (e) { span.setStatus({ @@ -42136,19 +41424,171 @@ class BlobClient extends StorageClient { } } /** - * Sets tags on the underlying blob. - * A blob can have up to 10 tags. Tag keys must be between 1 and 128 characters. Tag values must be between 0 and 256 characters. - * Valid tag key and value characters include lower and upper case letters, digits (0-9), - * space (' '), plus ('+'), minus ('-'), period ('.'), foward slash ('/'), colon (':'), equals ('='), and underscore ('_'). + * Returns an AsyncIterableIterator for {@link PageBlobGetPageRangesResponseModel} + * + * @param offset - Starting byte position of the page ranges. + * @param count - Number of bytes to get.
+ * @param marker - A string value that identifies the portion of + * the get of page ranges to be returned with the next getting operation. The + * operation returns the ContinuationToken value within the response body if the + * getting operation did not return all page ranges remaining within the current page. + * The ContinuationToken value can be used as the value for + * the marker parameter in a subsequent call to request the next page of get + * items. The marker value is opaque to the client. + * @param options - Options to List Page Ranges operation. + */ + listPageRangeItemSegments(offset = 0, count, marker, options = {}) { + return tslib.__asyncGenerator(this, arguments, function* listPageRangeItemSegments_1() { + let getPageRangeItemSegmentsResponse; + if (!!marker || marker === undefined) { + do { + getPageRangeItemSegmentsResponse = yield tslib.__await(this.listPageRangesSegment(offset, count, marker, options)); + marker = getPageRangeItemSegmentsResponse.continuationToken; + yield yield tslib.__await(yield tslib.__await(getPageRangeItemSegmentsResponse)); + } while (marker); + } + }); + } + /** + * Returns an AsyncIterableIterator of {@link PageRangeInfo} objects + * + * @param offset - Starting byte position of the page ranges. + * @param count - Number of bytes to get. + * @param options - Options to List Page Ranges operation. + */ + listPageRangeItems(offset = 0, count, options = {}) { + return tslib.__asyncGenerator(this, arguments, function* listPageRangeItems_1() { + var e_1, _a; + let marker; + try { + for (var _b = tslib.__asyncValues(this.listPageRangeItemSegments(offset, count, marker, options)), _c; _c = yield tslib.__await(_b.next()), !_c.done;) { + const getPageRangesSegment = _c.value; + yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(ExtractPageRangeInfoItems(getPageRangesSegment)))); + } + } + catch (e_1_1) { e_1 = { error: e_1_1 }; } + finally { + try { + if (_c && !_c.done && (_a = _b.return)) yield tslib.__await(_a.call(_b)); + } + finally { if (e_1) throw e_1.error; } + } + }); + } + /** + * Returns an async iterable iterator to list of page ranges for a page blob. + * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges + * + * .byPage() returns an async iterable iterator to list of page ranges for a page blob. 
+ * + * Example using `for await` syntax: + * + * ```js + * // Get the pageBlobClient before you run these snippets, + * // Can be obtained from `blobServiceClient.getContainerClient("").getPageBlobClient("");` + * let i = 1; + * for await (const pageRange of pageBlobClient.listPageRanges()) { + * console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`); + * } + * ``` + * + * Example using `iter.next()`: + * + * ```js + * let i = 1; + * let iter = pageBlobClient.listPageRanges(); + * let pageRangeItem = await iter.next(); + * while (!pageRangeItem.done) { + * console.log(`Page range ${i++}: ${pageRangeItem.value.start} - ${pageRangeItem.value.end}, IsClear: ${pageRangeItem.value.isClear}`); + * pageRangeItem = await iter.next(); + * } + * ``` + * + * Example using `byPage()`: + * + * ```js + * // passing optional maxPageSize in the page settings + * let i = 1; + * for await (const response of pageBlobClient.listPageRanges().byPage({ maxPageSize: 20 })) { + * for (const pageRange of response) { + * console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`); + * } + * } + * ``` + * + * Example using paging with a marker: + * + * ```js + * let i = 1; + * let iterator = pageBlobClient.listPageRanges().byPage({ maxPageSize: 2 }); + * let response = (await iterator.next()).value; + * + * // Prints 2 page ranges + * for (const pageRange of response) { + * console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`); + * } + * + * // Gets next marker + * let marker = response.continuationToken; + * + * // Passing next marker as continuationToken + * + * iterator = pageBlobClient.listPageRanges().byPage({ continuationToken: marker, maxPageSize: 10 }); + * response = (await iterator.next()).value; + * + * // Prints 10 page ranges + * for (const pageRange of response) { + * console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`); + * } + * ``` + * @param offset - Starting byte position of the page ranges. + * @param count - Number of bytes to get. + * @param options - Options to the Page Blob Get Ranges operation. + * @returns An asyncIterableIterator that supports paging. + */ + listPageRanges(offset = 0, count, options = {}) { + options.conditions = options.conditions || {}; + // AsyncIterableIterator to iterate over blobs + const iter = this.listPageRangeItems(offset, count, options); + return { + /** + * The next method, part of the iteration protocol + */ + next() { + return iter.next(); + }, + /** + * The connection to the async iterator, part of the iteration protocol + */ + [Symbol.asyncIterator]() { + return this; + }, + /** + * Return an AsyncIterableIterator that works a page at a time + */ + byPage: (settings = {}) => { + return this.listPageRangeItemSegments(offset, count, settings.continuationToken, Object.assign({ maxPageSize: settings.maxPageSize }, options)); + }, + }; + } + /** + * Gets the collection of page ranges that differ between a specified snapshot and this page blob. + * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges * - * @param tags - - * @param options - + * @param offset - Starting byte position of the page blob + * @param count - Number of bytes to get ranges diff. + * @param prevSnapshot - Timestamp of snapshot to retrieve the difference. + * @param options - Options to the Page Blob Get Page Ranges Diff operation. + * @returns Response data for the Page Blob Get Page Range Diff operation.
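+ *
+ * Example usage (a minimal sketch; `snapshotTimestamp` stands in for a real snapshot value, e.g. one returned by `createSnapshot`):
+ *
+ * ```js
+ * const diff = await pageBlobClient.getPageRangesDiff(0, 1024, snapshotTimestamp);
+ * console.log(diff.pageRange);  // ranges written since the snapshot
+ * console.log(diff.clearRange); // ranges cleared since the snapshot
+ * ```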
*/ - async setTags(tags, options = {}) { + async getPageRangesDiff(offset, count, prevSnapshot, options = {}) { var _a; - const { span, updatedOptions } = createSpan("BlobClient-setTags", options); + options.conditions = options.conditions || {}; + const { span, updatedOptions } = createSpan("PageBlobClient-getPageRangesDiff", options); try { - return await this.blobContext.setTags(Object.assign(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions)), { tags: toBlobTags(tags) })); + return await this.pageBlobContext + .getPageRangesDiff(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), prevsnapshot: prevSnapshot, range: rangeToString({ offset, count }) }, convertTracingToRequestOptionsBase(updatedOptions))) + .then(rangeResponseFromModel); } catch (e) { span.setStatus({ @@ -42162,17 +41602,27 @@ class BlobClient extends StorageClient { } } /** - * Gets the tags associated with the underlying blob. + * getPageRangesDiffSegment returns a single segment of page ranges starting from the + * specified Marker for the difference between a previous snapshot and the target page blob. + * Use an empty Marker to start enumeration from the beginning. + * After getting a segment, process it, and then call getPageRangesDiffSegment again + * (passing the previously-returned Marker) to get the next segment. + * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges * - * @param options - + * @param offset - Starting byte position of the page ranges. + * @param count - Number of bytes to get. + * @param prevSnapshotOrUrl - Timestamp of snapshot to retrieve the difference or URL of snapshot to retrieve the difference. + * @param marker - A string value that identifies the portion of the get to be returned with the next get operation. + * @param options - Options to the Page Blob Get Page Ranges Diff operation. */ - async getTags(options = {}) { + async listPageRangesDiffSegment(offset, count, prevSnapshotOrUrl, marker, options) { var _a; - const { span, updatedOptions } = createSpan("BlobClient-getTags", options); + const { span, updatedOptions } = createSpan("PageBlobClient-getPageRangesDiffSegment", options); try { - const response = await this.blobContext.getTags(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); - const wrappedResponse = Object.assign(Object.assign({}, response), { _response: response._response, tags: toTags({ blobTagSet: response.blobTagSet }) || {} }); - return wrappedResponse; + return await this.pageBlobContext.getPageRangesDiff(Object.assign({ abortSignal: options === null || options === void 0 ? void 0 : options.abortSignal, leaseAccessConditions: options === null || options === void 0 ? void 0 : options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options === null || options === void 0 ?
void 0 : options.conditions), { ifTags: (_a = options === null || options === void 0 ? void 0 : options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), prevsnapshot: prevSnapshotOrUrl, range: rangeToString({ + offset: offset, + count: count, + }), marker: marker, maxPageSize: options === null || options === void 0 ? void 0 : options.maxPageSize }, convertTracingToRequestOptionsBase(updatedOptions))); } catch (e) { span.setStatus({ @@ -42186,174 +41636,175 @@ class BlobClient extends StorageClient { } } /** - * Get a {@link BlobLeaseClient} that manages leases on the blob. + * Returns an AsyncIterableIterator for {@link PageBlobGetPageRangesDiffResponseModel} * - * @param proposeLeaseId - Initial proposed lease Id. - * @returns A new BlobLeaseClient object for managing leases on the blob. + * + * @param offset - Starting byte position of the page ranges. + * @param count - Number of bytes to get. + * @param prevSnapshotOrUrl - Timestamp of snapshot to retrieve the difference or URL of snapshot to retrieve the difference. + * @param marker - A string value that identifies the portion of + * the get of page ranges to be returned with the next getting operation. The + * operation returns the ContinuationToken value within the response body if the + * getting operation did not return all page ranges remaining within the current page. + * The ContinuationToken value can be used as the value for + * the marker parameter in a subsequent call to request the next page of get + * items. The marker value is opaque to the client. + * @param options - Options to the Page Blob Get Page Ranges Diff operation. */ - getBlobLeaseClient(proposeLeaseId) { - return new BlobLeaseClient(this, proposeLeaseId); + listPageRangeDiffItemSegments(offset, count, prevSnapshotOrUrl, marker, options) { + return tslib.__asyncGenerator(this, arguments, function* listPageRangeDiffItemSegments_1() { + let getPageRangeItemSegmentsResponse; + if (!!marker || marker === undefined) { + do { + getPageRangeItemSegmentsResponse = yield tslib.__await(this.listPageRangesDiffSegment(offset, count, prevSnapshotOrUrl, marker, options)); + marker = getPageRangeItemSegmentsResponse.continuationToken; + yield yield tslib.__await(yield tslib.__await(getPageRangeItemSegmentsResponse)); + } while (marker); + } + }); } /** - * Creates a read-only snapshot of a blob. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-blob + * Returns an AsyncIterableIterator of {@link PageRangeInfo} objects * - * @param options - Optional options to the Blob Create Snapshot operation. + * @param offset - Starting byte position of the page ranges. + * @param count - Number of bytes to get. + * @param prevSnapshotOrUrl - Timestamp of snapshot to retrieve the difference or URL of snapshot to retrieve the difference. + * @param options - Options to the Page Blob Get Page Ranges Diff operation. */ - async createSnapshot(options = {}) { - var _a; - const { span, updatedOptions } = createSpan("BlobClient-createSnapshot", options); - options.conditions = options.conditions || {}; - try { - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - return await this.blobContext.createSnapshot(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, metadata: options.metadata, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? 
void 0 : _a.tagConditions }), cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope }, convertTracingToRequestOptionsBase(updatedOptions))); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } + listPageRangeDiffItems(offset, count, prevSnapshotOrUrl, options) { + return tslib.__asyncGenerator(this, arguments, function* listPageRangeDiffItems_1() { + var e_2, _a; + let marker; + try { + for (var _b = tslib.__asyncValues(this.listPageRangeDiffItemSegments(offset, count, prevSnapshotOrUrl, marker, options)), _c; _c = yield tslib.__await(_b.next()), !_c.done;) { + const getPageRangesSegment = _c.value; + yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(ExtractPageRangeInfoItems(getPageRangesSegment)))); + } + } + catch (e_2_1) { e_2 = { error: e_2_1 }; } + finally { + try { + if (_c && !_c.done && (_a = _b.return)) yield tslib.__await(_a.call(_b)); + } + finally { if (e_2) throw e_2.error; } + } + }); } /** - * Asynchronously copies a blob to a destination within the storage account. - * This method returns a long running operation poller that allows you to wait - * indefinitely until the copy is completed. - * You can also cancel a copy before it is completed by calling `cancelOperation` on the poller. - * Note that the onProgress callback will not be invoked if the operation completes in the first - * request, and attempting to cancel a completed copy will result in an error being thrown. + * Returns an async iterable iterator to list of page ranges that differ between a specified snapshot and this page blob. + * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges * - * In version 2012-02-12 and later, the source for a Copy Blob operation can be - * a committed blob in any Azure storage account. - * Beginning with version 2015-02-21, the source for a Copy Blob operation can be - * an Azure file in any Azure storage account. - * Only storage accounts created on or after June 7th, 2012 allow the Copy Blob - * operation to copy from another storage account. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob + * .byPage() returns an async iterable iterator to list of page ranges that differ between a specified snapshot and this page blob. 
* - * Example using automatic polling: * * ```js - * const copyPoller = await blobClient.beginCopyFromURL('url'); - * const result = await copyPoller.pollUntilDone(); + * // Get the pageBlobClient before you run these snippets, + * // Can be obtained from `blobServiceClient.getContainerClient("").getPageBlobClient("");` + * let i = 1; + * for await (const pageRange of pageBlobClient.listPageRangesDiff()) { + * console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`); + * } * ``` * - * Example using manual polling: + * Example using `iter.next()`: * * ```js - * const copyPoller = await blobClient.beginCopyFromURL('url'); - * while (!poller.isDone()) { - * await poller.poll(); + * let i = 1; + * let iter = pageBlobClient.listPageRangesDiff(); + * let pageRangeItem = await iter.next(); + * while (!pageRangeItem.done) { + * console.log(`Page range ${i++}: ${pageRangeItem.value.start} - ${pageRangeItem.value.end}, IsClear: ${pageRangeItem.value.isClear}`); + * pageRangeItem = await iter.next(); * } - * const result = copyPoller.getResult(); * ``` * - * Example using progress updates: + * Example using `byPage()`: * * ```js - * const copyPoller = await blobClient.beginCopyFromURL('url', { - * onProgress(state) { - * console.log(`Progress: ${state.copyProgress}`); + * // passing optional maxPageSize in the page settings + * let i = 1; + * for await (const response of pageBlobClient.listPageRangesDiff().byPage({ maxPageSize: 20 })) { + * for (const pageRange of response) { + * console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`); * } - * }); - * const result = await copyPoller.pollUntilDone(); + * } * ``` * - * Example using a changing polling interval (default 15 seconds): + * Example using paging with a marker: * * ```js - * const copyPoller = await blobClient.beginCopyFromURL('url', { - * intervalInMs: 1000 // poll blob every 1 second for copy progress - * }); - * const result = await copyPoller.pollUntilDone(); - * ``` + * let i = 1; + * let iterator = pageBlobClient.listPageRangesDiff().byPage({ maxPageSize: 2 }); + * let response = (await iterator.next()).value; * - * Example using copy cancellation: + * // Prints 2 page ranges + * for (const pageRange of response) { + * console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`); + * } * - * ```js - * const copyPoller = await blobClient.beginCopyFromURL('url'); - * // cancel operation after starting it. - * try { - * await copyPoller.cancelOperation(); - * // calls to get the result now throw PollerCancelledError - * await copyPoller.getResult(); - * } catch (err) { - * if (err.name === 'PollerCancelledError') { - * console.log('The copy was cancelled.'); - * } + * // Gets next marker + * let marker = response.continuationToken; + * + * // Passing next marker as continuationToken + * + * iterator = pageBlobClient.listPageRangesDiff().byPage({ continuationToken: marker, maxPageSize: 10 }); + * response = (await iterator.next()).value; + * + * // Prints 10 page ranges + * for (const pageRange of response) { + * console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`); * } * ``` - * - * @param copySource - url to the source Azure Blob/File. - * @param options - Optional options to the Blob Start Copy From URL operation. + * @param offset - Starting byte position of the page ranges. + * @param count - Number of bytes to get. + * @param prevSnapshot - Timestamp of snapshot to retrieve the difference.
+ * @param options - Options to the Page Blob Get Ranges operation. + * @returns An asyncIterableIterator that supports paging. */ - async beginCopyFromURL(copySource, options = {}) { - const client = { - abortCopyFromURL: (...args) => this.abortCopyFromURL(...args), - getProperties: (...args) => this.getProperties(...args), - startCopyFromURL: (...args) => this.startCopyFromURL(...args), + listPageRangesDiff(offset, count, prevSnapshot, options = {}) { + options.conditions = options.conditions || {}; + // AsyncIterableIterator to iterate over blobs + const iter = this.listPageRangeDiffItems(offset, count, prevSnapshot, Object.assign({}, options)); + return { + /** + * The next method, part of the iteration protocol + */ + next() { + return iter.next(); + }, + /** + * The connection to the async iterator, part of the iteration protocol + */ + [Symbol.asyncIterator]() { + return this; + }, + /** + * Return an AsyncIterableIterator that works a page at a time + */ + byPage: (settings = {}) => { + return this.listPageRangeDiffItemSegments(offset, count, prevSnapshot, settings.continuationToken, Object.assign({ maxPageSize: settings.maxPageSize }, options)); + }, }; - const poller = new BlobBeginCopyFromUrlPoller({ - blobClient: client, - copySource, - intervalInMs: options.intervalInMs, - onProgress: options.onProgress, - resumeFrom: options.resumeFrom, - startCopyFromURLOptions: options, - }); - // Trigger the startCopyFromURL call by calling poll. - // Any errors from this method should be surfaced to the user. - await poller.poll(); - return poller; - } - /** - * Aborts a pending asynchronous Copy Blob operation, and leaves a destination blob with zero - * length and full metadata. Version 2012-02-12 and newer. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/abort-copy-blob - * - * @param copyId - Id of the Copy From URL operation. - * @param options - Optional options to the Blob Abort Copy From URL operation. - */ - async abortCopyFromURL(copyId, options = {}) { - const { span, updatedOptions } = createSpan("BlobClient-abortCopyFromURL", options); - try { - return await this.blobContext.abortCopyFromURL(copyId, Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions }, convertTracingToRequestOptionsBase(updatedOptions))); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } } /** - * The synchronous Copy From URL operation copies a blob or an internet resource to a new blob. It will not - * return a response until the copy is complete. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url + * Gets the collection of page ranges that differ between a specified snapshot and this page blob for managed disks. + * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges * - * @param copySource - The source URL to copy from, Shared Access Signature(SAS) maybe needed for authentication - * @param options - + * @param offset - Starting byte position of the page blob + * @param count - Number of bytes to get ranges diff. + * @param prevSnapshotUrl - URL of snapshot to retrieve the difference. + * @param options - Options to the Page Blob Get Page Ranges Diff operation. + * @returns Response data for the Page Blob Get Page Range Diff operation. 
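+ *
+ * Example usage (a minimal sketch; `prevSnapshotUrl` stands in for a real managed-disk snapshot URL):
+ *
+ * ```js
+ * const diff = await pageBlobClient.getPageRangesDiffForManagedDisks(0, 1024, prevSnapshotUrl);
+ * console.log(diff.pageRange);
+ * ```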
*/ - async syncCopyFromURL(copySource, options = {}) { - var _a, _b, _c; - const { span, updatedOptions } = createSpan("BlobClient-syncCopyFromURL", options); + async getPageRangesDiffForManagedDisks(offset, count, prevSnapshotUrl, options = {}) { + var _a; options.conditions = options.conditions || {}; - options.sourceConditions = options.sourceConditions || {}; + const { span, updatedOptions } = createSpan("PageBlobClient-GetPageRangesDiffForManagedDisks", options); try { - return await this.blobContext.copyFromURL(copySource, Object.assign({ abortSignal: options.abortSignal, metadata: options.metadata, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), sourceModifiedAccessConditions: { - sourceIfMatch: options.sourceConditions.ifMatch, - sourceIfModifiedSince: options.sourceConditions.ifModifiedSince, - sourceIfNoneMatch: options.sourceConditions.ifNoneMatch, - sourceIfUnmodifiedSince: options.sourceConditions.ifUnmodifiedSince, - }, sourceContentMD5: options.sourceContentMD5, copySourceAuthorization: httpAuthorizationToString(options.sourceAuthorization), blobTagsString: toBlobTagsString(options.tags), immutabilityPolicyExpiry: (_b = options.immutabilityPolicy) === null || _b === void 0 ? void 0 : _b.expiriesOn, immutabilityPolicyMode: (_c = options.immutabilityPolicy) === null || _c === void 0 ? void 0 : _c.policyMode, legalHold: options.legalHold, encryptionScope: options.encryptionScope, copySourceTags: options.copySourceTags }, convertTracingToRequestOptionsBase(updatedOptions))); + return await this.pageBlobContext + .getPageRangesDiff(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), prevSnapshotUrl, range: rangeToString({ offset, count }) }, convertTracingToRequestOptionsBase(updatedOptions))) + .then(rangeResponseFromModel); } catch (e) { span.setStatus({ @@ -42367,117 +41818,19 @@ class BlobClient extends StorageClient { } } /** - * Sets the tier on a blob. The operation is allowed on a page blob in a premium - * storage account and on a block blob in a blob storage account (locally redundant - * storage only). A premium page blob's tier determines the allowed size, IOPS, - * and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive - * storage type. This operation does not update the blob's ETag. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier + * Resizes the page blob to the specified size (which must be a multiple of 512). + * @see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties * - * @param tier - The tier to be set on the blob. Valid values are Hot, Cool, or Archive. - * @param options - Optional options to the Blob Set Tier operation. + * @param size - Target size + * @param options - Options to the Page Blob Resize operation. + * @returns Response data for the Page Blob Resize operation. 
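+ *
+ * Example usage (a minimal sketch; assumes an existing `pageBlobClient`):
+ *
+ * ```js
+ * // Grow the page blob to 4 KiB; the target size must be a multiple of 512 bytes.
+ * await pageBlobClient.resize(4096);
+ * ```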
*/ - async setAccessTier(tier, options = {}) { + async resize(size, options = {}) { var _a; - const { span, updatedOptions } = createSpan("BlobClient-setAccessTier", options); - try { - return await this.blobContext.setTier(toAccessTier(tier), Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), rehydratePriority: options.rehydratePriority }, convertTracingToRequestOptionsBase(updatedOptions))); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } - } - async downloadToBuffer(param1, param2, param3, param4 = {}) { - let buffer; - let offset = 0; - let count = 0; - let options = param4; - if (param1 instanceof Buffer) { - buffer = param1; - offset = param2 || 0; - count = typeof param3 === "number" ? param3 : 0; - } - else { - offset = typeof param1 === "number" ? param1 : 0; - count = typeof param2 === "number" ? param2 : 0; - options = param3 || {}; - } - const { span, updatedOptions } = createSpan("BlobClient-downloadToBuffer", options); + options.conditions = options.conditions || {}; + const { span, updatedOptions } = createSpan("PageBlobClient-resize", options); try { - if (!options.blockSize) { - options.blockSize = 0; - } - if (options.blockSize < 0) { - throw new RangeError("blockSize option must be >= 0"); - } - if (options.blockSize === 0) { - options.blockSize = DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES; - } - if (offset < 0) { - throw new RangeError("offset option must be >= 0"); - } - if (count && count <= 0) { - throw new RangeError("count option must be greater than 0"); - } - if (!options.conditions) { - options.conditions = {}; - } - // Customer doesn't specify length, get it - if (!count) { - const response = await this.getProperties(Object.assign(Object.assign({}, options), { tracingOptions: Object.assign(Object.assign({}, options.tracingOptions), convertTracingToRequestOptionsBase(updatedOptions)) })); - count = response.contentLength - offset; - if (count < 0) { - throw new RangeError(`offset ${offset} shouldn't be larger than blob size ${response.contentLength}`); - } - } - // Allocate the buffer of size = count if the buffer is not provided - if (!buffer) { - try { - buffer = Buffer.alloc(count); - } - catch (error) { - throw new Error(`Unable to allocate the buffer of size: ${count}(in bytes). 
Please try passing your own buffer to the "downloadToBuffer" method or try using other methods like "download" or "downloadToFile".\t ${error.message}`); - } - } - if (buffer.length < count) { - throw new RangeError(`The buffer's size should be equal to or larger than the request count of bytes: ${count}`); - } - let transferProgress = 0; - const batch = new Batch(options.concurrency); - for (let off = offset; off < offset + count; off = off + options.blockSize) { - batch.addOperation(async () => { - // Exclusive chunk end position - let chunkEnd = offset + count; - if (off + options.blockSize < chunkEnd) { - chunkEnd = off + options.blockSize; - } - const response = await this.download(off, chunkEnd - off, { - abortSignal: options.abortSignal, - conditions: options.conditions, - maxRetryRequests: options.maxRetryRequestsPerBlock, - customerProvidedKey: options.customerProvidedKey, - tracingOptions: Object.assign(Object.assign({}, options.tracingOptions), convertTracingToRequestOptionsBase(updatedOptions)), - }); - const stream = response.readableStreamBody; - await streamToBuffer(stream, buffer, off - offset, chunkEnd - offset); - // Update progress after block is downloaded, in case of block trying - // Could provide finer grained progress updating inside HTTP requests, - // only if convenience layer download try is enabled - transferProgress += chunkEnd - off; - if (options.onProgress) { - options.onProgress({ loadedBytes: transferProgress }); - } - }); - } - await batch.do(); - return buffer; + return await this.pageBlobContext.resize(size, Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), encryptionScope: options.encryptionScope }, convertTracingToRequestOptionsBase(updatedOptions))); } catch (e) { span.setStatus({ @@ -42491,118 +41844,50 @@ class BlobClient extends StorageClient { } } /** - * ONLY AVAILABLE IN NODE.JS RUNTIME. - * - * Downloads an Azure Blob to a local file. - * Fails if the the given file path already exits. - * Offset and count are optional, pass 0 and undefined respectively to download the entire blob. + * Sets a page blob's sequence number. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties * - * @param filePath - - * @param offset - From which position of the block blob to download. - * @param count - How much data to be downloaded. Will download to the end when passing undefined. - * @param options - Options to Blob download options. - * @returns The response data for blob download operation, - * but with readableStreamBody set to undefined since its - * content is already read and written into a local file - * at the specified path. - */ - async downloadToFile(filePath, offset = 0, count, options = {}) { - const { span, updatedOptions } = createSpan("BlobClient-downloadToFile", options); - try { - const response = await this.download(offset, count, Object.assign(Object.assign({}, options), { tracingOptions: Object.assign(Object.assign({}, options.tracingOptions), convertTracingToRequestOptionsBase(updatedOptions)) })); - if (response.readableStreamBody) { - await readStreamToLocalFile(response.readableStreamBody, filePath); - } - // The stream is no longer accessible so setting it to undefined. 
- response.blobDownloadStream = undefined; - return response; - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } - } - getBlobAndContainerNamesFromUrl() { - let containerName; - let blobName; - try { - // URL may look like the following - // "https://myaccount.blob.core.windows.net/mycontainer/blob?sasString"; - // "https://myaccount.blob.core.windows.net/mycontainer/blob"; - // "https://myaccount.blob.core.windows.net/mycontainer/blob/a.txt?sasString"; - // "https://myaccount.blob.core.windows.net/mycontainer/blob/a.txt"; - // IPv4/IPv6 address hosts, Endpoints - `http://127.0.0.1:10000/devstoreaccount1/containername/blob` - // http://localhost:10001/devstoreaccount1/containername/blob - const parsedUrl = coreHttp.URLBuilder.parse(this.url); - if (parsedUrl.getHost().split(".")[1] === "blob") { - // "https://myaccount.blob.core.windows.net/containername/blob". - // .getPath() -> /containername/blob - const pathComponents = parsedUrl.getPath().match("/([^/]*)(/(.*))?"); - containerName = pathComponents[1]; - blobName = pathComponents[3]; - } - else if (isIpEndpointStyle(parsedUrl)) { - // IPv4/IPv6 address hosts... Example - http://192.0.0.10:10001/devstoreaccount1/containername/blob - // Single word domain without a [dot] in the endpoint... Example - http://localhost:10001/devstoreaccount1/containername/blob - // .getPath() -> /devstoreaccount1/containername/blob - const pathComponents = parsedUrl.getPath().match("/([^/]*)/([^/]*)(/(.*))?"); - containerName = pathComponents[2]; - blobName = pathComponents[4]; - } - else { - // "https://customdomain.com/containername/blob". - // .getPath() -> /containername/blob - const pathComponents = parsedUrl.getPath().match("/([^/]*)(/(.*))?"); - containerName = pathComponents[1]; - blobName = pathComponents[3]; - } - // decode the encoded blobName, containerName - to get all the special characters that might be present in them - containerName = decodeURIComponent(containerName); - blobName = decodeURIComponent(blobName); - // Azure Storage Server will replace "\" with "/" in the blob names - // doing the same in the SDK side so that the user doesn't have to replace "\" instances in the blobName - blobName = blobName.replace(/\\/g, "/"); - if (!containerName) { - throw new Error("Provided containerName is invalid."); - } - return { blobName, containerName }; + * @param sequenceNumberAction - Indicates how the service should modify the blob's sequence number. + * @param sequenceNumber - Required if sequenceNumberAction is max or update + * @param options - Options to the Page Blob Update Sequence Number operation. + * @returns Response data for the Page Blob Update Sequence Number operation. + */ + async updateSequenceNumber(sequenceNumberAction, sequenceNumber, options = {}) { + var _a; + options.conditions = options.conditions || {}; + const { span, updatedOptions } = createSpan("PageBlobClient-updateSequenceNumber", options); + try { + return await this.pageBlobContext.updateSequenceNumber(sequenceNumberAction, Object.assign({ abortSignal: options.abortSignal, blobSequenceNumber: sequenceNumber, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? 
void 0 : _a.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); } - catch (error) { - throw new Error("Unable to extract blobName and containerName with provided information."); + catch (e) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); } } /** - * Asynchronously copies a blob to a destination within the storage account. - * In version 2012-02-12 and later, the source for a Copy Blob operation can be - * a committed blob in any Azure storage account. - * Beginning with version 2015-02-21, the source for a Copy Blob operation can be - * an Azure file in any Azure storage account. - * Only storage accounts created on or after June 7th, 2012 allow the Copy Blob - * operation to copy from another storage account. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob + * Begins an operation to start an incremental copy from one page blob's snapshot to this page blob. + * The snapshot is copied such that only the differential changes between the previously + * copied snapshot are transferred to the destination. + * The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. + * @see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob + * @see https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots * - * @param copySource - url to the source Azure Blob/File. - * @param options - Optional options to the Blob Start Copy From URL operation. + * @param copySource - Specifies the name of the source page blob snapshot. For example, + * https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= + * @param options - Options to the Page Blob Copy Incremental operation. + * @returns Response data for the Page Blob Copy Incremental operation. */ - async startCopyFromURL(copySource, options = {}) { - var _a, _b, _c; - const { span, updatedOptions } = createSpan("BlobClient-startCopyFromURL", options); - options.conditions = options.conditions || {}; - options.sourceConditions = options.sourceConditions || {}; + async startCopyIncremental(copySource, options = {}) { + var _a; + const { span, updatedOptions } = createSpan("PageBlobClient-startCopyIncremental", options); try { - return await this.blobContext.startCopyFromURL(copySource, Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, metadata: options.metadata, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), sourceModifiedAccessConditions: { - sourceIfMatch: options.sourceConditions.ifMatch, - sourceIfModifiedSince: options.sourceConditions.ifModifiedSince, - sourceIfNoneMatch: options.sourceConditions.ifNoneMatch, - sourceIfUnmodifiedSince: options.sourceConditions.ifUnmodifiedSince, - sourceIfTags: options.sourceConditions.tagConditions, - }, immutabilityPolicyExpiry: (_b = options.immutabilityPolicy) === null || _b === void 0 ? void 0 : _b.expiriesOn, immutabilityPolicyMode: (_c = options.immutabilityPolicy) === null || _c === void 0 ? 
void 0 : _c.policyMode, legalHold: options.legalHold, rehydratePriority: options.rehydratePriority, tier: toAccessTier(options.tier), blobTagsString: toBlobTagsString(options.tags), sealBlob: options.sealBlob }, convertTracingToRequestOptionsBase(updatedOptions))); + return await this.pageBlobContext.copyIncremental(copySource, Object.assign({ abortSignal: options.abortSignal, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); } catch (e) { span.setStatus({ @@ -42615,35 +41900,297 @@ class BlobClient extends StorageClient { span.end(); } } +} + +// Copyright (c) Microsoft Corporation. +async function getBodyAsText(batchResponse) { + let buffer = Buffer.alloc(BATCH_MAX_PAYLOAD_IN_BYTES); + const responseLength = await streamToBuffer2(batchResponse.readableStreamBody, buffer); + // Slice the buffer to trim the empty ending. + buffer = buffer.slice(0, responseLength); + return buffer.toString(); +} +function utf8ByteLength(str) { + return Buffer.byteLength(str); +} + +// Copyright (c) Microsoft Corporation. +const HTTP_HEADER_DELIMITER = ": "; +const SPACE_DELIMITER = " "; +const NOT_FOUND = -1; +/** + * Util class for parsing batch response. + */ +class BatchResponseParser { + constructor(batchResponse, subRequests) { + if (!batchResponse || !batchResponse.contentType) { + // In a special case (reported), the server may return an invalid content-type which cannot be parsed. + throw new RangeError("batchResponse is malformed or doesn't contain valid content-type."); + } + if (!subRequests || subRequests.size === 0) { + // This should be prevented during coding. + throw new RangeError("Invalid state: subRequests is not provided or size is 0."); + } + this.batchResponse = batchResponse; + this.subRequests = subRequests; + this.responseBatchBoundary = this.batchResponse.contentType.split("=")[1]; + this.perResponsePrefix = `--${this.responseBatchBoundary}${HTTP_LINE_ENDING}`; + this.batchResponseEnding = `--${this.responseBatchBoundary}--`; + } + // For an example of the response, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/blob-batch#response + async parseBatchResponse() { + // When logic reaches here, the batch request should have already succeeded with 202, so we can further parse + // the sub requests' responses. + if (this.batchResponse._response.status !== HTTPURLConnection.HTTP_ACCEPTED) { + throw new Error(`Invalid state: batch request failed with status: '${this.batchResponse._response.status}'.`); + } + const responseBodyAsText = await getBodyAsText(this.batchResponse); + const subResponses = responseBodyAsText + .split(this.batchResponseEnding)[0] // string after ending is useless + .split(this.perResponsePrefix) + .slice(1); // string before first response boundary is useless + const subResponseCount = subResponses.length; + // Defensive coding in case of potential parsing errors. + // Note: subResponseCount == 1 is a special case where the sub request is invalid. + // We try to prevent such cases through early validation, e.g. validate sub request count >= 1. + // In the unexpected case of an invalid sub request, we still allow the sub response to be parsed and returned to the user. 
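+ // Concretely, the only accepted shapes are exactly one sub response per sub request, or a
+ // single sub response (the service rejecting the batch as a whole); anything else is malformed.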
+ if (subResponseCount !== this.subRequests.size && subResponseCount !== 1) { + throw new Error("Invalid state: sub responses' count is not equal to sub requests' count."); + } + const deserializedSubResponses = new Array(subResponseCount); + let subResponsesSucceededCount = 0; + let subResponsesFailedCount = 0; + // Parse the subResponses. + for (let index = 0; index < subResponseCount; index++) { + const subResponse = subResponses[index]; + const deserializedSubResponse = {}; + deserializedSubResponse.headers = new coreHttp.HttpHeaders(); + const responseLines = subResponse.split(`${HTTP_LINE_ENDING}`); + let subRespHeaderStartFound = false; + let subRespHeaderEndFound = false; + let subRespFailed = false; + let contentId = NOT_FOUND; + for (const responseLine of responseLines) { + if (!subRespHeaderStartFound) { + // Convention line to indicate content ID + if (responseLine.startsWith(HeaderConstants.CONTENT_ID)) { + contentId = parseInt(responseLine.split(HTTP_HEADER_DELIMITER)[1]); + } + // An HTTP version line with a status code indicates the start of a sub request's response. + // Example: HTTP/1.1 202 Accepted + if (responseLine.startsWith(HTTP_VERSION_1_1)) { + subRespHeaderStartFound = true; + const tokens = responseLine.split(SPACE_DELIMITER); + deserializedSubResponse.status = parseInt(tokens[1]); + deserializedSubResponse.statusMessage = tokens.slice(2).join(SPACE_DELIMITER); + } + continue; // Skip convention headers not specific to the sub request, i.e. Content-Type: application/http and Content-ID: * + } + if (responseLine.trim() === "") { + // The sub response's header start line was already found, and the first empty line marks the end of its headers. + if (!subRespHeaderEndFound) { + subRespHeaderEndFound = true; + } + continue; // Skip empty line + } + // Note: when code reaches here, subRespHeaderStartFound == true + if (!subRespHeaderEndFound) { + if (responseLine.indexOf(HTTP_HEADER_DELIMITER) === -1) { + // Defensive coding to avoid missing valuable lines. + throw new Error(`Invalid state: find non-empty line '${responseLine}' without HTTP header delimiter '${HTTP_HEADER_DELIMITER}'.`); + } + // Parse headers of sub response. + const tokens = responseLine.split(HTTP_HEADER_DELIMITER); + deserializedSubResponse.headers.set(tokens[0], tokens[1]); + if (tokens[0] === HeaderConstants.X_MS_ERROR_CODE) { + deserializedSubResponse.errorCode = tokens[1]; + subRespFailed = true; + } + } + else { + // Assemble body of sub response. + if (!deserializedSubResponse.bodyAsText) { + deserializedSubResponse.bodyAsText = ""; + } + deserializedSubResponse.bodyAsText += responseLine; + } + } // Inner for end + // The response will contain the Content-ID header for each corresponding subrequest response to use for tracking. + // The Content-IDs are set to valid indexes into the subrequests we sent. In the status code 202 path, we expect + // a 1-1 mapping from [0, subRequests.size) to the Content-IDs returned. If not, we simply don't return that + // unexpected subResponse in the parsed response, and we can always look it up in the raw response for debugging purposes. 
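+ // In other words, keep a sub response only when its Content-ID parses to an integer index
+ // inside [0, subRequests.size) that has not been filled yet; otherwise log it and drop it.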
+ if (contentId !== NOT_FOUND && + Number.isInteger(contentId) && + contentId >= 0 && + contentId < this.subRequests.size && + deserializedSubResponses[contentId] === undefined) { + deserializedSubResponse._request = this.subRequests.get(contentId); + deserializedSubResponses[contentId] = deserializedSubResponse; + } + else { + logger.error(`subResponses[${index}] is dropped as the Content-ID is not found or invalid, Content-ID: ${contentId}`); + } + if (subRespFailed) { + subResponsesFailedCount++; + } + else { + subResponsesSucceededCount++; + } + } + return { + subResponses: deserializedSubResponses, + subResponsesSucceededCount: subResponsesSucceededCount, + subResponsesFailedCount: subResponsesFailedCount, + }; + } +} + +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +var MutexLockStatus; +(function (MutexLockStatus) { + MutexLockStatus[MutexLockStatus["LOCKED"] = 0] = "LOCKED"; + MutexLockStatus[MutexLockStatus["UNLOCKED"] = 1] = "UNLOCKED"; +})(MutexLockStatus || (MutexLockStatus = {})); +/** + * An async mutex lock. + */ +class Mutex { /** - * Only available for BlobClient constructed with a shared key credential. - * - * Generates a Blob Service Shared Access Signature (SAS) URI based on the client properties - * and parameters passed in. The SAS is signed by the shared key credential of the client. - * - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas + * Lock for a specific key. If the lock has been acquired by another caller, this + * will wait until the lock is released and then acquire it. * - * @param options - Optional parameters. - * @returns The SAS URI consisting of the URI to the resource represented by this client, followed by the generated SAS token. + * @param key - lock key */ - generateSasUrl(options) { + static async lock(key) { return new Promise((resolve) => { - if (!(this.credential instanceof StorageSharedKeyCredential)) { - throw new RangeError("Can only generate the SAS when the client is initialized with a shared key credential"); + if (this.keys[key] === undefined || this.keys[key] === MutexLockStatus.UNLOCKED) { + this.keys[key] = MutexLockStatus.LOCKED; + resolve(); + } + else { + this.onUnlockEvent(key, () => { + this.keys[key] = MutexLockStatus.LOCKED; + resolve(); + }); } - const sas = generateBlobSASQueryParameters(Object.assign({ containerName: this._containerName, blobName: this._name, snapshotTime: this._snapshot, versionId: this._versionId }, options), this.credential).toString(); - resolve(appendToURLQuery(this.url, sas)); }); } /** - * Delete the immutablility policy on the blob. + * Unlock a key. * - * @param options - Optional options to delete immutability policy on the blob. 
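+ *
+ * Example of the intended lock/unlock pairing (a minimal sketch, mirroring how `BlobBatch.addSubRequestInternal` below uses this class):
+ *
+ * ```js
+ * await Mutex.lock("batch");
+ * try {
+ *   // ...mutate state shared across concurrent callers...
+ * } finally {
+ *   await Mutex.unlock("batch");
+ * }
+ * ```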
+ * @param key - */ - async deleteImmutabilityPolicy(options) { - const { span, updatedOptions } = createSpan("BlobClient-deleteImmutabilityPolicy", options); + static async unlock(key) { + return new Promise((resolve) => { + if (this.keys[key] === MutexLockStatus.LOCKED) { + this.emitUnlockEvent(key); + } + delete this.keys[key]; + resolve(); + }); + } + static onUnlockEvent(key, handler) { + if (this.listeners[key] === undefined) { + this.listeners[key] = [handler]; + } + else { + this.listeners[key].push(handler); + } + } + static emitUnlockEvent(key) { + if (this.listeners[key] !== undefined && this.listeners[key].length > 0) { + const handler = this.listeners[key].shift(); + setImmediate(() => { + handler.call(this); + }); + } + } +} +Mutex.keys = {}; +Mutex.listeners = {}; + +// Copyright (c) Microsoft Corporation. +/** + * A BlobBatch represents an aggregated set of operations on blobs. + * Currently, only `delete` and `setAccessTier` are supported. + */ +class BlobBatch { + constructor() { + this.batch = "batch"; + this.batchRequest = new InnerBatchRequest(); + } + /** + * Get the value of Content-Type for a batch request. + * The value must be multipart/mixed with a batch boundary. + * Example: multipart/mixed; boundary=batch_a81786c8-e301-4e42-a729-a32ca24ae252 + */ + getMultiPartContentType() { + return this.batchRequest.getMultipartContentType(); + } + /** + * Get assembled HTTP request body for sub requests. + */ + getHttpRequestBody() { + return this.batchRequest.getHttpRequestBody(); + } + /** + * Get sub requests that are added into the batch request. + */ + getSubRequests() { + return this.batchRequest.getSubRequests(); + } + async addSubRequestInternal(subRequest, assembleSubRequestFunc) { + await Mutex.lock(this.batch); try { - return await this.blobContext.deleteImmutabilityPolicy(Object.assign({ abortSignal: options === null || options === void 0 ? void 0 : options.abortSignal }, convertTracingToRequestOptionsBase(updatedOptions))); + this.batchRequest.preAddSubRequest(subRequest); + await assembleSubRequestFunc(); + this.batchRequest.postAddSubRequest(subRequest); + } + finally { + await Mutex.unlock(this.batch); + } + } + setBatchType(batchType) { + if (!this.batchType) { + this.batchType = batchType; + } + if (this.batchType !== batchType) { + throw new RangeError(`BlobBatch only supports one operation type per batch and it already is being used for ${this.batchType} operations.`); + } + } + async deleteBlob(urlOrBlobClient, credentialOrOptions, options) { + let url; + let credential; + if (typeof urlOrBlobClient === "string" && + ((coreHttp.isNode && credentialOrOptions instanceof StorageSharedKeyCredential) || + credentialOrOptions instanceof AnonymousCredential || + coreHttp.isTokenCredential(credentialOrOptions))) { + // First overload + url = urlOrBlobClient; + credential = credentialOrOptions; + } + else if (urlOrBlobClient instanceof BlobClient) { + // Second overload + url = urlOrBlobClient.url; + credential = urlOrBlobClient.credential; + options = credentialOrOptions; + } + else { + throw new RangeError("Invalid arguments. 
Either url and credential, or BlobClient need be provided."); + } + if (!options) { + options = {}; + } + const { span, updatedOptions } = createSpan("BatchDeleteRequest-addSubRequest", options); + try { + this.setBatchType("delete"); + await this.addSubRequestInternal({ + url: url, + credential: credential, + }, async () => { + await new BlobClient(url, this.batchRequest.createPipeline(credential)).delete(updatedOptions); + }); } catch (e) { span.setStatus({ @@ -42656,15 +42203,41 @@ class BlobClient extends StorageClient { span.end(); } } - /** - * Set immutablility policy on the blob. - * - * @param options - Optional options to set immutability policy on the blob. - */ - async setImmutabilityPolicy(immutabilityPolicy, options) { - const { span, updatedOptions } = createSpan("BlobClient-setImmutabilityPolicy", options); + async setBlobAccessTier(urlOrBlobClient, credentialOrTier, tierOrOptions, options) { + let url; + let credential; + let tier; + if (typeof urlOrBlobClient === "string" && + ((coreHttp.isNode && credentialOrTier instanceof StorageSharedKeyCredential) || + credentialOrTier instanceof AnonymousCredential || + coreHttp.isTokenCredential(credentialOrTier))) { + // First overload + url = urlOrBlobClient; + credential = credentialOrTier; + tier = tierOrOptions; + } + else if (urlOrBlobClient instanceof BlobClient) { + // Second overload + url = urlOrBlobClient.url; + credential = urlOrBlobClient.credential; + tier = credentialOrTier; + options = tierOrOptions; + } + else { + throw new RangeError("Invalid arguments. Either url and credential, or BlobClient need be provided."); + } + if (!options) { + options = {}; + } + const { span, updatedOptions } = createSpan("BatchSetTierRequest-addSubRequest", options); try { - return await this.blobContext.setImmutabilityPolicy(Object.assign({ abortSignal: options === null || options === void 0 ? void 0 : options.abortSignal, immutabilityPolicyExpiry: immutabilityPolicy.expiriesOn, immutabilityPolicyMode: immutabilityPolicy.policyMode, modifiedAccessConditions: options === null || options === void 0 ? void 0 : options.modifiedAccessCondition }, convertTracingToRequestOptionsBase(updatedOptions))); + this.setBatchType("setAccessTier"); + await this.addSubRequestInternal({ + url: url, + credential: credential, + }, async () => { + await new BlobClient(url, this.batchRequest.createPipeline(credential)).setAccessTier(tier, updatedOptions); + }); } catch (e) { span.setStatus({ @@ -42677,271 +42250,265 @@ class BlobClient extends StorageClient { span.end(); } } +} +/** + * Inner batch request class which is responsible for assembling and serializing sub requests. + * See https://docs.microsoft.com/en-us/rest/api/storageservices/blob-batch#request-body for how requests are assembled. + */ +class InnerBatchRequest { + constructor() { + this.operationCount = 0; + this.body = ""; + const tempGuid = coreHttp.generateUuid(); + // batch_{batchid} + this.boundary = `batch_${tempGuid}`; + // --batch_{batchid} + // Content-Type: application/http + // Content-Transfer-Encoding: binary + this.subRequestPrefix = `--${this.boundary}${HTTP_LINE_ENDING}${HeaderConstants.CONTENT_TYPE}: application/http${HTTP_LINE_ENDING}${HeaderConstants.CONTENT_TRANSFER_ENCODING}: binary`; + // multipart/mixed; boundary=batch_{batchid} + this.multipartContentType = `multipart/mixed; boundary=${this.boundary}`; + // --batch_{batchid}-- + this.batchRequestEnding = `--${this.boundary}--`; + this.subRequests = new Map(); + } /** - * Set legal hold on the blob. 
- * - * @param options - Optional options to set legal hold on the blob. + * Create a pipeline to assemble sub requests. The idea here is to use the existing + * credential and serialization/deserialization components, with additional policies to + * filter unnecessary headers, assemble sub requests into the request's body, + * and intercept the request from going to the wire. + * @param credential - Such as AnonymousCredential, StorageSharedKeyCredential or any credential from the `@azure/identity` package to authenticate requests to the service. You can also provide an object that implements the TokenCredential interface. If not specified, AnonymousCredential is used. */ - async setLegalHold(legalHoldEnabled, options) { - const { span, updatedOptions } = createSpan("BlobClient-setLegalHold", options); - try { - return await this.blobContext.setLegalHold(legalHoldEnabled, Object.assign({ abortSignal: options === null || options === void 0 ? void 0 : options.abortSignal }, convertTracingToRequestOptionsBase(updatedOptions))); + createPipeline(credential) { + const isAnonymousCreds = credential instanceof AnonymousCredential; + const policyFactoryLength = 3 + (isAnonymousCreds ? 0 : 1); // [deserializationPolicy, BatchHeaderFilterPolicyFactory, (Optional)Credential, BatchRequestAssemblePolicyFactory] + const factories = new Array(policyFactoryLength); + factories[0] = coreHttp.deserializationPolicy(); // Default deserializationPolicy is provided by protocol layer + factories[1] = new BatchHeaderFilterPolicyFactory(); // Use batch header filter policy to exclude unnecessary headers + if (!isAnonymousCreds) { + factories[2] = coreHttp.isTokenCredential(credential) + ? attachCredential(coreHttp.bearerTokenAuthenticationPolicy(credential, StorageOAuthScopes), credential) + : credential; } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; + factories[policyFactoryLength - 1] = new BatchRequestAssemblePolicyFactory(this); // Use batch assemble policy to assemble the request and intercept it from going to the wire + return new Pipeline(factories, {}); + } + appendSubRequestToBody(request) { + // Start to assemble the sub request + this.body += [ + this.subRequestPrefix, + `${HeaderConstants.CONTENT_ID}: ${this.operationCount}`, + "", + `${request.method.toString()} ${getURLPathAndQuery(request.url)} ${HTTP_VERSION_1_1}${HTTP_LINE_ENDING}`, // sub request start line with method + ].join(HTTP_LINE_ENDING); + for (const header of request.headers.headersArray()) { + this.body += `${header.name}: ${header.value}${HTTP_LINE_ENDING}`; + } + this.body += HTTP_LINE_ENDING; // sub request's headers must end with an empty line + // No body to assemble for currently supported batch requests + // End of sub request assembly + } + preAddSubRequest(subRequest) { + if (this.operationCount >= BATCH_MAX_REQUEST) { + throw new RangeError(`Cannot exceed ${BATCH_MAX_REQUEST} sub requests in a single batch`); + } + // Fail fast if the url for the sub request is invalid + const path = getURLPath(subRequest.url); + if (!path || path === "") { + throw new RangeError(`Invalid url for sub request: '${subRequest.url}'`); + } + } + postAddSubRequest(subRequest) { + this.subRequests.set(this.operationCount, subRequest); + this.operationCount++; + } + // Returns the HTTP request body, appending the batch ending line to the assembled sub request body. 
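+ // For reference, the assembled body looks roughly like this (one section per sub request):
+ //   --batch_<guid>
+ //   Content-Type: application/http
+ //   Content-Transfer-Encoding: binary
+ //   Content-ID: <n>
+ //
+ //   <METHOD> <path-and-query> HTTP/1.1
+ //   <header-name>: <header-value>
+ //
+ //   --batch_<guid>--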
+ getHttpRequestBody() { + return `${this.body}${this.batchRequestEnding}${HTTP_LINE_ENDING}`; + } + getMultipartContentType() { + return this.multipartContentType; + } + getSubRequests() { + return this.subRequests; + } +} +class BatchRequestAssemblePolicy extends coreHttp.BaseRequestPolicy { + constructor(batchRequest, nextPolicy, options) { + super(nextPolicy, options); + this.dummyResponse = { + request: new coreHttp.WebResource(), + status: 200, + headers: new coreHttp.HttpHeaders(), + }; + this.batchRequest = batchRequest; + } + async sendRequest(request) { + await this.batchRequest.appendSubRequestToBody(request); + return this.dummyResponse; // Intercept request from going to wire + } +} +class BatchRequestAssemblePolicyFactory { + constructor(batchRequest) { + this.batchRequest = batchRequest; + } + create(nextPolicy, options) { + return new BatchRequestAssemblePolicy(this.batchRequest, nextPolicy, options); + } +} +class BatchHeaderFilterPolicy extends coreHttp.BaseRequestPolicy { + // The base class has a protected constructor. Adding a public one to enable constructing of this class. + /* eslint-disable-next-line @typescript-eslint/no-useless-constructor*/ + constructor(nextPolicy, options) { + super(nextPolicy, options); + } + async sendRequest(request) { + let xMsHeaderName = ""; + for (const header of request.headers.headersArray()) { + if (iEqual(header.name, HeaderConstants.X_MS_VERSION)) { + xMsHeaderName = header.name; + } } - finally { - span.end(); + if (xMsHeaderName !== "") { + request.headers.remove(xMsHeaderName); // The subrequests should not have the x-ms-version header. } + return this._nextPolicy.sendRequest(request); + } +} +class BatchHeaderFilterPolicyFactory { + create(nextPolicy, options) { + return new BatchHeaderFilterPolicy(nextPolicy, options); } } + +// Copyright (c) Microsoft Corporation. /** - * AppendBlobClient defines a set of operations applicable to append blobs. + * A BlobBatchClient allows you to make batched requests to the Azure Storage Blob service. + * + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/blob-batch */ -class AppendBlobClient extends BlobClient { - constructor(urlOrConnectionString, credentialOrPipelineOrContainerName, blobNameOrOptions, +class BlobBatchClient { + constructor(url, credentialOrPipeline, // Legacy, no fix for eslint error without breaking. Disable it for this interface. /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/ options) { - // In TypeScript we cannot simply pass all parameters to super() like below so have to duplicate the code instead. 
- // super(s, credentialOrPipelineOrContainerNameOrOptions, blobNameOrOptions, options); let pipeline; - let url; - options = options || {}; - if (isPipelineLike(credentialOrPipelineOrContainerName)) { - // (url: string, pipeline: Pipeline) - url = urlOrConnectionString; - pipeline = credentialOrPipelineOrContainerName; - } - else if ((coreHttp.isNode && credentialOrPipelineOrContainerName instanceof StorageSharedKeyCredential) || - credentialOrPipelineOrContainerName instanceof AnonymousCredential || - coreHttp.isTokenCredential(credentialOrPipelineOrContainerName)) { - // (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions) url = urlOrConnectionString; - url = urlOrConnectionString; - options = blobNameOrOptions; - pipeline = newPipeline(credentialOrPipelineOrContainerName, options); + if (isPipelineLike(credentialOrPipeline)) { + pipeline = credentialOrPipeline; } - else if (!credentialOrPipelineOrContainerName && - typeof credentialOrPipelineOrContainerName !== "string") { - // (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions) - url = urlOrConnectionString; - // The second parameter is undefined. Use anonymous credential. + else if (!credentialOrPipeline) { + // no credential provided pipeline = newPipeline(new AnonymousCredential(), options); } - else if (credentialOrPipelineOrContainerName && - typeof credentialOrPipelineOrContainerName === "string" && - blobNameOrOptions && - typeof blobNameOrOptions === "string") { - // (connectionString: string, containerName: string, blobName: string, options?: StoragePipelineOptions) - const containerName = credentialOrPipelineOrContainerName; - const blobName = blobNameOrOptions; - const extractedCreds = extractConnectionStringParts(urlOrConnectionString); - if (extractedCreds.kind === "AccountConnString") { - if (coreHttp.isNode) { - const sharedKeyCredential = new StorageSharedKeyCredential(extractedCreds.accountName, extractedCreds.accountKey); - url = appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName)); - if (!options.proxyOptions) { - options.proxyOptions = coreHttp.getDefaultProxySettings(extractedCreds.proxyUri); - } - pipeline = newPipeline(sharedKeyCredential, options); - } - else { - throw new Error("Account connection string is only supported in Node.js environment"); - } - } - else if (extractedCreds.kind === "SASConnString") { - url = - appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName)) + - "?" + - extractedCreds.accountSas; - pipeline = newPipeline(new AnonymousCredential(), options); - } - else { - throw new Error("Connection string must be either an Account connection string or a SAS connection string"); - } - } else { - throw new Error("Expecting non-empty strings for containerName and blobName parameters"); - } - super(url, pipeline); - this.appendBlobContext = new AppendBlob(this.storageClientContext); - } - /** - * Creates a new AppendBlobClient object identical to the source but with the - * specified snapshot timestamp. - * Provide "" will remove the snapshot and return a Client to the base blob. - * - * @param snapshot - The snapshot timestamp. - * @returns A new AppendBlobClient object identical to the source but with the specified snapshot timestamp. 
- */ - withSnapshot(snapshot) { - return new AppendBlobClient(setURLParameter(this.url, URLConstants.Parameters.SNAPSHOT, snapshot.length === 0 ? undefined : snapshot), this.pipeline); - } - /** - * Creates a 0-length append blob. Call AppendBlock to append data to an append blob. - * @see https://docs.microsoft.com/rest/api/storageservices/put-blob - * - * @param options - Options to the Append Block Create operation. - * - * - * Example usage: - * - * ```js - * const appendBlobClient = containerClient.getAppendBlobClient(""); - * await appendBlobClient.create(); - * ``` - */ - async create(options = {}) { - var _a, _b, _c; - const { span, updatedOptions } = createSpan("AppendBlobClient-create", options); - options.conditions = options.conditions || {}; - try { - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - return await this.appendBlobContext.create(0, Object.assign({ abortSignal: options.abortSignal, blobHttpHeaders: options.blobHTTPHeaders, leaseAccessConditions: options.conditions, metadata: options.metadata, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope, immutabilityPolicyExpiry: (_b = options.immutabilityPolicy) === null || _b === void 0 ? void 0 : _b.expiriesOn, immutabilityPolicyMode: (_c = options.immutabilityPolicy) === null || _c === void 0 ? void 0 : _c.policyMode, legalHold: options.legalHold, blobTagsString: toBlobTagsString(options.tags) }, convertTracingToRequestOptionsBase(updatedOptions))); + pipeline = newPipeline(credentialOrPipeline, options); } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; + const storageClientContext = new StorageClientContext(url, pipeline.toServiceClientOptions()); + const path = getURLPath(url); + if (path && path !== "/") { + // Container scoped. + this.serviceOrContainerContext = new Container(storageClientContext); } - finally { - span.end(); + else { + this.serviceOrContainerContext = new Service(storageClientContext); } } /** - * Creates a 0-length append blob. Call AppendBlock to append data to an append blob. - * If the blob with the same name already exists, the content of the existing blob will remain unchanged. - * @see https://docs.microsoft.com/rest/api/storageservices/put-blob - * - * @param options - + * Creates a {@link BlobBatch}. + * A BlobBatch represents an aggregated set of operations on blobs. */ - async createIfNotExists(options = {}) { - var _a, _b; - const { span, updatedOptions } = createSpan("AppendBlobClient-createIfNotExists", options); - const conditions = { ifNoneMatch: ETagAny }; - try { - const res = await this.create(Object.assign(Object.assign({}, updatedOptions), { conditions })); - return Object.assign(Object.assign({ succeeded: true }, res), { _response: res._response }); - } - catch (e) { - if (((_a = e.details) === null || _a === void 0 ? void 0 : _a.errorCode) === "BlobAlreadyExists") { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: "Expected exception when creating a blob only if it does not already exist.", - }); - return Object.assign(Object.assign({ succeeded: false }, (_b = e.response) === null || _b === void 0 ? 
void 0 : _b.parsedHeaders), { _response: e.response }); + createBatch() { + return new BlobBatch(); + } + async deleteBlobs(urlsOrBlobClients, credentialOrOptions, + // Legacy, no fix for eslint error without breaking. Disable it for this interface. + /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/ + options) { + const batch = new BlobBatch(); + for (const urlOrBlobClient of urlsOrBlobClients) { + if (typeof urlOrBlobClient === "string") { + await batch.deleteBlob(urlOrBlobClient, credentialOrOptions, options); + } + else { + await batch.deleteBlob(urlOrBlobClient, credentialOrOptions); } - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); } + return this.submitBatch(batch); } - /** - * Seals the append blob, making it read only. - * - * @param options - - */ - async seal(options = {}) { - var _a; - const { span, updatedOptions } = createSpan("AppendBlobClient-seal", options); - options.conditions = options.conditions || {}; - try { - return await this.appendBlobContext.seal(Object.assign({ abortSignal: options.abortSignal, appendPositionAccessConditions: options.conditions, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); + async setBlobsAccessTier(urlsOrBlobClients, credentialOrTier, tierOrOptions, + // Legacy, no fix for eslint error without breaking. Disable it for this interface. + /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/ + options) { + const batch = new BlobBatch(); + for (const urlOrBlobClient of urlsOrBlobClients) { + if (typeof urlOrBlobClient === "string") { + await batch.setBlobAccessTier(urlOrBlobClient, credentialOrTier, tierOrOptions, options); + } + else { + await batch.setBlobAccessTier(urlOrBlobClient, credentialOrTier, tierOrOptions); + } } + return this.submitBatch(batch); } /** - * Commits a new block of data to the end of the existing append blob. - * @see https://docs.microsoft.com/rest/api/storageservices/append-block - * - * @param body - Data to be appended. - * @param contentLength - Length of the body in bytes. - * @param options - Options to the Append Block operation. + * Submit batch request which consists of multiple subrequests. * + * Get `blobBatchClient` and other details before running the snippets. + * `blobServiceClient.getBlobBatchClient()` gives the `blobBatchClient` * * Example usage: * * ```js - * const content = "Hello World!"; + * let batchRequest = new BlobBatch(); + * await batchRequest.deleteBlob(urlInString0, credential0); + * await batchRequest.deleteBlob(urlInString1, credential1, { + * deleteSnapshots: "include" + * }); + * const batchResp = await blobBatchClient.submitBatch(batchRequest); + * console.log(batchResp.subResponsesSucceededCount); + * ``` * - * // Create a new append blob and append data to the blob. - * const newAppendBlobClient = containerClient.getAppendBlobClient(""); - * await newAppendBlobClient.create(); - * await newAppendBlobClient.appendBlock(content, content.length); + * Example using a lease: * - * // Append data to an existing append blob. 
- * const existingAppendBlobClient = containerClient.getAppendBlobClient(""); - * await existingAppendBlobClient.appendBlock(content, content.length); + * ```js + * let batchRequest = new BlobBatch(); + * await batchRequest.setBlobAccessTier(blockBlobClient0, "Cool"); + * await batchRequest.setBlobAccessTier(blockBlobClient1, "Cool", { + * conditions: { leaseId: leaseId } + * }); + * const batchResp = await blobBatchClient.submitBatch(batchRequest); + * console.log(batchResp.subResponsesSucceededCount); * ``` - */ - async appendBlock(body, contentLength, options = {}) { - var _a; - const { span, updatedOptions } = createSpan("AppendBlobClient-appendBlock", options); - options.conditions = options.conditions || {}; - try { - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - return await this.appendBlobContext.appendBlock(contentLength, body, Object.assign({ abortSignal: options.abortSignal, appendPositionAccessConditions: options.conditions, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), requestOptions: { - onUploadProgress: options.onProgress, - }, transactionalContentMD5: options.transactionalContentMD5, transactionalContentCrc64: options.transactionalContentCrc64, cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope }, convertTracingToRequestOptionsBase(updatedOptions))); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } - } - /** - * The Append Block operation commits a new block of data to the end of an existing append blob - * where the contents are read from a source url. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/append-block-from-url * - * @param sourceURL - - * The url to the blob that will be the source of the copy. A source blob in the same storage account can - * be authenticated via Shared Key. However, if the source is a blob in another account, the source blob - * must either be public or must be authenticated via a shared access signature. If the source blob is - * public, no authentication is required to perform the operation. - * @param sourceOffset - Offset in source to be appended - * @param count - Number of bytes to be appended as a block + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/blob-batch + * + * @param batchRequest - A set of Delete or SetTier operations. 
* @param options - */ - async appendBlockFromURL(sourceURL, sourceOffset, count, options = {}) { - var _a; - const { span, updatedOptions } = createSpan("AppendBlobClient-appendBlockFromURL", options); - options.conditions = options.conditions || {}; - options.sourceConditions = options.sourceConditions || {}; + async submitBatch(batchRequest, options = {}) { + if (!batchRequest || batchRequest.getSubRequests().size === 0) { + throw new RangeError("Batch request should contain one or more sub requests."); + } + const { span, updatedOptions } = createSpan("BlobBatchClient-submitBatch", options); try { - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - return await this.appendBlobContext.appendBlockFromUrl(sourceURL, 0, Object.assign({ abortSignal: options.abortSignal, sourceRange: rangeToString({ offset: sourceOffset, count }), sourceContentMD5: options.sourceContentMD5, sourceContentCrc64: options.sourceContentCrc64, leaseAccessConditions: options.conditions, appendPositionAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), sourceModifiedAccessConditions: { - sourceIfMatch: options.sourceConditions.ifMatch, - sourceIfModifiedSince: options.sourceConditions.ifModifiedSince, - sourceIfNoneMatch: options.sourceConditions.ifNoneMatch, - sourceIfUnmodifiedSince: options.sourceConditions.ifUnmodifiedSince, - }, copySourceAuthorization: httpAuthorizationToString(options.sourceAuthorization), cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope }, convertTracingToRequestOptionsBase(updatedOptions))); + const batchRequestBody = batchRequest.getHttpRequestBody(); + // ServiceSubmitBatchResponseModel and ContainerSubmitBatchResponse are compatible for now. + const rawBatchResponse = await this.serviceOrContainerContext.submitBatch(utf8ByteLength(batchRequestBody), batchRequest.getMultiPartContentType(), batchRequestBody, Object.assign(Object.assign({}, options), convertTracingToRequestOptionsBase(updatedOptions))); + // Parse the sub responses result, if logic reaches here(i.e. the batch request succeeded with status code 202). + const batchResponseParser = new BatchResponseParser(rawBatchResponse, batchRequest.getSubRequests()); + const responseSummary = await batchResponseParser.parseBatchResponse(); + const res = { + _response: rawBatchResponse._response, + contentType: rawBatchResponse.contentType, + errorCode: rawBatchResponse.errorCode, + requestId: rawBatchResponse.requestId, + clientRequestId: rawBatchResponse.clientRequestId, + version: rawBatchResponse.version, + subResponses: responseSummary.subResponses, + subResponsesSucceededCount: responseSummary.subResponsesSucceededCount, + subResponsesFailedCount: responseSummary.subResponsesFailedCount, + }; + return res; } catch (e) { span.setStatus({ @@ -42955,16 +42522,15 @@ class AppendBlobClient extends BlobClient { } } } + /** - * BlockBlobClient defines a set of operations applicable to block blobs. + * A ContainerClient represents a URL to the Azure Storage container allowing you to manipulate its blobs. */ -class BlockBlobClient extends BlobClient { - constructor(urlOrConnectionString, credentialOrPipelineOrContainerName, blobNameOrOptions, +class ContainerClient extends StorageClient { + constructor(urlOrConnectionString, credentialOrPipelineOrContainerName, // Legacy, no fix for eslint error without breaking. 
Disable it for this interface. /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/ options) { - // In TypeScript we cannot simply pass all parameters to super() like below so have to duplicate the code instead. - // super(s, credentialOrPipelineOrContainerNameOrOptions, blobNameOrOptions, options); let pipeline; let url; options = options || {}; @@ -42978,7 +42544,6 @@ class BlockBlobClient extends BlobClient { coreHttp.isTokenCredential(credentialOrPipelineOrContainerName)) { // (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions) url = urlOrConnectionString; - options = blobNameOrOptions; pipeline = newPipeline(credentialOrPipelineOrContainerName, options); } else if (!credentialOrPipelineOrContainerName && @@ -42989,17 +42554,14 @@ class BlockBlobClient extends BlobClient { pipeline = newPipeline(new AnonymousCredential(), options); } else if (credentialOrPipelineOrContainerName && - typeof credentialOrPipelineOrContainerName === "string" && - blobNameOrOptions && - typeof blobNameOrOptions === "string") { + typeof credentialOrPipelineOrContainerName === "string") { // (connectionString: string, containerName: string, blobName: string, options?: StoragePipelineOptions) const containerName = credentialOrPipelineOrContainerName; - const blobName = blobNameOrOptions; const extractedCreds = extractConnectionStringParts(urlOrConnectionString); if (extractedCreds.kind === "AccountConnString") { if (coreHttp.isNode) { const sharedKeyCredential = new StorageSharedKeyCredential(extractedCreds.accountName, extractedCreds.accountKey); - url = appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName)); + url = appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)); if (!options.proxyOptions) { options.proxyOptions = coreHttp.getDefaultProxySettings(extractedCreds.proxyUri); } @@ -43011,7 +42573,7 @@ class BlockBlobClient extends BlobClient { } else if (extractedCreds.kind === "SASConnString") { url = - appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName)) + + appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)) + "?" + extractedCreds.accountSas; pipeline = newPipeline(new AnonymousCredential(), options); @@ -43021,232 +42583,40 @@ class BlockBlobClient extends BlobClient { } } else { - throw new Error("Expecting non-empty strings for containerName and blobName parameters"); + throw new Error("Expecting non-empty strings for containerName parameter"); } super(url, pipeline); - this.blockBlobContext = new BlockBlob(this.storageClientContext); - this._blobContext = new Blob$1(this.storageClientContext); - } - /** - * Creates a new BlockBlobClient object identical to the source but with the - * specified snapshot timestamp. - * Provide "" will remove the snapshot and return a URL to the base blob. - * - * @param snapshot - The snapshot timestamp. - * @returns A new BlockBlobClient object identical to the source but with the specified snapshot timestamp. - */ - withSnapshot(snapshot) { - return new BlockBlobClient(setURLParameter(this.url, URLConstants.Parameters.SNAPSHOT, snapshot.length === 0 ? undefined : snapshot), this.pipeline); - } - /** - * ONLY AVAILABLE IN NODE.JS RUNTIME. - * - * Quick query for a JSON or CSV formatted blob. 
- * - * Example usage (Node.js): - * - * ```js - * // Query and convert a blob to a string - * const queryBlockBlobResponse = await blockBlobClient.query("select * from BlobStorage"); - * const downloaded = (await streamToBuffer(queryBlockBlobResponse.readableStreamBody)).toString(); - * console.log("Query blob content:", downloaded); - * - * async function streamToBuffer(readableStream) { - * return new Promise((resolve, reject) => { - * const chunks = []; - * readableStream.on("data", (data) => { - * chunks.push(data instanceof Buffer ? data : Buffer.from(data)); - * }); - * readableStream.on("end", () => { - * resolve(Buffer.concat(chunks)); - * }); - * readableStream.on("error", reject); - * }); - * } - * ``` - * - * @param query - - * @param options - - */ - async query(query, options = {}) { - var _a; - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - const { span, updatedOptions } = createSpan("BlockBlobClient-query", options); - try { - if (!coreHttp.isNode) { - throw new Error("This operation currently is only supported in Node.js."); - } - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - const response = await this._blobContext.query(Object.assign({ abortSignal: options.abortSignal, queryRequest: { - queryType: "SQL", - expression: query, - inputSerialization: toQuerySerialization(options.inputTextConfiguration), - outputSerialization: toQuerySerialization(options.outputTextConfiguration), - }, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), cpkInfo: options.customerProvidedKey }, convertTracingToRequestOptionsBase(updatedOptions))); - return new BlobQueryResponse(response, { - abortSignal: options.abortSignal, - onProgress: options.onProgress, - onError: options.onError, - }); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } - } - /** - * Creates a new block blob, or updates the content of an existing block blob. - * Updating an existing block blob overwrites any existing metadata on the blob. - * Partial updates are not supported; the content of the existing blob is - * overwritten with the new content. To perform a partial update of a block blob's, - * use {@link stageBlock} and {@link commitBlockList}. - * - * This is a non-parallel uploading method, please use {@link uploadFile}, - * {@link uploadStream} or {@link uploadBrowserData} for better performance - * with concurrency uploading. - * - * @see https://docs.microsoft.com/rest/api/storageservices/put-blob - * - * @param body - Blob, string, ArrayBuffer, ArrayBufferView or a function - * which returns a new Readable stream whose offset is from data source beginning. - * @param contentLength - Length of body in bytes. Use Buffer.byteLength() to calculate body length for a - * string including non non-Base64/Hex-encoded characters. - * @param options - Options to the Block Blob Upload operation. - * @returns Response data for the Block Blob Upload operation. 
- * - * Example usage: - * - * ```js - * const content = "Hello world!"; - * const uploadBlobResponse = await blockBlobClient.upload(content, content.length); - * ``` - */ - async upload(body, contentLength, options = {}) { - var _a, _b, _c; - options.conditions = options.conditions || {}; - const { span, updatedOptions } = createSpan("BlockBlobClient-upload", options); - try { - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - return await this.blockBlobContext.upload(contentLength, body, Object.assign({ abortSignal: options.abortSignal, blobHttpHeaders: options.blobHTTPHeaders, leaseAccessConditions: options.conditions, metadata: options.metadata, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), requestOptions: { - onUploadProgress: options.onProgress, - }, cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope, immutabilityPolicyExpiry: (_b = options.immutabilityPolicy) === null || _b === void 0 ? void 0 : _b.expiriesOn, immutabilityPolicyMode: (_c = options.immutabilityPolicy) === null || _c === void 0 ? void 0 : _c.policyMode, legalHold: options.legalHold, tier: toAccessTier(options.tier), blobTagsString: toBlobTagsString(options.tags) }, convertTracingToRequestOptionsBase(updatedOptions))); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } - } - /** - * Creates a new Block Blob where the contents of the blob are read from a given URL. - * This API is supported beginning with the 2020-04-08 version. Partial updates - * are not supported with Put Blob from URL; the content of an existing blob is overwritten with - * the content of the new blob. To perform partial updates to a block blob’s contents using a - * source URL, use {@link stageBlockFromURL} and {@link commitBlockList}. - * - * @param sourceURL - Specifies the URL of the blob. The value - * may be a URL of up to 2 KB in length that specifies a blob. - * The value should be URL-encoded as it would appear - * in a request URI. The source blob must either be public - * or must be authenticated via a shared access signature. - * If the source blob is public, no authentication is required - * to perform the operation. Here are some examples of source object URLs: - * - https://myaccount.blob.core.windows.net/mycontainer/myblob - * - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - * @param options - Optional parameters. - */ - async syncUploadFromURL(sourceURL, options = {}) { - var _a, _b, _c, _d, _e; - options.conditions = options.conditions || {}; - const { span, updatedOptions } = createSpan("BlockBlobClient-syncUploadFromURL", options); - try { - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - return await this.blockBlobContext.putBlobFromUrl(0, sourceURL, Object.assign(Object.assign(Object.assign({}, options), { blobHttpHeaders: options.blobHTTPHeaders, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: options.conditions.tagConditions }), sourceModifiedAccessConditions: { - sourceIfMatch: (_a = options.sourceConditions) === null || _a === void 0 ? void 0 : _a.ifMatch, - sourceIfModifiedSince: (_b = options.sourceConditions) === null || _b === void 0 ? 
void 0 : _b.ifModifiedSince, - sourceIfNoneMatch: (_c = options.sourceConditions) === null || _c === void 0 ? void 0 : _c.ifNoneMatch, - sourceIfUnmodifiedSince: (_d = options.sourceConditions) === null || _d === void 0 ? void 0 : _d.ifUnmodifiedSince, - sourceIfTags: (_e = options.sourceConditions) === null || _e === void 0 ? void 0 : _e.tagConditions, - }, cpkInfo: options.customerProvidedKey, copySourceAuthorization: httpAuthorizationToString(options.sourceAuthorization), tier: toAccessTier(options.tier), blobTagsString: toBlobTagsString(options.tags), copySourceTags: options.copySourceTags }), convertTracingToRequestOptionsBase(updatedOptions))); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } + this._containerName = this.getContainerNameFromUrl(); + this.containerContext = new Container(this.storageClientContext); } /** - * Uploads the specified block to the block blob's "staging area" to be later - * committed by a call to commitBlockList. - * @see https://docs.microsoft.com/rest/api/storageservices/put-block - * - * @param blockId - A 64-byte value that is base64-encoded - * @param body - Data to upload to the staging area. - * @param contentLength - Number of bytes to upload. - * @param options - Options to the Block Blob Stage Block operation. - * @returns Response data for the Block Blob Stage Block operation. + * The name of the container. */ - async stageBlock(blockId, body, contentLength, options = {}) { - const { span, updatedOptions } = createSpan("BlockBlobClient-stageBlock", options); - try { - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - return await this.blockBlobContext.stageBlock(blockId, contentLength, body, Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, requestOptions: { - onUploadProgress: options.onProgress, - }, transactionalContentMD5: options.transactionalContentMD5, transactionalContentCrc64: options.transactionalContentCrc64, cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope }, convertTracingToRequestOptionsBase(updatedOptions))); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } + get containerName() { + return this._containerName; } /** - * The Stage Block From URL operation creates a new block to be committed as part - * of a blob where the contents are read from a URL. - * This API is available starting in version 2018-03-28. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url + * Creates a new container under the specified account. If the container with + * the same name already exists, the operation fails. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/create-container * - * @param blockId - A 64-byte value that is base64-encoded - * @param sourceURL - Specifies the URL of the blob. The value - * may be a URL of up to 2 KB in length that specifies a blob. - * The value should be URL-encoded as it would appear - * in a request URI. The source blob must either be public - * or must be authenticated via a shared access signature. - * If the source blob is public, no authentication is required - * to perform the operation. 
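The `ContainerClient` constructor completed above dispatches on its second argument, much like the `BlockBlobClient`/`PageBlobClient` constructors elsewhere in this file, minus the blob name. A sketch of the three public call shapes, with placeholder account values:

```js
const { ContainerClient, StorageSharedKeyCredential } = require("@azure/storage-blob");

// 1. Connection string + container name (account-key strings are Node.js-only).
const fromConnString = new ContainerClient(
  "DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=<key>;EndpointSuffix=core.windows.net",
  "my-container"
);

// 2. Container URL + credential; the URL already names the container.
const credential = new StorageSharedKeyCredential("myaccount", "<base64-account-key>");
const fromCredential = new ContainerClient(
  "https://myaccount.blob.core.windows.net/my-container",
  credential
);

// 3. Container URL alone (e.g. carrying a SAS token); an AnonymousCredential is used.
const fromSasUrl = new ContainerClient(
  "https://myaccount.blob.core.windows.net/my-container?sv=<sas>"
);
```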
Here are some examples of source object URLs: - * - https://myaccount.blob.core.windows.net/mycontainer/myblob - * - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - * @param offset - From which position of the blob to download, greater than or equal to 0 - * @param count - How much data to be downloaded, greater than 0. Will download to the end when undefined - * @param options - Options to the Block Blob Stage Block From URL operation. - * @returns Response data for the Block Blob Stage Block From URL operation. + * @param options - Options to Container Create operation. + * + * + * Example usage: + * + * ```js + * const containerClient = blobServiceClient.getContainerClient(""); + * const createContainerResponse = await containerClient.create(); + * console.log("Container was created successfully", createContainerResponse.requestId); + * ``` */ - async stageBlockFromURL(blockId, sourceURL, offset = 0, count, options = {}) { - const { span, updatedOptions } = createSpan("BlockBlobClient-stageBlockFromURL", options); + async create(options = {}) { + const { span, updatedOptions } = createSpan("ContainerClient-create", options); try { - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - return await this.blockBlobContext.stageBlockFromURL(blockId, 0, sourceURL, Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, sourceContentMD5: options.sourceContentMD5, sourceContentCrc64: options.sourceContentCrc64, sourceRange: offset === 0 && !count ? undefined : rangeToString({ offset, count }), cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope, copySourceAuthorization: httpAuthorizationToString(options.sourceAuthorization) }, convertTracingToRequestOptionsBase(updatedOptions))); + // Spread operator in destructuring assignments, + // this will filter out unwanted properties from the response object into result object + return await this.containerContext.create(Object.assign(Object.assign({}, options), convertTracingToRequestOptionsBase(updatedOptions))); } catch (e) { span.setStatus({ @@ -43260,26 +42630,27 @@ class BlockBlobClient extends BlobClient { } } /** - * Writes a blob by specifying the list of block IDs that make up the blob. - * In order to be written as part of a blob, a block must have been successfully written - * to the server in a prior {@link stageBlock} operation. You can call {@link commitBlockList} to - * update a blob by uploading only those blocks that have changed, then committing the new and existing - * blocks together. Any blocks not specified in the block list and permanently deleted. - * @see https://docs.microsoft.com/rest/api/storageservices/put-block-list + * Creates a new container under the specified account. If the container with + * the same name already exists, it is not changed. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/create-container * - * @param blocks - Array of 64-byte value that is base64-encoded - * @param options - Options to the Block Blob Commit Block List operation. - * @returns Response data for the Block Blob Commit Block List operation. 
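The `commitBlockList` doc above describes the stage-then-commit contract: blocks are uploaded to a staging area first, and only the IDs named in the final commit become part of the blob. A minimal sketch of that flow, assuming an existing `containerClient` and an illustrative blob name:

```js
async function stagedUpload(containerClient) {
  const blockBlobClient = containerClient.getBlockBlobClient("staged.txt");
  const parts = ["hello ", "block ", "blob"];

  // Block IDs must be base64-encoded and all of the same length.
  const blockIds = parts.map((_, i) =>
    Buffer.from(String(i).padStart(6, "0")).toString("base64")
  );

  // Stage each block into the blob's staging area.
  for (let i = 0; i < parts.length; i++) {
    await blockBlobClient.stageBlock(blockIds[i], parts[i], Buffer.byteLength(parts[i]));
  }

  // Commit the list; staged blocks not named here are discarded.
  await blockBlobClient.commitBlockList(blockIds);
}
```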
+ * @param options - */ - async commitBlockList(blocks, options = {}) { - var _a, _b, _c; - options.conditions = options.conditions || {}; - const { span, updatedOptions } = createSpan("BlockBlobClient-commitBlockList", options); + async createIfNotExists(options = {}) { + var _a, _b; + const { span, updatedOptions } = createSpan("ContainerClient-createIfNotExists", options); try { - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - return await this.blockBlobContext.commitBlockList({ latest: blocks }, Object.assign({ abortSignal: options.abortSignal, blobHttpHeaders: options.blobHTTPHeaders, leaseAccessConditions: options.conditions, metadata: options.metadata, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope, immutabilityPolicyExpiry: (_b = options.immutabilityPolicy) === null || _b === void 0 ? void 0 : _b.expiriesOn, immutabilityPolicyMode: (_c = options.immutabilityPolicy) === null || _c === void 0 ? void 0 : _c.policyMode, legalHold: options.legalHold, tier: toAccessTier(options.tier), blobTagsString: toBlobTagsString(options.tags) }, convertTracingToRequestOptionsBase(updatedOptions))); + const res = await this.create(updatedOptions); + return Object.assign(Object.assign({ succeeded: true }, res), { _response: res._response }); } catch (e) { + if (((_a = e.details) === null || _a === void 0 ? void 0 : _a.errorCode) === "ContainerAlreadyExists") { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: "Expected exception when creating a container only if it does not already exist.", + }); + return Object.assign(Object.assign({ succeeded: false }, (_b = e.response) === null || _b === void 0 ? void 0 : _b.parsedHeaders), { _response: e.response }); + } span.setStatus({ code: coreTracing.SpanStatusCode.ERROR, message: e.message, @@ -43291,29 +42662,31 @@ class BlockBlobClient extends BlobClient { } } /** - * Returns the list of blocks that have been uploaded as part of a block blob - * using the specified block list filter. - * @see https://docs.microsoft.com/rest/api/storageservices/get-block-list + * Returns true if the Azure container resource represented by this client exists; false otherwise. * - * @param listType - Specifies whether to return the list of committed blocks, - * the list of uncommitted blocks, or both lists together. - * @param options - Options to the Block Blob Get Block List operation. - * @returns Response data for the Block Blob Get Block List operation. + * NOTE: use this function with care since an existing container might be deleted by other clients or + * applications. Conversely, new containers with the same name might be added by other clients or + * applications after this function completes. + * + * @param options - */ - async getBlockList(listType, options = {}) { - var _a; - const { span, updatedOptions } = createSpan("BlockBlobClient-getBlockList", options); + async exists(options = {}) { + const { span, updatedOptions } = createSpan("ContainerClient-exists", options); try { - const res = await this.blockBlobContext.getBlockList(listType, Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ?
void 0 : _a.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); - if (!res.committedBlocks) { - res.committedBlocks = []; - } - if (!res.uncommittedBlocks) { - res.uncommittedBlocks = []; - } - return res; + await this.getProperties({ + abortSignal: options.abortSignal, + tracingOptions: updatedOptions.tracingOptions, + }); + return true; } catch (e) { + if (e.statusCode === 404) { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: "Expected exception when checking container existence", + }); + return false; + } span.setStatus({ code: coreTracing.SpanStatusCode.ERROR, message: e.message, @@ -43324,176 +42697,68 @@ class BlockBlobClient extends BlobClient { span.end(); } } - // High level functions /** - * Uploads a Buffer(Node.js)/Blob(browsers)/ArrayBuffer/ArrayBufferView object to a BlockBlob. - * - * When data length is no more than the specifiled {@link BlockBlobParallelUploadOptions.maxSingleShotSize} (default is - * {@link BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES}), this method will use 1 {@link upload} call to finish the upload. - * Otherwise, this method will call {@link stageBlock} to upload blocks, and finally call {@link commitBlockList} - * to commit the block list. + * Creates a {@link BlobClient} * - * A common {@link BlockBlobParallelUploadOptions.blobHTTPHeaders} option to set is - * `blobContentType`, enabling the browser to provide - * functionality based on file type. + * @param blobName - A blob name + * @returns A new BlobClient object for the given blob name. + */ + getBlobClient(blobName) { + return new BlobClient(appendToURLPath(this.url, EscapePath(blobName)), this.pipeline); + } + /** + * Creates an {@link AppendBlobClient} * - * @param data - Buffer(Node.js), Blob, ArrayBuffer or ArrayBufferView - * @param options - + * @param blobName - An append blob name */ - async uploadData(data, options = {}) { - const { span, updatedOptions } = createSpan("BlockBlobClient-uploadData", options); - try { - if (coreHttp.isNode) { - let buffer; - if (data instanceof Buffer) { - buffer = data; - } - else if (data instanceof ArrayBuffer) { - buffer = Buffer.from(data); - } - else { - data = data; - buffer = Buffer.from(data.buffer, data.byteOffset, data.byteLength); - } - return this.uploadSeekableInternal((offset, size) => buffer.slice(offset, offset + size), buffer.byteLength, updatedOptions); - } - else { - const browserBlob = new Blob([data]); - return this.uploadSeekableInternal((offset, size) => browserBlob.slice(offset, offset + size), browserBlob.size, updatedOptions); - } - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } + getAppendBlobClient(blobName) { + return new AppendBlobClient(appendToURLPath(this.url, EscapePath(blobName)), this.pipeline); } /** - * ONLY AVAILABLE IN BROWSERS. + * Creates a {@link BlockBlobClient} * - * Uploads a browser Blob/File/ArrayBuffer/ArrayBufferView object to block blob. + * @param blobName - A block blob name * - * When buffer length lesser than or equal to 256MB, this method will use 1 upload call to finish the upload. - * Otherwise, this method will call {@link stageBlock} to upload blocks, and finally call - * {@link commitBlockList} to commit the block list. * - * A common {@link BlockBlobParallelUploadOptions.blobHTTPHeaders} option to set is - * `blobContentType`, enabling the browser to provide - * functionality based on file type. 
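The `exists()` implementation above, together with `createIfNotExists()`/`deleteIfExists()` from this hunk, converts expected service errors (a 404 on `getProperties`, `ContainerAlreadyExists`, `ContainerNotFound`) into return values rather than exceptions. A lifecycle sketch with a placeholder container name:

```js
async function containerLifecycle(blobServiceClient) {
  const containerClient = blobServiceClient.getContainerClient("scratch");

  // Succeeds whether or not the container is already there.
  const createRes = await containerClient.createIfNotExists();
  console.log(createRes.succeeded ? "created" : "already existed");

  // Inherently racy, as the doc warns: another client may add or remove
  // the container between this check and the next call.
  if (await containerClient.exists()) {
    const delRes = await containerClient.deleteIfExists();
    console.log(delRes.succeeded ? "deleted" : "was already gone");
  }
}
```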
+ * Example usage: * - * @deprecated Use {@link uploadData} instead. + * ```js + * const content = "Hello world!"; * - * @param browserData - Blob, File, ArrayBuffer or ArrayBufferView - * @param options - Options to upload browser data. - * @returns Response data for the Blob Upload operation. + * const blockBlobClient = containerClient.getBlockBlobClient(""); + * const uploadBlobResponse = await blockBlobClient.upload(content, content.length); + * ``` */ - async uploadBrowserData(browserData, options = {}) { - const { span, updatedOptions } = createSpan("BlockBlobClient-uploadBrowserData", options); - try { - const browserBlob = new Blob([browserData]); - return await this.uploadSeekableInternal((offset, size) => browserBlob.slice(offset, offset + size), browserBlob.size, updatedOptions); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } + getBlockBlobClient(blobName) { + return new BlockBlobClient(appendToURLPath(this.url, EscapePath(blobName)), this.pipeline); } /** + * Creates a {@link PageBlobClient} * - * Uploads data to block blob. Requires a bodyFactory as the data source, - * which need to return a {@link HttpRequestBody} object with the offset and size provided. + * @param blobName - A page blob name + */ + getPageBlobClient(blobName) { + return new PageBlobClient(appendToURLPath(this.url, EscapePath(blobName)), this.pipeline); + } + /** + * Returns all user-defined metadata and system properties for the specified + * container. The data returned does not include the container's list of blobs. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-properties * - * When data length is no more than the specified {@link BlockBlobParallelUploadOptions.maxSingleShotSize} (default is - * {@link BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES}), this method will use 1 {@link upload} call to finish the upload. - * Otherwise, this method will call {@link stageBlock} to upload blocks, and finally call {@link commitBlockList} - * to commit the block list. + * WARNING: The `metadata` object returned in the response will have its keys in lowercase, even if + * they originally contained uppercase characters. This differs from the metadata keys returned by + * the `listContainers` method of {@link BlobServiceClient} using the `includeMetadata` option, which + * will retain their original casing. * - * @param bodyFactory - - * @param size - size of the data to upload. - * @param options - Options to Upload to Block Blob operation. - * @returns Response data for the Blob Upload operation. + * @param options - Options to Container Get Properties operation. 
*/ - async uploadSeekableInternal(bodyFactory, size, options = {}) { - if (!options.blockSize) { - options.blockSize = 0; - } - if (options.blockSize < 0 || options.blockSize > BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES) { - throw new RangeError(`blockSize option must be >= 0 and <= ${BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES}`); - } - if (options.maxSingleShotSize !== 0 && !options.maxSingleShotSize) { - options.maxSingleShotSize = BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES; - } - if (options.maxSingleShotSize < 0 || - options.maxSingleShotSize > BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES) { - throw new RangeError(`maxSingleShotSize option must be >= 0 and <= ${BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES}`); - } - if (options.blockSize === 0) { - if (size > BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES * BLOCK_BLOB_MAX_BLOCKS) { - throw new RangeError(`${size} is too larger to upload to a block blob.`); - } - if (size > options.maxSingleShotSize) { - options.blockSize = Math.ceil(size / BLOCK_BLOB_MAX_BLOCKS); - if (options.blockSize < DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES) { - options.blockSize = DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES; - } - } - } - if (!options.blobHTTPHeaders) { - options.blobHTTPHeaders = {}; - } + async getProperties(options = {}) { if (!options.conditions) { options.conditions = {}; } - const { span, updatedOptions } = createSpan("BlockBlobClient-uploadSeekableInternal", options); + const { span, updatedOptions } = createSpan("ContainerClient-getProperties", options); try { - if (size <= options.maxSingleShotSize) { - return await this.upload(bodyFactory(0, size), size, updatedOptions); - } - const numBlocks = Math.floor((size - 1) / options.blockSize) + 1; - if (numBlocks > BLOCK_BLOB_MAX_BLOCKS) { - throw new RangeError(`The buffer's size is too big or the BlockSize is too small;` + - `the number of blocks must be <= ${BLOCK_BLOB_MAX_BLOCKS}`); - } - const blockList = []; - const blockIDPrefix = coreHttp.generateUuid(); - let transferProgress = 0; - const batch = new Batch(options.concurrency); - for (let i = 0; i < numBlocks; i++) { - batch.addOperation(async () => { - const blockID = generateBlockID(blockIDPrefix, i); - const start = options.blockSize * i; - const end = i === numBlocks - 1 ? size : start + options.blockSize; - const contentLength = end - start; - blockList.push(blockID); - await this.stageBlock(blockID, bodyFactory(start, contentLength), contentLength, { - abortSignal: options.abortSignal, - conditions: options.conditions, - encryptionScope: options.encryptionScope, - tracingOptions: updatedOptions.tracingOptions, - }); - // Update progress after block is successfully uploaded to server, in case of block trying - // TODO: Hook with convenience layer progress event in finer level - transferProgress += contentLength; - if (options.onProgress) { - options.onProgress({ - loadedBytes: transferProgress, - }); - } - }); - } - await batch.do(); - return this.commitBlockList(blockList, updatedOptions); + return await this.containerContext.getProperties(Object.assign(Object.assign({ abortSignal: options.abortSignal }, options.conditions), convertTracingToRequestOptionsBase(updatedOptions))); } catch (e) { span.setStatus({ @@ -43507,29 +42772,19 @@ class BlockBlobClient extends BlobClient { } } /** - * ONLY AVAILABLE IN NODE.JS RUNTIME. - * - * Uploads a local file in blocks to a block blob. - * - * When file size lesser than or equal to 256MB, this method will use 1 upload call to finish the upload. - * Otherwise, this method will call stageBlock to upload blocks, and finally call commitBlockList - * to commit the block list. 
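One practical consequence of the `getProperties` warning above: metadata keys round-trip in lowercase, even when set with mixed case via `setMetadata` (added later in this hunk). A small sketch assuming an existing `containerClient`:

```js
async function metadataCasing(containerClient) {
  await containerClient.setMetadata({ ProjectName: "demo" });

  const props = await containerClient.getProperties();
  // Keys come back lowercased, per the warning in the doc comment.
  console.log(props.metadata); // e.g. { projectname: 'demo' }
}
```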
+ * Marks the specified container for deletion. The container and any blobs + * contained within it are later deleted during garbage collection. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container * - * @param filePath - Full path of local file - * @param options - Options to Upload to Block Blob operation. - * @returns Response data for the Blob Upload operation. + * @param options - Options to Container Delete operation. */ - async uploadFile(filePath, options = {}) { - const { span, updatedOptions } = createSpan("BlockBlobClient-uploadFile", options); + async delete(options = {}) { + if (!options.conditions) { + options.conditions = {}; + } + const { span, updatedOptions } = createSpan("ContainerClient-delete", options); try { - const size = (await fsStat(filePath)).size; - return await this.uploadSeekableInternal((offset, count) => { - return () => fsCreateReadStream(filePath, { - autoClose: true, - end: count ? offset + count - 1 : Infinity, - start: offset, - }); - }, size, Object.assign(Object.assign({}, options), { tracingOptions: Object.assign(Object.assign({}, options.tracingOptions), convertTracingToRequestOptionsBase(updatedOptions)) })); + return await this.containerContext.delete(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: options.conditions }, convertTracingToRequestOptionsBase(updatedOptions))); } catch (e) { span.setStatus({ @@ -43543,167 +42798,59 @@ class BlockBlobClient extends BlobClient { } } /** - * ONLY AVAILABLE IN NODE.JS RUNTIME. - * - * Uploads a Node.js Readable stream into block blob. - * - * PERFORMANCE IMPROVEMENT TIPS: - * * Input stream highWaterMark is better to set a same value with bufferSize - * parameter, which will avoid Buffer.concat() operations. + * Marks the specified container for deletion if it exists. The container and any blobs + * contained within it are later deleted during garbage collection. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container * - * @param stream - Node.js Readable stream - * @param bufferSize - Size of every buffer allocated, also the block size in the uploaded block blob. Default value is 8MB - * @param maxConcurrency - Max concurrency indicates the max number of buffers that can be allocated, - * positive correlation with max uploading concurrency. Default value is 5 - * @param options - Options to Upload Stream to Block Blob operation. - * @returns Response data for the Blob Upload operation. + * @param options - Options to Container Delete operation. 
*/ - async uploadStream(stream, bufferSize = DEFAULT_BLOCK_BUFFER_SIZE_BYTES, maxConcurrency = 5, options = {}) { - if (!options.blobHTTPHeaders) { - options.blobHTTPHeaders = {}; - } - if (!options.conditions) { - options.conditions = {}; - } - const { span, updatedOptions } = createSpan("BlockBlobClient-uploadStream", options); + async deleteIfExists(options = {}) { + var _a, _b; + const { span, updatedOptions } = createSpan("ContainerClient-deleteIfExists", options); try { - let blockNum = 0; - const blockIDPrefix = coreHttp.generateUuid(); - let transferProgress = 0; - const blockList = []; - const scheduler = new BufferScheduler(stream, bufferSize, maxConcurrency, async (body, length) => { - const blockID = generateBlockID(blockIDPrefix, blockNum); - blockList.push(blockID); - blockNum++; - await this.stageBlock(blockID, body, length, { - conditions: options.conditions, - encryptionScope: options.encryptionScope, - tracingOptions: updatedOptions.tracingOptions, - }); - // Update progress after block is successfully uploaded to server, in case of block trying - transferProgress += length; - if (options.onProgress) { - options.onProgress({ loadedBytes: transferProgress }); - } - }, - // concurrency should set a smaller value than maxConcurrency, which is helpful to - // reduce the possibility when a outgoing handler waits for stream data, in - // this situation, outgoing handlers are blocked. - // Outgoing queue shouldn't be empty. - Math.ceil((maxConcurrency / 4) * 3)); - await scheduler.do(); - return await this.commitBlockList(blockList, Object.assign(Object.assign({}, options), { tracingOptions: Object.assign(Object.assign({}, options.tracingOptions), convertTracingToRequestOptionsBase(updatedOptions)) })); + const res = await this.delete(updatedOptions); + return Object.assign(Object.assign({ succeeded: true }, res), { _response: res._response }); } catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } - } -} -/** - * PageBlobClient defines a set of operations applicable to page blobs. - */ -class PageBlobClient extends BlobClient { - constructor(urlOrConnectionString, credentialOrPipelineOrContainerName, blobNameOrOptions, - // Legacy, no fix for eslint error without breaking. Disable it for this interface. - /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/ - options) { - // In TypeScript we cannot simply pass all parameters to super() like below so have to duplicate the code instead. 
- // super(s, credentialOrPipelineOrContainerNameOrOptions, blobNameOrOptions, options); - let pipeline; - let url; - options = options || {}; - if (isPipelineLike(credentialOrPipelineOrContainerName)) { - // (url: string, pipeline: Pipeline) - url = urlOrConnectionString; - pipeline = credentialOrPipelineOrContainerName; - } - else if ((coreHttp.isNode && credentialOrPipelineOrContainerName instanceof StorageSharedKeyCredential) || - credentialOrPipelineOrContainerName instanceof AnonymousCredential || - coreHttp.isTokenCredential(credentialOrPipelineOrContainerName)) { - // (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions) - url = urlOrConnectionString; - options = blobNameOrOptions; - pipeline = newPipeline(credentialOrPipelineOrContainerName, options); - } - else if (!credentialOrPipelineOrContainerName && - typeof credentialOrPipelineOrContainerName !== "string") { - // (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions) - // The second parameter is undefined. Use anonymous credential. - url = urlOrConnectionString; - pipeline = newPipeline(new AnonymousCredential(), options); - } - else if (credentialOrPipelineOrContainerName && - typeof credentialOrPipelineOrContainerName === "string" && - blobNameOrOptions && - typeof blobNameOrOptions === "string") { - // (connectionString: string, containerName: string, blobName: string, options?: StoragePipelineOptions) - const containerName = credentialOrPipelineOrContainerName; - const blobName = blobNameOrOptions; - const extractedCreds = extractConnectionStringParts(urlOrConnectionString); - if (extractedCreds.kind === "AccountConnString") { - if (coreHttp.isNode) { - const sharedKeyCredential = new StorageSharedKeyCredential(extractedCreds.accountName, extractedCreds.accountKey); - url = appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName)); - if (!options.proxyOptions) { - options.proxyOptions = coreHttp.getDefaultProxySettings(extractedCreds.proxyUri); - } - pipeline = newPipeline(sharedKeyCredential, options); - } - else { - throw new Error("Account connection string is only supported in Node.js environment"); - } - } - else if (extractedCreds.kind === "SASConnString") { - url = - appendToURLPath(appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)), encodeURIComponent(blobName)) + - "?" + - extractedCreds.accountSas; - pipeline = newPipeline(new AnonymousCredential(), options); - } - else { - throw new Error("Connection string must be either an Account connection string or a SAS connection string"); + if (((_a = e.details) === null || _a === void 0 ? void 0 : _a.errorCode) === "ContainerNotFound") { + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: "Expected exception when deleting a container only if it exists.", + }); + return Object.assign(Object.assign({ succeeded: false }, (_b = e.response) === null || _b === void 0 ? void 0 : _b.parsedHeaders), { _response: e.response }); } - } - else { - throw new Error("Expecting non-empty strings for containerName and blobName parameters"); - } - super(url, pipeline); - this.pageBlobContext = new PageBlob(this.storageClientContext); - } - /** - * Creates a new PageBlobClient object identical to the source but with the - * specified snapshot timestamp. - * Provide "" will remove the snapshot and return a Client to the base blob. 
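For reference, the `BlockBlobClient.uploadStream` path shown above pumps buffers of `bufferSize` bytes through up to `maxConcurrency` parallel `stageBlock` calls and then commits the block list. Driving it might look like this (Node.js only; the file name and sizes are illustrative):

```js
const fs = require("fs");

async function streamUpload(containerClient) {
  const blockBlobClient = containerClient.getBlockBlobClient("big.bin");

  // Matching highWaterMark to bufferSize avoids Buffer.concat() work,
  // as the uploadStream doc above suggests.
  const stream = fs.createReadStream("./big.bin", { highWaterMark: 4 * 1024 * 1024 });

  await blockBlobClient.uploadStream(stream, 4 * 1024 * 1024, 5, {
    onProgress: (ev) => console.log(`uploaded ${ev.loadedBytes} bytes`),
  });
}
```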
- * - * @param snapshot - The snapshot timestamp. - * @returns A new PageBlobClient object identical to the source but with the specified snapshot timestamp. - */ - withSnapshot(snapshot) { - return new PageBlobClient(setURLParameter(this.url, URLConstants.Parameters.SNAPSHOT, snapshot.length === 0 ? undefined : snapshot), this.pipeline); + span.setStatus({ + code: coreTracing.SpanStatusCode.ERROR, + message: e.message, + }); + throw e; + } + finally { + span.end(); + } } /** - * Creates a page blob of the specified length. Call uploadPages to upload data - * to a page blob. - * @see https://docs.microsoft.com/rest/api/storageservices/put-blob + * Sets one or more user-defined name-value pairs for the specified container. * - * @param size - size of the page blob. - * @param options - Options to the Page Blob Create operation. - * @returns Response data for the Page Blob Create operation. + * If no option is provided, or if no metadata is defined in the parameter, the container + * metadata will be removed. + * + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-metadata + * + * @param metadata - Replace existing metadata with this value. + * If no value is provided, the existing metadata will be removed. + * @param options - Options to Container Set Metadata operation. */ - async create(size, options = {}) { - var _a, _b, _c; - options.conditions = options.conditions || {}; - const { span, updatedOptions } = createSpan("PageBlobClient-create", options); + async setMetadata(metadata, options = {}) { + if (!options.conditions) { + options.conditions = {}; + } + if (options.conditions.ifUnmodifiedSince) { + throw new RangeError("the IfUnmodifiedSince must have its default value because it is ignored by the blob service"); + } + const { span, updatedOptions } = createSpan("ContainerClient-setMetadata", options); try { - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - return await this.pageBlobContext.create(0, size, Object.assign({ abortSignal: options.abortSignal, blobHttpHeaders: options.blobHTTPHeaders, blobSequenceNumber: options.blobSequenceNumber, leaseAccessConditions: options.conditions, metadata: options.metadata, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope, immutabilityPolicyExpiry: (_b = options.immutabilityPolicy) === null || _b === void 0 ? void 0 : _b.expiriesOn, immutabilityPolicyMode: (_c = options.immutabilityPolicy) === null || _c === void 0 ? void 0 : _c.policyMode, legalHold: options.legalHold, tier: toAccessTier(options.tier), blobTagsString: toBlobTagsString(options.tags) }, convertTracingToRequestOptionsBase(updatedOptions))); + return await this.containerContext.setMetadata(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, metadata, modifiedAccessConditions: options.conditions }, convertTracingToRequestOptionsBase(updatedOptions))); } catch (e) { span.setStatus({ @@ -43717,30 +42864,56 @@ class PageBlobClient extends BlobClient { } } /** - * Creates a page blob of the specified length. Call uploadPages to upload data - * to a page blob. If the blob with the same name already exists, the content - * of the existing blob will remain unchanged. - * @see https://docs.microsoft.com/rest/api/storageservices/put-blob + * Gets the permissions for the specified container.
The permissions indicate + * whether container data may be accessed publicly. * - * @param size - size of the page blob. - * @param options - + * WARNING: JavaScript Date will potentially lose precision when parsing startsOn and expiresOn strings. + * For example, new Date("2018-12-31T03:44:23.8827891Z").toISOString() will get "2018-12-31T03:44:23.882Z". + * + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-acl + * + * @param options - Options to Container Get Access Policy operation. */ - async createIfNotExists(size, options = {}) { - var _a, _b; - const { span, updatedOptions } = createSpan("PageBlobClient-createIfNotExists", options); - try { - const conditions = { ifNoneMatch: ETagAny }; - const res = await this.create(size, Object.assign(Object.assign({}, options), { conditions, tracingOptions: updatedOptions.tracingOptions })); - return Object.assign(Object.assign({ succeeded: true }, res), { _response: res._response }); + async getAccessPolicy(options = {}) { + if (!options.conditions) { + options.conditions = {}; } - catch (e) { - if (((_a = e.details) === null || _a === void 0 ? void 0 : _a.errorCode) === "BlobAlreadyExists") { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: "Expected exception when creating a blob only if it does not already exist.", + const { span, updatedOptions } = createSpan("ContainerClient-getAccessPolicy", options); + try { + const response = await this.containerContext.getAccessPolicy(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions }, convertTracingToRequestOptionsBase(updatedOptions))); + const res = { + _response: response._response, + blobPublicAccess: response.blobPublicAccess, + date: response.date, + etag: response.etag, + errorCode: response.errorCode, + lastModified: response.lastModified, + requestId: response.requestId, + clientRequestId: response.clientRequestId, + signedIdentifiers: [], + version: response.version, + }; + for (const identifier of response) { + let accessPolicy = undefined; + if (identifier.accessPolicy) { + accessPolicy = { + permissions: identifier.accessPolicy.permissions, + }; + if (identifier.accessPolicy.expiresOn) { + accessPolicy.expiresOn = new Date(identifier.accessPolicy.expiresOn); + } + if (identifier.accessPolicy.startsOn) { + accessPolicy.startsOn = new Date(identifier.accessPolicy.startsOn); + } + } + res.signedIdentifiers.push({ + accessPolicy, + id: identifier.id, }); - return Object.assign(Object.assign({ succeeded: false }, (_b = e.response) === null || _b === void 0 ? void 0 : _b.parsedHeaders), { _response: e.response }); } + return res; + } + catch (e) { span.setStatus({ code: coreTracing.SpanStatusCode.ERROR, message: e.message, @@ -43752,24 +42925,42 @@ class PageBlobClient extends BlobClient { } } /** - * Writes 1 or more pages to the page blob. The start and end offsets must be a multiple of 512. - * @see https://docs.microsoft.com/rest/api/storageservices/put-page + * Sets the permissions for the specified container. The permissions indicate + * whether blobs in a container may be accessed publicly. * - * @param body - Data to upload - * @param offset - Offset of destination page blob - * @param count - Content length of the body, also number of bytes to be uploaded - * @param options - Options to the Page Blob Upload Pages operation. - * @returns Response data for the Page Blob Upload Pages operation. + * When you set permissions for a container, the existing permissions are replaced. 
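`getAccessPolicy` above rebuilds the raw signed identifiers into `Date`-typed policies; `setAccessPolicy`, whose doc continues below, performs the inverse via `truncatedISO8061Date`. A round-trip sketch with an illustrative policy id:

```js
async function accessPolicyRoundTrip(containerClient) {
  const now = new Date();
  const inOneHour = new Date(now.getTime() + 60 * 60 * 1000);

  // Replaces the container's entire ACL; identifiers not listed here are dropped.
  await containerClient.setAccessPolicy(undefined, [
    {
      id: "read-only-1h",
      accessPolicy: { startsOn: now, expiresOn: inOneHour, permissions: "r" },
    },
  ]);

  const acl = await containerClient.getAccessPolicy();
  for (const identifier of acl.signedIdentifiers) {
    // Parsed Dates may lose sub-millisecond precision, per the warning above.
    console.log(identifier.id, identifier.accessPolicy.permissions);
  }
}
```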
+ * If no access or containerAcl provided, the existing container ACL will be + * removed. + * + * When you establish a stored access policy on a container, it may take up to 30 seconds to take effect. + * During this interval, a shared access signature that is associated with the stored access policy will + * fail with status code 403 (Forbidden), until the access policy becomes active. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-acl + * + * @param access - The level of public access to data in the container. + * @param containerAcl - Array of elements each having a unique Id and details of the access policy. + * @param options - Options to Container Set Access Policy operation. */ - async uploadPages(body, offset, count, options = {}) { - var _a; + async setAccessPolicy(access, containerAcl, options = {}) { options.conditions = options.conditions || {}; - const { span, updatedOptions } = createSpan("PageBlobClient-uploadPages", options); + const { span, updatedOptions } = createSpan("ContainerClient-setAccessPolicy", options); try { - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - return await this.pageBlobContext.uploadPages(count, body, Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), requestOptions: { - onUploadProgress: options.onProgress, - }, range: rangeToString({ offset, count }), sequenceNumberAccessConditions: options.conditions, transactionalContentMD5: options.transactionalContentMD5, transactionalContentCrc64: options.transactionalContentCrc64, cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope }, convertTracingToRequestOptionsBase(updatedOptions))); + const acl = []; + for (const identifier of containerAcl || []) { + acl.push({ + accessPolicy: { + expiresOn: identifier.accessPolicy.expiresOn + ? truncatedISO8061Date(identifier.accessPolicy.expiresOn) + : "", + permissions: identifier.accessPolicy.permissions, + startsOn: identifier.accessPolicy.startsOn + ? truncatedISO8061Date(identifier.accessPolicy.startsOn) + : "", + }, + id: identifier.id, + }); + } + return await this.containerContext.setAccessPolicy(Object.assign({ abortSignal: options.abortSignal, access, containerAcl: acl, leaseAccessConditions: options.conditions, modifiedAccessConditions: options.conditions }, convertTracingToRequestOptionsBase(updatedOptions))); } catch (e) { span.setStatus({ @@ -43783,29 +42974,45 @@ class PageBlobClient extends BlobClient { } } /** - * The Upload Pages operation writes a range of pages to a page blob where the - * contents are read from a URL. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/put-page-from-url + * Get a {@link BlobLeaseClient} that manages leases on the container. * - * @param sourceURL - Specify a URL to the copy source, Shared Access Signature(SAS) maybe needed for authentication - * @param sourceOffset - The source offset to copy from. Pass 0 to copy from the beginning of source page blob - * @param destOffset - Offset of destination page blob - * @param count - Number of bytes to be uploaded from source page blob - * @param options - + * @param proposeLeaseId - Initial proposed lease Id. + * @returns A new BlobLeaseClient object for managing leases on the container. 
*/ - async uploadPagesFromURL(sourceURL, sourceOffset, destOffset, count, options = {}) { - var _a; - options.conditions = options.conditions || {}; - options.sourceConditions = options.sourceConditions || {}; - const { span, updatedOptions } = createSpan("PageBlobClient-uploadPagesFromURL", options); + getBlobLeaseClient(proposeLeaseId) { + return new BlobLeaseClient(this, proposeLeaseId); + } + /** + * Creates a new block blob, or updates the content of an existing block blob. + * + * Updating an existing block blob overwrites any existing metadata on the blob. + * Partial updates are not supported; the content of the existing blob is + * overwritten with the new content. To perform a partial update of a block blob's contents, + * use {@link BlockBlobClient.stageBlock} and {@link BlockBlobClient.commitBlockList}. + * + * This is a non-parallel uploading method; please use {@link BlockBlobClient.uploadFile}, + * {@link BlockBlobClient.uploadStream} or {@link BlockBlobClient.uploadBrowserData} for better + * performance with concurrent uploading. + * + * @see https://docs.microsoft.com/rest/api/storageservices/put-blob + * + * @param blobName - Name of the block blob to create or update. + * @param body - Blob, string, ArrayBuffer, ArrayBufferView or a function + * which returns a new Readable stream whose offset is from data source beginning. + * @param contentLength - Length of body in bytes. Use Buffer.byteLength() to calculate body length for a + * string including non-Base64/Hex-encoded characters. + * @param options - Options to configure the Block Blob Upload operation. + * @returns Block Blob upload response data and the corresponding BlockBlobClient instance. + */ + async uploadBlockBlob(blobName, body, contentLength, options = {}) { + const { span, updatedOptions } = createSpan("ContainerClient-uploadBlockBlob", options); try { - ensureCpkIfSpecified(options.customerProvidedKey, this.isHttps); - return await this.pageBlobContext.uploadPagesFromURL(sourceURL, rangeToString({ offset: sourceOffset, count }), 0, rangeToString({ offset: destOffset, count }), Object.assign({ abortSignal: options.abortSignal, sourceContentMD5: options.sourceContentMD5, sourceContentCrc64: options.sourceContentCrc64, leaseAccessConditions: options.conditions, sequenceNumberAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), sourceModifiedAccessConditions: { - sourceIfMatch: options.sourceConditions.ifMatch, - sourceIfModifiedSince: options.sourceConditions.ifModifiedSince, - sourceIfNoneMatch: options.sourceConditions.ifNoneMatch, - sourceIfUnmodifiedSince: options.sourceConditions.ifUnmodifiedSince, - }, cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope, copySourceAuthorization: httpAuthorizationToString(options.sourceAuthorization) }, convertTracingToRequestOptionsBase(updatedOptions))); + const blockBlobClient = this.getBlockBlobClient(blobName); + const response = await blockBlobClient.upload(body, contentLength, updatedOptions); + return { + blockBlobClient, + response, + }; } catch (e) { span.setStatus({ @@ -43819,20 +43026,24 @@ class PageBlobClient extends BlobClient { } } /** - * Frees the specified pages from the page blob. - * @see https://docs.microsoft.com/rest/api/storageservices/put-page + * Marks the specified blob or snapshot for deletion. The blob is later deleted + during garbage collection.
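`uploadBlockBlob` above is a convenience wrapper: it builds the `BlockBlobClient` and performs the upload in one call, returning both the client and the raw response; `deleteBlob` (documented next) mirrors it for deletion. For example, with illustrative names:

```js
async function uploadThenDelete(containerClient) {
  const content = "Hello world!";

  // One call: create/overwrite "greeting.txt" and get its client back.
  const { blockBlobClient, response } = await containerClient.uploadBlockBlob(
    "greeting.txt",
    content,
    Buffer.byteLength(content)
  );
  console.log("etag:", response.etag, "url:", blockBlobClient.url);

  await containerClient.deleteBlob("greeting.txt");
}
```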
Note that in order to delete a blob, you must delete + all of its snapshots. You can delete both at the same time with the Delete + Blob operation. + @see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob * - * @param offset - Starting byte position of the pages to clear. - * @param count - Number of bytes to clear. - * @param options - Options to the Page Blob Clear Pages operation. - * @returns Response data for the Page Blob Clear Pages operation. + * @param blobName - + * @param options - Options to Blob Delete operation. + * @returns Block blob deletion response data. */ - async clearPages(offset = 0, count, options = {}) { - var _a; - options.conditions = options.conditions || {}; - const { span, updatedOptions } = createSpan("PageBlobClient-clearPages", options); + async deleteBlob(blobName, options = {}) { + const { span, updatedOptions } = createSpan("ContainerClient-deleteBlob", options); try { - return await this.pageBlobContext.clearPages(0, Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), range: rangeToString({ offset, count }), sequenceNumberAccessConditions: options.conditions, cpkInfo: options.customerProvidedKey, encryptionScope: options.encryptionScope }, convertTracingToRequestOptionsBase(updatedOptions))); + let blobClient = this.getBlobClient(blobName); + if (options.versionId) { + blobClient = blobClient.withVersion(options.versionId); + } + return await blobClient.delete(updatedOptions); } catch (e) { span.setStatus({ @@ -43846,22 +43057,28 @@ class PageBlobClient extends BlobClient { } } /** - * Returns the list of valid page ranges for a page blob or snapshot of a page blob. - * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges + * listBlobFlatSegment returns a single segment of blobs starting from the + * specified Marker. Use an empty Marker to start enumeration from the beginning. + * After getting a segment, process it, and then call listBlobsFlatSegment again + * (passing the previously-returned Marker) to get the next segment. + * @see https://docs.microsoft.com/rest/api/storageservices/list-blobs * - * @param offset - Starting byte position of the page ranges. - * @param count - Number of bytes to get. - * @param options - Options to the Page Blob Get Ranges operation. - * @returns Response data for the Page Blob Get Ranges operation. + * @param marker - A string value that identifies the portion of the list to be returned with the next list operation. + * @param options - Options to Container List Blob Flat Segment operation. */ - async getPageRanges(offset = 0, count, options = {}) { - var _a; - options.conditions = options.conditions || {}; - const { span, updatedOptions } = createSpan("PageBlobClient-getPageRanges", options); + async listBlobFlatSegment(marker, options = {}) { + const { span, updatedOptions } = createSpan("ContainerClient-listBlobFlatSegment", options); try { - return await this.pageBlobContext - .getPageRanges(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ?
void 0 : _a.tagConditions }), range: rangeToString({ offset, count }) }, convertTracingToRequestOptionsBase(updatedOptions))) - .then(rangeResponseFromModel); + const response = await this.containerContext.listBlobFlatSegment(Object.assign(Object.assign({ marker }, options), convertTracingToRequestOptionsBase(updatedOptions))); + response.segment.blobItems = []; + if (response.segment["Blob"] !== undefined) { + response.segment.blobItems = ProcessBlobItems(response.segment["Blob"]); + } + const wrappedResponse = Object.assign(Object.assign({}, response), { _response: Object.assign(Object.assign({}, response._response), { parsedBody: ConvertInternalResponseOfListBlobFlat(response._response.parsedBody) }), segment: Object.assign(Object.assign({}, response.segment), { blobItems: response.segment.blobItems.map((blobItemInteral) => { + const blobItem = Object.assign(Object.assign({}, blobItemInteral), { name: BlobNameToString(blobItemInteral.name), tags: toTags(blobItemInteral.blobTags), objectReplicationSourceProperties: parseObjectReplicationRecord(blobItemInteral.objectReplicationMetadata) }); + return blobItem; + }) }) }); + return wrappedResponse; } catch (e) { span.setStatus({ @@ -43875,22 +43092,39 @@ class PageBlobClient extends BlobClient { } } /** - * getPageRangesSegment returns a single segment of page ranges starting from the - * specified Marker. Use an empty Marker to start enumeration from the beginning. - * After getting a segment, process it, and then call getPageRangesSegment again - * (passing the previously-returned Marker) to get the next segment. - * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges + * listBlobHierarchySegment returns a single segment of blobs starting from + * the specified Marker. Use an empty Marker to start enumeration from the + * beginning. After getting a segment, process it, and then call listBlobsHierarchicalSegment + * again (passing the previously-returned Marker) to get the next segment. + * @see https://docs.microsoft.com/rest/api/storageservices/list-blobs * - * @param offset - Starting byte position of the page ranges. - * @param count - Number of bytes to get. + * @param delimiter - The character or string used to define the virtual hierarchy * @param marker - A string value that identifies the portion of the list to be returned with the next list operation. - * @param options - Options to PageBlob Get Page Ranges Segment operation. + * @param options - Options to Container List Blob Hierarchy Segment operation. */ - async listPageRangesSegment(offset = 0, count, marker, options = {}) { + async listBlobHierarchySegment(delimiter, marker, options = {}) { var _a; - const { span, updatedOptions } = createSpan("PageBlobClient-getPageRangesSegment", options); + const { span, updatedOptions } = createSpan("ContainerClient-listBlobHierarchySegment", options); try { - return await this.pageBlobContext.getPageRanges(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ?
void 0 : _a.tagConditions }), range: rangeToString({ offset, count }), marker: marker, maxPageSize: options.maxPageSize }, convertTracingToRequestOptionsBase(updatedOptions))); + const response = await this.containerContext.listBlobHierarchySegment(delimiter, Object.assign(Object.assign({ marker }, options), convertTracingToRequestOptionsBase(updatedOptions))); + response.segment.blobItems = []; + if (response.segment["Blob"] !== undefined) { + response.segment.blobItems = ProcessBlobItems(response.segment["Blob"]); + } + response.segment.blobPrefixes = []; + if (response.segment["BlobPrefix"] !== undefined) { + response.segment.blobPrefixes = ProcessBlobPrefixes(response.segment["BlobPrefix"]); + } + const wrappedResponse = Object.assign(Object.assign({}, response), { _response: Object.assign(Object.assign({}, response._response), { parsedBody: ConvertInternalResponseOfListBlobHierarchy(response._response.parsedBody) }), segment: Object.assign(Object.assign({}, response.segment), { blobItems: response.segment.blobItems.map((blobItemInteral) => { + const blobItem = Object.assign(Object.assign({}, blobItemInteral), { name: BlobNameToString(blobItemInteral.name), tags: toTags(blobItemInteral.blobTags), objectReplicationSourceProperties: parseObjectReplicationRecord(blobItemInteral.objectReplicationMetadata) }); + return blobItem; + }), blobPrefixes: (_a = response.segment.blobPrefixes) === null || _a === void 0 ? void 0 : _a.map((blobPrefixInternal) => { + const blobPrefix = { + name: BlobNameToString(blobPrefixInternal.name), + }; + return blobPrefix; + }) }) }); + return wrappedResponse; } catch (e) { span.setStatus({ @@ -43904,46 +43138,42 @@ class PageBlobClient extends BlobClient { } } /** - * Returns an AsyncIterableIterator for {@link PageBlobGetPageRangesResponseModel} + * Returns an AsyncIterableIterator for ContainerListBlobFlatSegmentResponse * - * @param offset - Starting byte position of the page ranges. - * @param count - Number of bytes to get. * @param marker - A string value that identifies the portion of - * the get of page ranges to be returned with the next getting operation. The + * the list of blobs to be returned with the next listing operation. The * operation returns the ContinuationToken value within the response body if the - * getting operation did not return all page ranges remaining within the current page. - * The ContinuationToken value can be used as the value for - * the marker parameter in a subsequent call to request the next page of get + * listing operation did not return all blobs remaining to be listed + * with the current page. The ContinuationToken value can be used as the value for + * the marker parameter in a subsequent call to request the next page of list * items. The marker value is opaque to the client. - * @param options - Options to List Page Ranges operation. + * @param options - Options to list blobs operation. 
*/
- listPageRangeItemSegments(offset = 0, count, marker, options = {}) {
- return tslib.__asyncGenerator(this, arguments, function* listPageRangeItemSegments_1() {
- let getPageRangeItemSegmentsResponse;
+ listSegments(marker, options = {}) {
+ return tslib.__asyncGenerator(this, arguments, function* listSegments_1() {
+ let listBlobsFlatSegmentResponse;
if (!!marker || marker === undefined) {
do {
- getPageRangeItemSegmentsResponse = yield tslib.__await(this.listPageRangesSegment(offset, count, marker, options));
- marker = getPageRangeItemSegmentsResponse.continuationToken;
- yield yield tslib.__await(yield tslib.__await(getPageRangeItemSegmentsResponse));
+ listBlobsFlatSegmentResponse = yield tslib.__await(this.listBlobFlatSegment(marker, options));
+ marker = listBlobsFlatSegmentResponse.continuationToken;
+ yield yield tslib.__await(yield tslib.__await(listBlobsFlatSegmentResponse));
} while (marker);
}
});
}
/**
- * Returns an AsyncIterableIterator of {@link PageRangeInfo} objects
+ * Returns an AsyncIterableIterator of {@link BlobItem} objects
*
- * @param offset - Starting byte position of the page ranges.
- * @param count - Number of bytes to get.
- * @param options - Options to List Page Ranges operation.
+ * @param options - Options to list blobs operation.
*/
- listPageRangeItems(offset = 0, count, options = {}) {
- return tslib.__asyncGenerator(this, arguments, function* listPageRangeItems_1() {
+ listItems(options = {}) {
+ return tslib.__asyncGenerator(this, arguments, function* listItems_1() {
var e_1, _a;
let marker;
try {
- for (var _b = tslib.__asyncValues(this.listPageRangeItemSegments(offset, count, marker, options)), _c; _c = yield tslib.__await(_b.next()), !_c.done;) {
- const getPageRangesSegment = _c.value;
- yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(ExtractPageRangeInfoItems(getPageRangesSegment))));
+ for (var _b = tslib.__asyncValues(this.listSegments(marker, options)), _c; _c = yield tslib.__await(_b.next()), !_c.done;) {
+ const listBlobsFlatSegmentResponse = _c.value;
+ yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(listBlobsFlatSegmentResponse.segment.blobItems)));
}
}
catch (e_1_1) { e_1 = { error: e_1_1 }; }
@@ -43956,19 +43186,19 @@ class PageBlobClient extends BlobClient {
});
}
/**
- * Returns an async iterable iterator to list of page ranges for a page blob.
- * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges
+ * Returns an async iterable iterator to list all the blobs
+ * under the specified container.
*
- * .byPage() returns an async iterable iterator to list of page ranges for a page blob.
+ * .byPage() returns an async iterable iterator to list the blobs in pages.
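The down-leveled `listSegments` generator above is just a continuation-token loop. A minimal sketch of the same pattern in plain async-generator syntax, with a hypothetical `fetchPage` standing in for the `listBlobFlatSegment` call:

```js
// Minimal sketch, assuming `fetchPage(marker)` resolves to a response that carries
// a `continuationToken` property, the way listBlobFlatSegment's response does.
async function* listSegments(fetchPage, marker) {
  // An explicitly empty marker means the listing is already exhausted;
  // undefined means start from the beginning.
  if (!!marker || marker === undefined) {
    do {
      const response = await fetchPage(marker);
      marker = response.continuationToken; // falsy once the listing is complete
      yield response;
    } while (marker);
  }
}
```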
* * Example using `for await` syntax: * * ```js - * // Get the pageBlobClient before you run these snippets, - * // Can be obtained from `blobServiceClient.getContainerClient("").getPageBlobClient("");` + * // Get the containerClient before you run these snippets, + * // Can be obtained from `blobServiceClient.getContainerClient("");` * let i = 1; - * for await (const pageRange of pageBlobClient.listPageRanges()) { - * console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`); + * for await (const blob of containerClient.listBlobsFlat()) { + * console.log(`Blob ${i++}: ${blob.name}`); * } * ``` * @@ -43976,11 +43206,11 @@ class PageBlobClient extends BlobClient { * * ```js * let i = 1; - * let iter = pageBlobClient.listPageRanges(); - * let pageRangeItem = await iter.next(); - * while (!pageRangeItem.done) { - * console.log(`Page range ${i++}: ${pageRangeItem.value.start} - ${pageRangeItem.value.end}, IsClear: ${pageRangeItem.value.isClear}`); - * pageRangeItem = await iter.next(); + * let iter = containerClient.listBlobsFlat(); + * let blobItem = await iter.next(); + * while (!blobItem.done) { + * console.log(`Blob ${i++}: ${blobItem.value.name}`); + * blobItem = await iter.next(); * } * ``` * @@ -43989,9 +43219,9 @@ class PageBlobClient extends BlobClient { * ```js * // passing optional maxPageSize in the page settings * let i = 1; - * for await (const response of pageBlobClient.listPageRanges().byPage({ maxPageSize: 20 })) { - * for (const pageRange of response) { - * console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`); + * for await (const response of containerClient.listBlobsFlat().byPage({ maxPageSize: 20 })) { + * for (const blob of response.segment.blobItems) { + * console.log(`Blob ${i++}: ${blob.name}`); * } * } * ``` @@ -44000,12 +43230,12 @@ class PageBlobClient extends BlobClient { * * ```js * let i = 1; - * let iterator = pageBlobClient.listPageRanges().byPage({ maxPageSize: 2 }); + * let iterator = containerClient.listBlobsFlat().byPage({ maxPageSize: 2 }); * let response = (await iterator.next()).value; * - * // Prints 2 page ranges - * for (const pageRange of response) { - * console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`); + * // Prints 2 blob names + * for (const blob of response.segment.blobItems) { + * console.log(`Blob ${i++}: ${blob.name}`); * } * * // Gets next marker @@ -44013,23 +43243,56 @@ class PageBlobClient extends BlobClient { * * // Passing next marker as continuationToken * - * iterator = pageBlobClient.listPageRanges().byPage({ continuationToken: marker, maxPageSize: 10 }); + * iterator = containerClient.listBlobsFlat().byPage({ continuationToken: marker, maxPageSize: 10 }); * response = (await iterator.next()).value; * - * // Prints 10 page ranges - * for (const blob of response) { - * console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`); + * // Prints 10 blob names + * for (const blob of response.segment.blobItems) { + * console.log(`Blob ${i++}: ${blob.name}`); * } * ``` - * @param offset - Starting byte position of the page ranges. - * @param count - Number of bytes to get. - * @param options - Options to the Page Blob Get Ranges operation. + * + * @param options - Options to list blobs. * @returns An asyncIterableIterator that supports paging. 
*/ - listPageRanges(offset = 0, count, options = {}) { - options.conditions = options.conditions || {}; + listBlobsFlat(options = {}) { + const include = []; + if (options.includeCopy) { + include.push("copy"); + } + if (options.includeDeleted) { + include.push("deleted"); + } + if (options.includeMetadata) { + include.push("metadata"); + } + if (options.includeSnapshots) { + include.push("snapshots"); + } + if (options.includeVersions) { + include.push("versions"); + } + if (options.includeUncommitedBlobs) { + include.push("uncommittedblobs"); + } + if (options.includeTags) { + include.push("tags"); + } + if (options.includeDeletedWithVersions) { + include.push("deletedwithversions"); + } + if (options.includeImmutabilityPolicy) { + include.push("immutabilitypolicy"); + } + if (options.includeLegalHold) { + include.push("legalhold"); + } + if (options.prefix === "") { + options.prefix = undefined; + } + const updatedOptions = Object.assign(Object.assign({}, options), (include.length > 0 ? { include: include } : {})); // AsyncIterableIterator to iterate over blobs - const iter = this.listPageRangeItems(offset, count, options); + const iter = this.listItems(updatedOptions); return { /** * The next method, part of the iteration protocol @@ -44047,118 +43310,57 @@ class PageBlobClient extends BlobClient { * Return an AsyncIterableIterator that works a page at a time */ byPage: (settings = {}) => { - return this.listPageRangeItemSegments(offset, count, settings.continuationToken, Object.assign({ maxPageSize: settings.maxPageSize }, options)); + return this.listSegments(settings.continuationToken, Object.assign({ maxPageSize: settings.maxPageSize }, updatedOptions)); }, }; } /** - * Gets the collection of page ranges that differ between a specified snapshot and this page blob. - * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges - * - * @param offset - Starting byte position of the page blob - * @param count - Number of bytes to get ranges diff. - * @param prevSnapshot - Timestamp of snapshot to retrieve the difference. - * @param options - Options to the Page Blob Get Page Ranges Diff operation. - * @returns Response data for the Page Blob Get Page Range Diff operation. - */ - async getPageRangesDiff(offset, count, prevSnapshot, options = {}) { - var _a; - options.conditions = options.conditions || {}; - const { span, updatedOptions } = createSpan("PageBlobClient-getPageRangesDiff", options); - try { - return await this.pageBlobContext - .getPageRangesDiff(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), prevsnapshot: prevSnapshot, range: rangeToString({ offset, count }) }, convertTracingToRequestOptionsBase(updatedOptions))) - .then(rangeResponseFromModel); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } - } - /** - * getPageRangesDiffSegment returns a single segment of page ranges starting from the - * specified Marker for difference between previous snapshot and the target page blob. - * Use an empty Marker to start enumeration from the beginning. - * After getting a segment, process it, and then call getPageRangesDiffSegment again - * (passing the the previously-returned Marker) to get the next segment. 
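The `listBlobsFlat` implementation above folds the boolean `include*` options into the REST `include` parameter and normalizes an empty `prefix` to `undefined`. A usage sketch, assuming an authenticated `containerClient` is in scope:

```js
// Iterates blobs whose names start with "logs/"; the two include flags are
// translated into include=["metadata", "snapshots"] on the wire.
for await (const blob of containerClient.listBlobsFlat({
  prefix: "logs/",        // an empty string here would be normalized to undefined
  includeMetadata: true,  // pushes "metadata" onto the include list
  includeSnapshots: true, // pushes "snapshots" onto the include list
})) {
  console.log(blob.name, blob.metadata);
}
```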
- * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges - * - * @param offset - Starting byte position of the page ranges. - * @param count - Number of bytes to get. - * @param prevSnapshotOrUrl - Timestamp of snapshot to retrieve the difference or URL of snapshot to retrieve the difference. - * @param marker - A string value that identifies the portion of the get to be returned with the next get operation. - * @param options - Options to the Page Blob Get Page Ranges Diff operation. - */ - async listPageRangesDiffSegment(offset, count, prevSnapshotOrUrl, marker, options) { - var _a; - const { span, updatedOptions } = createSpan("PageBlobClient-getPageRangesDiffSegment", options); - try { - return await this.pageBlobContext.getPageRangesDiff(Object.assign({ abortSignal: options === null || options === void 0 ? void 0 : options.abortSignal, leaseAccessConditions: options === null || options === void 0 ? void 0 : options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options === null || options === void 0 ? void 0 : options.conditions), { ifTags: (_a = options === null || options === void 0 ? void 0 : options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), prevsnapshot: prevSnapshotOrUrl, range: rangeToString({ - offset: offset, - count: count, - }), marker: marker, maxPageSize: options === null || options === void 0 ? void 0 : options.maxPageSize }, convertTracingToRequestOptionsBase(updatedOptions))); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } - } - /** - * Returns an AsyncIterableIterator for {@link PageBlobGetPageRangesDiffResponseModel} - * + * Returns an AsyncIterableIterator for ContainerListBlobHierarchySegmentResponse * - * @param offset - Starting byte position of the page ranges. - * @param count - Number of bytes to get. - * @param prevSnapshotOrUrl - Timestamp of snapshot to retrieve the difference or URL of snapshot to retrieve the difference. + * @param delimiter - The character or string used to define the virtual hierarchy * @param marker - A string value that identifies the portion of - * the get of page ranges to be returned with the next getting operation. The - * operation returns the ContinuationToken value within the response body if the - * getting operation did not return all page ranges remaining within the current page. - * The ContinuationToken value can be used as the value for - * the marker parameter in a subsequent call to request the next page of get + * the list of blobs to be returned with the next listing operation. The + * operation returns the ContinuationToken value within the response body if the + * listing operation did not return all blobs remaining to be listed + * with the current page. The ContinuationToken value can be used as the value for + * the marker parameter in a subsequent call to request the next page of list * items. The marker value is opaque to the client. - * @param options - Options to the Page Blob Get Page Ranges Diff operation. + * @param options - Options to list blobs operation. 
*/
- listPageRangeDiffItemSegments(offset, count, prevSnapshotOrUrl, marker, options) {
- return tslib.__asyncGenerator(this, arguments, function* listPageRangeDiffItemSegments_1() {
- let getPageRangeItemSegmentsResponse;
+ listHierarchySegments(delimiter, marker, options = {}) {
+ return tslib.__asyncGenerator(this, arguments, function* listHierarchySegments_1() {
+ let listBlobsHierarchySegmentResponse;
if (!!marker || marker === undefined) {
do {
- getPageRangeItemSegmentsResponse = yield tslib.__await(this.listPageRangesDiffSegment(offset, count, prevSnapshotOrUrl, marker, options));
- marker = getPageRangeItemSegmentsResponse.continuationToken;
- yield yield tslib.__await(yield tslib.__await(getPageRangeItemSegmentsResponse));
+ listBlobsHierarchySegmentResponse = yield tslib.__await(this.listBlobHierarchySegment(delimiter, marker, options));
+ marker = listBlobsHierarchySegmentResponse.continuationToken;
+ yield yield tslib.__await(yield tslib.__await(listBlobsHierarchySegmentResponse));
} while (marker);
}
});
}
/**
- * Returns an AsyncIterableIterator of {@link PageRangeInfo} objects
+ * Returns an AsyncIterableIterator for {@link BlobPrefix} and {@link BlobItem} objects.
*
- * @param offset - Starting byte position of the page ranges.
- * @param count - Number of bytes to get.
- * @param prevSnapshotOrUrl - Timestamp of snapshot to retrieve the difference or URL of snapshot to retrieve the difference.
- * @param options - Options to the Page Blob Get Page Ranges Diff operation.
+ * @param delimiter - The character or string used to define the virtual hierarchy
+ * @param options - Options to list blobs operation.
*/
- listPageRangeDiffItems(offset, count, prevSnapshotOrUrl, options) {
- return tslib.__asyncGenerator(this, arguments, function* listPageRangeDiffItems_1() {
+ listItemsByHierarchy(delimiter, options = {}) {
+ return tslib.__asyncGenerator(this, arguments, function* listItemsByHierarchy_1() {
var e_2, _a;
let marker;
try {
- for (var _b = tslib.__asyncValues(this.listPageRangeDiffItemSegments(offset, count, prevSnapshotOrUrl, marker, options)), _c; _c = yield tslib.__await(_b.next()), !_c.done;) {
- const getPageRangesSegment = _c.value;
- yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(ExtractPageRangeInfoItems(getPageRangesSegment))));
+ for (var _b = tslib.__asyncValues(this.listHierarchySegments(delimiter, marker, options)), _c; _c = yield tslib.__await(_b.next()), !_c.done;) {
+ const listBlobsHierarchySegmentResponse = _c.value;
+ const segment = listBlobsHierarchySegmentResponse.segment;
+ if (segment.blobPrefixes) {
+ for (const prefix of segment.blobPrefixes) {
+ yield yield tslib.__await(Object.assign({ kind: "prefix" }, prefix));
+ }
+ }
+ for (const blob of segment.blobItems) {
+ yield yield tslib.__await(Object.assign({ kind: "blob" }, blob));
+ }
}
}
catch (e_2_1) { e_2 = { error: e_2_1 }; }
@@ -44171,86 +43373,129 @@ class PageBlobClient extends BlobClient {
});
}
/**
- * Returns an async iterable iterator to list of page ranges that differ between a specified snapshot and this page blob.
- * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges
+ * Returns an async iterable iterator to list all the blobs by hierarchy
+ * under the specified container.
*
- * .byPage() returns an async iterable iterator to list of page ranges that differ between a specified snapshot and this page blob.
+ * .byPage() returns an async iterable iterator to list the blobs by hierarchy in pages.
* * Example using `for await` syntax: * * ```js - * // Get the pageBlobClient before you run these snippets, - * // Can be obtained from `blobServiceClient.getContainerClient("").getPageBlobClient("");` - * let i = 1; - * for await (const pageRange of pageBlobClient.listPageRangesDiff()) { - * console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`); + * for await (const item of containerClient.listBlobsByHierarchy("/")) { + * if (item.kind === "prefix") { + * console.log(`\tBlobPrefix: ${item.name}`); + * } else { + * console.log(`\tBlobItem: name - ${item.name}`); + * } * } * ``` * * Example using `iter.next()`: * * ```js - * let i = 1; - * let iter = pageBlobClient.listPageRangesDiff(); - * let pageRangeItem = await iter.next(); - * while (!pageRangeItem.done) { - * console.log(`Page range ${i++}: ${pageRangeItem.value.start} - ${pageRangeItem.value.end}, IsClear: ${pageRangeItem.value.isClear}`); - * pageRangeItem = await iter.next(); + * let iter = containerClient.listBlobsByHierarchy("/", { prefix: "prefix1/" }); + * let entity = await iter.next(); + * while (!entity.done) { + * let item = entity.value; + * if (item.kind === "prefix") { + * console.log(`\tBlobPrefix: ${item.name}`); + * } else { + * console.log(`\tBlobItem: name - ${item.name}`); + * } + * entity = await iter.next(); * } * ``` * * Example using `byPage()`: * * ```js - * // passing optional maxPageSize in the page settings - * let i = 1; - * for await (const response of pageBlobClient.listPageRangesDiff().byPage({ maxPageSize: 20 })) { - * for (const pageRange of response) { - * console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`); + * console.log("Listing blobs by hierarchy by page"); + * for await (const response of containerClient.listBlobsByHierarchy("/").byPage()) { + * const segment = response.segment; + * if (segment.blobPrefixes) { + * for (const prefix of segment.blobPrefixes) { + * console.log(`\tBlobPrefix: ${prefix.name}`); + * } + * } + * for (const blob of response.segment.blobItems) { + * console.log(`\tBlobItem: name - ${blob.name}`); * } * } * ``` * - * Example using paging with a marker: + * Example using paging with a max page size: * * ```js - * let i = 1; - * let iterator = pageBlobClient.listPageRangesDiff().byPage({ maxPageSize: 2 }); - * let response = (await iterator.next()).value; - * - * // Prints 2 page ranges - * for (const pageRange of response) { - * console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`); - * } - * - * // Gets next marker - * let marker = response.continuationToken; + * console.log("Listing blobs by hierarchy by page, specifying a prefix and a max page size"); * - * // Passing next marker as continuationToken + * let i = 1; + * for await (const response of containerClient + * .listBlobsByHierarchy("/", { prefix: "prefix2/sub1/" }) + * .byPage({ maxPageSize: 2 })) { + * console.log(`Page ${i++}`); + * const segment = response.segment; * - * iterator = pageBlobClient.listPageRangesDiff().byPage({ continuationToken: marker, maxPageSize: 10 }); - * response = (await iterator.next()).value; + * if (segment.blobPrefixes) { + * for (const prefix of segment.blobPrefixes) { + * console.log(`\tBlobPrefix: ${prefix.name}`); + * } + * } * - * // Prints 10 page ranges - * for (const blob of response) { - * console.log(`Page range ${i++}: ${pageRange.start} - ${pageRange.end}`); + * for (const blob of response.segment.blobItems) { + * console.log(`\tBlobItem: name - ${blob.name}`); + * } * } * ``` - * @param offset - Starting byte 
position of the page ranges. - * @param count - Number of bytes to get. - * @param prevSnapshot - Timestamp of snapshot to retrieve the difference. - * @param options - Options to the Page Blob Get Ranges operation. - * @returns An asyncIterableIterator that supports paging. + * + * @param delimiter - The character or string used to define the virtual hierarchy + * @param options - Options to list blobs operation. */ - listPageRangesDiff(offset, count, prevSnapshot, options = {}) { - options.conditions = options.conditions || {}; - // AsyncIterableIterator to iterate over blobs - const iter = this.listPageRangeDiffItems(offset, count, prevSnapshot, Object.assign({}, options)); + listBlobsByHierarchy(delimiter, options = {}) { + if (delimiter === "") { + throw new RangeError("delimiter should contain one or more characters"); + } + const include = []; + if (options.includeCopy) { + include.push("copy"); + } + if (options.includeDeleted) { + include.push("deleted"); + } + if (options.includeMetadata) { + include.push("metadata"); + } + if (options.includeSnapshots) { + include.push("snapshots"); + } + if (options.includeVersions) { + include.push("versions"); + } + if (options.includeUncommitedBlobs) { + include.push("uncommittedblobs"); + } + if (options.includeTags) { + include.push("tags"); + } + if (options.includeDeletedWithVersions) { + include.push("deletedwithversions"); + } + if (options.includeImmutabilityPolicy) { + include.push("immutabilitypolicy"); + } + if (options.includeLegalHold) { + include.push("legalhold"); + } + if (options.prefix === "") { + options.prefix = undefined; + } + const updatedOptions = Object.assign(Object.assign({}, options), (include.length > 0 ? { include: include } : {})); + // AsyncIterableIterator to iterate over blob prefixes and blobs + const iter = this.listItemsByHierarchy(delimiter, updatedOptions); return { /** * The next method, part of the iteration protocol */ - next() { + async next() { return iter.next(); }, /** @@ -44263,54 +43508,40 @@ class PageBlobClient extends BlobClient { * Return an AsyncIterableIterator that works a page at a time */ byPage: (settings = {}) => { - return this.listPageRangeDiffItemSegments(offset, count, prevSnapshot, settings.continuationToken, Object.assign({ maxPageSize: settings.maxPageSize }, options)); + return this.listHierarchySegments(delimiter, settings.continuationToken, Object.assign({ maxPageSize: settings.maxPageSize }, updatedOptions)); }, }; } /** - * Gets the collection of page ranges that differ between a specified snapshot and this page blob for managed disks. - * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges - * - * @param offset - Starting byte position of the page blob - * @param count - Number of bytes to get ranges diff. - * @param prevSnapshotUrl - URL of snapshot to retrieve the difference. - * @param options - Options to the Page Blob Get Page Ranges Diff operation. - * @returns Response data for the Page Blob Get Page Range Diff operation. 
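Since `listBlobsByHierarchy` above yields items tagged `kind: "prefix"` or `kind: "blob"`, and prefix names end with the delimiter, the virtual directory tree can be walked recursively. A sketch (not part of the original docs), assuming an authenticated `containerClient`:

```js
// Recursively prints every blob, descending into each virtual directory.
async function walk(containerClient, prefix = "") {
  for await (const item of containerClient.listBlobsByHierarchy("/", { prefix })) {
    if (item.kind === "prefix") {
      await walk(containerClient, item.name); // item.name ends with the "/" delimiter
    } else {
      console.log(item.name); // a leaf blob
    }
  }
}
```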
- */ - async getPageRangesDiffForManagedDisks(offset, count, prevSnapshotUrl, options = {}) { - var _a; - options.conditions = options.conditions || {}; - const { span, updatedOptions } = createSpan("PageBlobClient-GetPageRangesDiffForManagedDisks", options); - try { - return await this.pageBlobContext - .getPageRangesDiff(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }), prevSnapshotUrl, range: rangeToString({ offset, count }) }, convertTracingToRequestOptionsBase(updatedOptions))) - .then(rangeResponseFromModel); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } - } - /** - * Resizes the page blob to the specified size (which must be a multiple of 512). - * @see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties + * The Filter Blobs operation enables callers to list blobs in the container whose tags + * match a given search expression. * - * @param size - Target size - * @param options - Options to the Page Blob Resize operation. - * @returns Response data for the Page Blob Resize operation. + * @param tagFilterSqlExpression - The where parameter enables the caller to query blobs whose tags match a given expression. + * The given expression must evaluate to true for a blob to be returned in the results. + * The[OData - ABNF] filter syntax rule defines the formal grammar for the value of the where query parameter; + * however, only a subset of the OData filter syntax is supported in the Blob service. + * @param marker - A string value that identifies the portion of + * the list of blobs to be returned with the next listing operation. The + * operation returns the continuationToken value within the response body if the + * listing operation did not return all blobs remaining to be listed + * with the current page. The continuationToken value can be used as the value for + * the marker parameter in a subsequent call to request the next page of list + * items. The marker value is opaque to the client. + * @param options - Options to find blobs by tags. */ - async resize(size, options = {}) { - var _a; - options.conditions = options.conditions || {}; - const { span, updatedOptions } = createSpan("PageBlobClient-resize", options); + async findBlobsByTagsSegment(tagFilterSqlExpression, marker, options = {}) { + const { span, updatedOptions } = createSpan("ContainerClient-findBlobsByTagsSegment", options); try { - return await this.pageBlobContext.resize(size, Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? 
void 0 : _a.tagConditions }), encryptionScope: options.encryptionScope }, convertTracingToRequestOptionsBase(updatedOptions))); + const response = await this.containerContext.filterBlobs(Object.assign({ abortSignal: options.abortSignal, where: tagFilterSqlExpression, marker, maxPageSize: options.maxPageSize }, convertTracingToRequestOptionsBase(updatedOptions))); + const wrappedResponse = Object.assign(Object.assign({}, response), { _response: response._response, blobs: response.blobs.map((blob) => { + var _a; + let tagValue = ""; + if (((_a = blob.tags) === null || _a === void 0 ? void 0 : _a.blobTagSet.length) === 1) { + tagValue = blob.tags.blobTagSet[0].value; + } + return Object.assign(Object.assign({}, blob), { tags: toTags(blob.tags), tagValue }); + }) }); + return wrappedResponse; } catch (e) { span.setStatus({ @@ -44324,921 +43555,792 @@ class PageBlobClient extends BlobClient { } } /** - * Sets a page blob's sequence number. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties + * Returns an AsyncIterableIterator for ContainerFindBlobsByTagsSegmentResponse. * - * @param sequenceNumberAction - Indicates how the service should modify the blob's sequence number. - * @param sequenceNumber - Required if sequenceNumberAction is max or update - * @param options - Options to the Page Blob Update Sequence Number operation. - * @returns Response data for the Page Blob Update Sequence Number operation. + * @param tagFilterSqlExpression - The where parameter enables the caller to query blobs whose tags match a given expression. + * The given expression must evaluate to true for a blob to be returned in the results. + * The[OData - ABNF] filter syntax rule defines the formal grammar for the value of the where query parameter; + * however, only a subset of the OData filter syntax is supported in the Blob service. + * @param marker - A string value that identifies the portion of + * the list of blobs to be returned with the next listing operation. The + * operation returns the continuationToken value within the response body if the + * listing operation did not return all blobs remaining to be listed + * with the current page. The continuationToken value can be used as the value for + * the marker parameter in a subsequent call to request the next page of list + * items. The marker value is opaque to the client. + * @param options - Options to find blobs by tags. */ - async updateSequenceNumber(sequenceNumberAction, sequenceNumber, options = {}) { - var _a; - options.conditions = options.conditions || {}; - const { span, updatedOptions } = createSpan("PageBlobClient-updateSequenceNumber", options); - try { - return await this.pageBlobContext.updateSequenceNumber(sequenceNumberAction, Object.assign({ abortSignal: options.abortSignal, blobSequenceNumber: sequenceNumber, leaseAccessConditions: options.conditions, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? 
void 0 : _a.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } + findBlobsByTagsSegments(tagFilterSqlExpression, marker, options = {}) { + return tslib.__asyncGenerator(this, arguments, function* findBlobsByTagsSegments_1() { + let response; + if (!!marker || marker === undefined) { + do { + response = yield tslib.__await(this.findBlobsByTagsSegment(tagFilterSqlExpression, marker, options)); + response.blobs = response.blobs || []; + marker = response.continuationToken; + yield yield tslib.__await(response); + } while (marker); + } + }); } /** - * Begins an operation to start an incremental copy from one page blob's snapshot to this page blob. - * The snapshot is copied such that only the differential changes between the previously - * copied snapshot are transferred to the destination. - * The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. - * @see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob - * @see https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots + * Returns an AsyncIterableIterator for blobs. * - * @param copySource - Specifies the name of the source page blob snapshot. For example, - * https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - * @param options - Options to the Page Blob Copy Incremental operation. - * @returns Response data for the Page Blob Copy Incremental operation. + * @param tagFilterSqlExpression - The where parameter enables the caller to query blobs whose tags match a given expression. + * The given expression must evaluate to true for a blob to be returned in the results. + * The[OData - ABNF] filter syntax rule defines the formal grammar for the value of the where query parameter; + * however, only a subset of the OData filter syntax is supported in the Blob service. + * @param options - Options to findBlobsByTagsItems. */ - async startCopyIncremental(copySource, options = {}) { - var _a; - const { span, updatedOptions } = createSpan("PageBlobClient-startCopyIncremental", options); - try { - return await this.pageBlobContext.copyIncremental(copySource, Object.assign({ abortSignal: options.abortSignal, modifiedAccessConditions: Object.assign(Object.assign({}, options.conditions), { ifTags: (_a = options.conditions) === null || _a === void 0 ? void 0 : _a.tagConditions }) }, convertTracingToRequestOptionsBase(updatedOptions))); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } - } -} - -// Copyright (c) Microsoft Corporation. -async function getBodyAsText(batchResponse) { - let buffer = Buffer.alloc(BATCH_MAX_PAYLOAD_IN_BYTES); - const responseLength = await streamToBuffer2(batchResponse.readableStreamBody, buffer); - // Slice the buffer to trim the empty ending. - buffer = buffer.slice(0, responseLength); - return buffer.toString(); -} -function utf8ByteLength(str) { - return Buffer.byteLength(str); -} - -// Copyright (c) Microsoft Corporation. -const HTTP_HEADER_DELIMITER = ": "; -const SPACE_DELIMITER = " "; -const NOT_FOUND = -1; -/** - * Util class for parsing batch response. 
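As the wrapping in `findBlobsByTagsSegment` above shows, each matched blob is returned with a flattened `tags` map, plus a convenience `tagValue` when the service reports exactly one matching tag. A usage sketch, assuming an authenticated `containerClient` and a blob tagged `project=alpha`:

```js
// With a single-tag filter, blob.tags.project and blob.tagValue are both "alpha".
for await (const blob of containerClient.findBlobsByTags("project='alpha'")) {
  console.log(blob.name, blob.tags.project, blob.tagValue);
}
```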
- */ -class BatchResponseParser { - constructor(batchResponse, subRequests) { - if (!batchResponse || !batchResponse.contentType) { - // In special case(reported), server may return invalid content-type which could not be parsed. - throw new RangeError("batchResponse is malformed or doesn't contain valid content-type."); - } - if (!subRequests || subRequests.size === 0) { - // This should be prevent during coding. - throw new RangeError("Invalid state: subRequests is not provided or size is 0."); - } - this.batchResponse = batchResponse; - this.subRequests = subRequests; - this.responseBatchBoundary = this.batchResponse.contentType.split("=")[1]; - this.perResponsePrefix = `--${this.responseBatchBoundary}${HTTP_LINE_ENDING}`; - this.batchResponseEnding = `--${this.responseBatchBoundary}--`; - } - // For example of response, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/blob-batch#response - async parseBatchResponse() { - // When logic reach here, suppose batch request has already succeeded with 202, so we can further parse - // sub request's response. - if (this.batchResponse._response.status !== HTTPURLConnection.HTTP_ACCEPTED) { - throw new Error(`Invalid state: batch request failed with status: '${this.batchResponse._response.status}'.`); - } - const responseBodyAsText = await getBodyAsText(this.batchResponse); - const subResponses = responseBodyAsText - .split(this.batchResponseEnding)[0] // string after ending is useless - .split(this.perResponsePrefix) - .slice(1); // string before first response boundary is useless - const subResponseCount = subResponses.length; - // Defensive coding in case of potential error parsing. - // Note: subResponseCount == 1 is special case where sub request is invalid. - // We try to prevent such cases through early validation, e.g. validate sub request count >= 1. - // While in unexpected sub request invalid case, we allow sub response to be parsed and return to user. - if (subResponseCount !== this.subRequests.size && subResponseCount !== 1) { - throw new Error("Invalid state: sub responses' count is not equal to sub requests' count."); - } - const deserializedSubResponses = new Array(subResponseCount); - let subResponsesSucceededCount = 0; - let subResponsesFailedCount = 0; - // Parse sub subResponses. - for (let index = 0; index < subResponseCount; index++) { - const subResponse = subResponses[index]; - const deserializedSubResponse = {}; - deserializedSubResponse.headers = new coreHttp.HttpHeaders(); - const responseLines = subResponse.split(`${HTTP_LINE_ENDING}`); - let subRespHeaderStartFound = false; - let subRespHeaderEndFound = false; - let subRespFailed = false; - let contentId = NOT_FOUND; - for (const responseLine of responseLines) { - if (!subRespHeaderStartFound) { - // Convention line to indicate content ID - if (responseLine.startsWith(HeaderConstants.CONTENT_ID)) { - contentId = parseInt(responseLine.split(HTTP_HEADER_DELIMITER)[1]); - } - // Http version line with status code indicates the start of sub request's response. - // Example: HTTP/1.1 202 Accepted - if (responseLine.startsWith(HTTP_VERSION_1_1)) { - subRespHeaderStartFound = true; - const tokens = responseLine.split(SPACE_DELIMITER); - deserializedSubResponse.status = parseInt(tokens[1]); - deserializedSubResponse.statusMessage = tokens.slice(2).join(SPACE_DELIMITER); - } - continue; // Skip convention headers not specifically for sub request i.e. 
Content-Type: application/http and Content-ID: * - } - if (responseLine.trim() === "") { - // Sub response's header start line already found, and the first empty line indicates header end line found. - if (!subRespHeaderEndFound) { - subRespHeaderEndFound = true; - } - continue; // Skip empty line - } - // Note: when code reach here, it indicates subRespHeaderStartFound == true - if (!subRespHeaderEndFound) { - if (responseLine.indexOf(HTTP_HEADER_DELIMITER) === -1) { - // Defensive coding to prevent from missing valuable lines. - throw new Error(`Invalid state: find non-empty line '${responseLine}' without HTTP header delimiter '${HTTP_HEADER_DELIMITER}'.`); - } - // Parse headers of sub response. - const tokens = responseLine.split(HTTP_HEADER_DELIMITER); - deserializedSubResponse.headers.set(tokens[0], tokens[1]); - if (tokens[0] === HeaderConstants.X_MS_ERROR_CODE) { - deserializedSubResponse.errorCode = tokens[1]; - subRespFailed = true; - } + findBlobsByTagsItems(tagFilterSqlExpression, options = {}) { + return tslib.__asyncGenerator(this, arguments, function* findBlobsByTagsItems_1() { + var e_3, _a; + let marker; + try { + for (var _b = tslib.__asyncValues(this.findBlobsByTagsSegments(tagFilterSqlExpression, marker, options)), _c; _c = yield tslib.__await(_b.next()), !_c.done;) { + const segment = _c.value; + yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(segment.blobs))); } - else { - // Assemble body of sub response. - if (!deserializedSubResponse.bodyAsText) { - deserializedSubResponse.bodyAsText = ""; - } - deserializedSubResponse.bodyAsText += responseLine; + } + catch (e_3_1) { e_3 = { error: e_3_1 }; } + finally { + try { + if (_c && !_c.done && (_a = _b.return)) yield tslib.__await(_a.call(_b)); } - } // Inner for end - // The response will contain the Content-ID header for each corresponding subrequest response to use for tracking. - // The Content-IDs are set to a valid index in the subrequests we sent. In the status code 202 path, we could expect it - // to be 1-1 mapping from the [0, subRequests.size) to the Content-IDs returned. If not, we simply don't return that - // unexpected subResponse in the parsed reponse and we can always look it up in the raw response for debugging purpose. - if (contentId !== NOT_FOUND && - Number.isInteger(contentId) && - contentId >= 0 && - contentId < this.subRequests.size && - deserializedSubResponses[contentId] === undefined) { - deserializedSubResponse._request = this.subRequests.get(contentId); - deserializedSubResponses[contentId] = deserializedSubResponse; + finally { if (e_3) throw e_3.error; } } - else { - logger.error(`subResponses[${index}] is dropped as the Content-ID is not found or invalid, Content-ID: ${contentId}`); + }); + } + /** + * Returns an async iterable iterator to find all blobs with specified tag + * under the specified container. + * + * .byPage() returns an async iterable iterator to list the blobs in pages. 
+ * + * Example using `for await` syntax: + * + * ```js + * let i = 1; + * for await (const blob of containerClient.findBlobsByTags("tagkey='tagvalue'")) { + * console.log(`Blob ${i++}: ${blob.name}`); + * } + * ``` + * + * Example using `iter.next()`: + * + * ```js + * let i = 1; + * const iter = containerClient.findBlobsByTags("tagkey='tagvalue'"); + * let blobItem = await iter.next(); + * while (!blobItem.done) { + * console.log(`Blob ${i++}: ${blobItem.value.name}`); + * blobItem = await iter.next(); + * } + * ``` + * + * Example using `byPage()`: + * + * ```js + * // passing optional maxPageSize in the page settings + * let i = 1; + * for await (const response of containerClient.findBlobsByTags("tagkey='tagvalue'").byPage({ maxPageSize: 20 })) { + * if (response.blobs) { + * for (const blob of response.blobs) { + * console.log(`Blob ${i++}: ${blob.name}`); + * } + * } + * } + * ``` + * + * Example using paging with a marker: + * + * ```js + * let i = 1; + * let iterator = containerClient.findBlobsByTags("tagkey='tagvalue'").byPage({ maxPageSize: 2 }); + * let response = (await iterator.next()).value; + * + * // Prints 2 blob names + * if (response.blobs) { + * for (const blob of response.blobs) { + * console.log(`Blob ${i++}: ${blob.name}`); + * } + * } + * + * // Gets next marker + * let marker = response.continuationToken; + * // Passing next marker as continuationToken + * iterator = containerClient + * .findBlobsByTags("tagkey='tagvalue'") + * .byPage({ continuationToken: marker, maxPageSize: 10 }); + * response = (await iterator.next()).value; + * + * // Prints blob names + * if (response.blobs) { + * for (const blob of response.blobs) { + * console.log(`Blob ${i++}: ${blob.name}`); + * } + * } + * ``` + * + * @param tagFilterSqlExpression - The where parameter enables the caller to query blobs whose tags match a given expression. + * The given expression must evaluate to true for a blob to be returned in the results. + * The[OData - ABNF] filter syntax rule defines the formal grammar for the value of the where query parameter; + * however, only a subset of the OData filter syntax is supported in the Blob service. + * @param options - Options to find blobs by tags. + */ + findBlobsByTags(tagFilterSqlExpression, options = {}) { + // AsyncIterableIterator to iterate over blobs + const listSegmentOptions = Object.assign({}, options); + const iter = this.findBlobsByTagsItems(tagFilterSqlExpression, listSegmentOptions); + return { + /** + * The next method, part of the iteration protocol + */ + next() { + return iter.next(); + }, + /** + * The connection to the async iterator, part of the iteration protocol + */ + [Symbol.asyncIterator]() { + return this; + }, + /** + * Return an AsyncIterableIterator that works a page at a time + */ + byPage: (settings = {}) => { + return this.findBlobsByTagsSegments(tagFilterSqlExpression, settings.continuationToken, Object.assign({ maxPageSize: settings.maxPageSize }, listSegmentOptions)); + }, + }; + } + getContainerNameFromUrl() { + let containerName; + try { + // URL may look like the following + // "https://myaccount.blob.core.windows.net/mycontainer?sasString"; + // "https://myaccount.blob.core.windows.net/mycontainer"; + // IPv4/IPv6 address hosts, Endpoints - `http://127.0.0.1:10000/devstoreaccount1/containername` + // http://localhost:10001/devstoreaccount1/containername + const parsedUrl = coreHttp.URLBuilder.parse(this.url); + if (parsedUrl.getHost().split(".")[1] === "blob") { + // "https://myaccount.blob.core.windows.net/containername". 
+ // "https://customdomain.com/containername". + // .getPath() -> /containername + containerName = parsedUrl.getPath().split("/")[1]; } - if (subRespFailed) { - subResponsesFailedCount++; + else if (isIpEndpointStyle(parsedUrl)) { + // IPv4/IPv6 address hosts... Example - http://192.0.0.10:10001/devstoreaccount1/containername + // Single word domain without a [dot] in the endpoint... Example - http://localhost:10001/devstoreaccount1/containername + // .getPath() -> /devstoreaccount1/containername + containerName = parsedUrl.getPath().split("/")[2]; } else { - subResponsesSucceededCount++; + // "https://customdomain.com/containername". + // .getPath() -> /containername + containerName = parsedUrl.getPath().split("/")[1]; } + // decode the encoded containerName - to get all the special characters that might be present in it + containerName = decodeURIComponent(containerName); + if (!containerName) { + throw new Error("Provided containerName is invalid."); + } + return containerName; + } + catch (error) { + throw new Error("Unable to extract containerName with provided information."); } - return { - subResponses: deserializedSubResponses, - subResponsesSucceededCount: subResponsesSucceededCount, - subResponsesFailedCount: subResponsesFailedCount, - }; } -} - -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -var MutexLockStatus; -(function (MutexLockStatus) { - MutexLockStatus[MutexLockStatus["LOCKED"] = 0] = "LOCKED"; - MutexLockStatus[MutexLockStatus["UNLOCKED"] = 1] = "UNLOCKED"; -})(MutexLockStatus || (MutexLockStatus = {})); -/** - * An async mutex lock. - */ -class Mutex { /** - * Lock for a specific key. If the lock has been acquired by another customer, then - * will wait until getting the lock. + * Only available for ContainerClient constructed with a shared key credential. * - * @param key - lock key + * Generates a Blob Container Service Shared Access Signature (SAS) URI based on the client properties + * and parameters passed in. The SAS is signed by the shared key credential of the client. + * + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas + * + * @param options - Optional parameters. + * @returns The SAS URI consisting of the URI to the resource represented by this client, followed by the generated SAS token. */ - static async lock(key) { + generateSasUrl(options) { return new Promise((resolve) => { - if (this.keys[key] === undefined || this.keys[key] === MutexLockStatus.UNLOCKED) { - this.keys[key] = MutexLockStatus.LOCKED; - resolve(); - } - else { - this.onUnlockEvent(key, () => { - this.keys[key] = MutexLockStatus.LOCKED; - resolve(); - }); + if (!(this.credential instanceof StorageSharedKeyCredential)) { + throw new RangeError("Can only generate the SAS when the client is initialized with a shared key credential"); } + const sas = generateBlobSASQueryParameters(Object.assign({ containerName: this._containerName }, options), this.credential).toString(); + resolve(appendToURLQuery(this.url, sas)); }); } /** - * Unlock a key. + * Creates a BlobBatchClient object to conduct batch operations. * - * @param key - + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/blob-batch + * + * @returns A new BlobBatchClient object for this container. 
*/
- static async unlock(key) {
- return new Promise((resolve) => {
- if (this.keys[key] === MutexLockStatus.LOCKED) {
- this.emitUnlockEvent(key);
- }
- delete this.keys[key];
- resolve();
- });
- }
- static onUnlockEvent(key, handler) {
- if (this.listeners[key] === undefined) {
- this.listeners[key] = [handler];
- }
- else {
- this.listeners[key].push(handler);
- }
- }
- static emitUnlockEvent(key) {
- if (this.listeners[key] !== undefined && this.listeners[key].length > 0) {
- const handler = this.listeners[key].shift();
- setImmediate(() => {
- handler.call(this);
- });
- }
+ getBlobBatchClient() {
+ return new BlobBatchClient(this.url, this.pipeline);
}
}
-Mutex.keys = {};
-Mutex.listeners = {};
// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
/**
- * A BlobBatch represents an aggregated set of operations on blobs.
- * Currently, only `delete` and `setAccessTier` are supported.
+ * ONLY AVAILABLE IN NODE.JS RUNTIME.
+ *
+ * This is a helper class to construct a string representing the permissions granted by an AccountSAS. Setting a value
+ * to true means that any SAS which uses these permissions will grant permissions for that operation. Once all the
+ * values are set, this should be serialized with toString and set as the permissions field on an
+ * {@link AccountSASSignatureValues} object. It is possible to construct the permissions string without this class, but
+ * the order of the permissions is particular and this class guarantees correctness.
*/
-class BlobBatch {
+class AccountSASPermissions {
constructor() {
- this.batch = "batch";
- this.batchRequest = new InnerBatchRequest();
- }
- /**
- * Get the value of Content-Type for a batch request.
- * The value must be multipart/mixed with a batch boundary.
- * Example: multipart/mixed; boundary=batch_a81786c8-e301-4e42-a729-a32ca24ae252
- */
- getMultiPartContentType() {
- return this.batchRequest.getMultipartContentType();
+ /**
+ * Permission to read resources and list queues and tables granted.
+ */
+ this.read = false;
+ /**
+ * Permission to write resources granted.
+ */
+ this.write = false;
+ /**
+ * Permission to delete resources granted.
+ */
+ this.delete = false;
+ /**
+ * Permission to delete versions granted.
+ */
+ this.deleteVersion = false;
+ /**
+ * Permission to list blob containers, blobs, shares, directories, and files granted.
+ */
+ this.list = false;
+ /**
+ * Permission to add messages, table entities, and append to blobs granted.
+ */
+ this.add = false;
+ /**
+ * Permission to create blobs and files granted.
+ */
+ this.create = false;
+ /**
+ * Permissions to update messages and table entities granted.
+ */
+ this.update = false;
+ /**
+ * Permission to get and delete messages granted.
+ */
+ this.process = false;
+ /**
+ * Specifies Tag access granted.
+ */
+ this.tag = false;
+ /**
+ * Permission to filter blobs.
+ */
+ this.filter = false;
+ /**
+ * Permission to set immutability policy.
+ */
+ this.setImmutabilityPolicy = false;
+ /**
+ * Specifies that Permanent Delete is permitted.
+ */
+ this.permanentDelete = false;
}
/**
- * Get assembled HTTP request body for sub requests.
+ * Parse initializes the AccountSASPermissions fields from a string.
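A round-trip sketch of the parser described here: `parse` accepts the permission characters in any order, while `toString` (further below) re-emits them in the service-mandated order, so the pair normalizes a permissions string:

```js
// "lwr" sets list, write and read; toString() re-orders them as "rwl".
const perms = AccountSASPermissions.parse("lwr");
console.log(perms.toString()); // "rwl"
```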
+ * + * @param permissions - */ - getHttpRequestBody() { - return this.batchRequest.getHttpRequestBody(); + static parse(permissions) { + const accountSASPermissions = new AccountSASPermissions(); + for (const c of permissions) { + switch (c) { + case "r": + accountSASPermissions.read = true; + break; + case "w": + accountSASPermissions.write = true; + break; + case "d": + accountSASPermissions.delete = true; + break; + case "x": + accountSASPermissions.deleteVersion = true; + break; + case "l": + accountSASPermissions.list = true; + break; + case "a": + accountSASPermissions.add = true; + break; + case "c": + accountSASPermissions.create = true; + break; + case "u": + accountSASPermissions.update = true; + break; + case "p": + accountSASPermissions.process = true; + break; + case "t": + accountSASPermissions.tag = true; + break; + case "f": + accountSASPermissions.filter = true; + break; + case "i": + accountSASPermissions.setImmutabilityPolicy = true; + break; + case "y": + accountSASPermissions.permanentDelete = true; + break; + default: + throw new RangeError(`Invalid permission character: ${c}`); + } + } + return accountSASPermissions; } /** - * Get sub requests that are added into the batch request. + * Creates a {@link AccountSASPermissions} from a raw object which contains same keys as it + * and boolean values for them. + * + * @param permissionLike - */ - getSubRequests() { - return this.batchRequest.getSubRequests(); - } - async addSubRequestInternal(subRequest, assembleSubRequestFunc) { - await Mutex.lock(this.batch); - try { - this.batchRequest.preAddSubRequest(subRequest); - await assembleSubRequestFunc(); - this.batchRequest.postAddSubRequest(subRequest); - } - finally { - await Mutex.unlock(this.batch); - } - } - setBatchType(batchType) { - if (!this.batchType) { - this.batchType = batchType; - } - if (this.batchType !== batchType) { - throw new RangeError(`BlobBatch only supports one operation type per batch and it already is being used for ${this.batchType} operations.`); - } - } - async deleteBlob(urlOrBlobClient, credentialOrOptions, options) { - let url; - let credential; - if (typeof urlOrBlobClient === "string" && - ((coreHttp.isNode && credentialOrOptions instanceof StorageSharedKeyCredential) || - credentialOrOptions instanceof AnonymousCredential || - coreHttp.isTokenCredential(credentialOrOptions))) { - // First overload - url = urlOrBlobClient; - credential = credentialOrOptions; - } - else if (urlOrBlobClient instanceof BlobClient) { - // Second overload - url = urlOrBlobClient.url; - credential = urlOrBlobClient.credential; - options = credentialOrOptions; + static from(permissionLike) { + const accountSASPermissions = new AccountSASPermissions(); + if (permissionLike.read) { + accountSASPermissions.read = true; } - else { - throw new RangeError("Invalid arguments. 
Either url and credential, or BlobClient need be provided."); + if (permissionLike.write) { + accountSASPermissions.write = true; } - if (!options) { - options = {}; + if (permissionLike.delete) { + accountSASPermissions.delete = true; } - const { span, updatedOptions } = createSpan("BatchDeleteRequest-addSubRequest", options); - try { - this.setBatchType("delete"); - await this.addSubRequestInternal({ - url: url, - credential: credential, - }, async () => { - await new BlobClient(url, this.batchRequest.createPipeline(credential)).delete(updatedOptions); - }); + if (permissionLike.deleteVersion) { + accountSASPermissions.deleteVersion = true; } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; + if (permissionLike.filter) { + accountSASPermissions.filter = true; } - finally { - span.end(); + if (permissionLike.tag) { + accountSASPermissions.tag = true; } - } - async setBlobAccessTier(urlOrBlobClient, credentialOrTier, tierOrOptions, options) { - let url; - let credential; - let tier; - if (typeof urlOrBlobClient === "string" && - ((coreHttp.isNode && credentialOrTier instanceof StorageSharedKeyCredential) || - credentialOrTier instanceof AnonymousCredential || - coreHttp.isTokenCredential(credentialOrTier))) { - // First overload - url = urlOrBlobClient; - credential = credentialOrTier; - tier = tierOrOptions; + if (permissionLike.list) { + accountSASPermissions.list = true; } - else if (urlOrBlobClient instanceof BlobClient) { - // Second overload - url = urlOrBlobClient.url; - credential = urlOrBlobClient.credential; - tier = credentialOrTier; - options = tierOrOptions; + if (permissionLike.add) { + accountSASPermissions.add = true; } - else { - throw new RangeError("Invalid arguments. Either url and credential, or BlobClient need be provided."); + if (permissionLike.create) { + accountSASPermissions.create = true; } - if (!options) { - options = {}; + if (permissionLike.update) { + accountSASPermissions.update = true; } - const { span, updatedOptions } = createSpan("BatchSetTierRequest-addSubRequest", options); - try { - this.setBatchType("setAccessTier"); - await this.addSubRequestInternal({ - url: url, - credential: credential, - }, async () => { - await new BlobClient(url, this.batchRequest.createPipeline(credential)).setAccessTier(tier, updatedOptions); - }); + if (permissionLike.process) { + accountSASPermissions.process = true; } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; + if (permissionLike.setImmutabilityPolicy) { + accountSASPermissions.setImmutabilityPolicy = true; } - finally { - span.end(); + if (permissionLike.permanentDelete) { + accountSASPermissions.permanentDelete = true; } - } -} -/** - * Inner batch request class which is responsible for assembling and serializing sub requests. - * See https://docs.microsoft.com/en-us/rest/api/storageservices/blob-batch#request-body for how requests are assembled. 
- */ -class InnerBatchRequest { - constructor() { - this.operationCount = 0; - this.body = ""; - const tempGuid = coreHttp.generateUuid(); - // batch_{batchid} - this.boundary = `batch_${tempGuid}`; - // --batch_{batchid} - // Content-Type: application/http - // Content-Transfer-Encoding: binary - this.subRequestPrefix = `--${this.boundary}${HTTP_LINE_ENDING}${HeaderConstants.CONTENT_TYPE}: application/http${HTTP_LINE_ENDING}${HeaderConstants.CONTENT_TRANSFER_ENCODING}: binary`; - // multipart/mixed; boundary=batch_{batchid} - this.multipartContentType = `multipart/mixed; boundary=${this.boundary}`; - // --batch_{batchid}-- - this.batchRequestEnding = `--${this.boundary}--`; - this.subRequests = new Map(); + return accountSASPermissions; } /** - * Create pipeline to assemble sub requests. The idea here is to use existing - * credential and serialization/deserialization components, with additional policies to - * filter unnecessary headers, assemble sub requests into request's body - * and intercept request from going to wire. - * @param credential - Such as AnonymousCredential, StorageSharedKeyCredential or any credential from the `@azure/identity` package to authenticate requests to the service. You can also provide an object that implements the TokenCredential interface. If not specified, AnonymousCredential is used. + * Produces the SAS permissions string for an Azure Storage account. + * Call this method to set AccountSASSignatureValues Permissions field. + * + * Using this method will guarantee the resource types are in + * an order accepted by the service. + * + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas + * */ - createPipeline(credential) { - const isAnonymousCreds = credential instanceof AnonymousCredential; - const policyFactoryLength = 3 + (isAnonymousCreds ? 0 : 1); // [deserializationPolicy, BatchHeaderFilterPolicyFactory, (Optional)Credential, BatchRequestAssemblePolicyFactory] - const factories = new Array(policyFactoryLength); - factories[0] = coreHttp.deserializationPolicy(); // Default deserializationPolicy is provided by protocol layer - factories[1] = new BatchHeaderFilterPolicyFactory(); // Use batch header filter policy to exclude unnecessary headers - if (!isAnonymousCreds) { - factories[2] = coreHttp.isTokenCredential(credential) - ? 
attachCredential(coreHttp.bearerTokenAuthenticationPolicy(credential, StorageOAuthScopes), credential) - : credential; + toString() { + // The order of the characters should be as specified here to ensure correctness: + // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas + // Use a string array instead of string concatenating += operator for performance + const permissions = []; + if (this.read) { + permissions.push("r"); } - factories[policyFactoryLength - 1] = new BatchRequestAssemblePolicyFactory(this); // Use batch assemble policy to assemble request and intercept request from going to wire - return new Pipeline(factories, {}); - } - appendSubRequestToBody(request) { - // Start to assemble sub request - this.body += [ - this.subRequestPrefix, - `${HeaderConstants.CONTENT_ID}: ${this.operationCount}`, - "", - `${request.method.toString()} ${getURLPathAndQuery(request.url)} ${HTTP_VERSION_1_1}${HTTP_LINE_ENDING}`, // sub request start line with method - ].join(HTTP_LINE_ENDING); - for (const header of request.headers.headersArray()) { - this.body += `${header.name}: ${header.value}${HTTP_LINE_ENDING}`; + if (this.write) { + permissions.push("w"); + } + if (this.delete) { + permissions.push("d"); + } + if (this.deleteVersion) { + permissions.push("x"); + } + if (this.filter) { + permissions.push("f"); } - this.body += HTTP_LINE_ENDING; // sub request's headers need be ending with an empty line - // No body to assemble for current batch request support - // End to assemble sub request - } - preAddSubRequest(subRequest) { - if (this.operationCount >= BATCH_MAX_REQUEST) { - throw new RangeError(`Cannot exceed ${BATCH_MAX_REQUEST} sub requests in a single batch`); + if (this.tag) { + permissions.push("t"); } - // Fast fail if url for sub request is invalid - const path = getURLPath(subRequest.url); - if (!path || path === "") { - throw new RangeError(`Invalid url for sub request: '${subRequest.url}'`); + if (this.list) { + permissions.push("l"); } - } - postAddSubRequest(subRequest) { - this.subRequests.set(this.operationCount, subRequest); - this.operationCount++; - } - // Return the http request body with assembling the ending line to the sub request body. - getHttpRequestBody() { - return `${this.body}${this.batchRequestEnding}${HTTP_LINE_ENDING}`; - } - getMultipartContentType() { - return this.multipartContentType; - } - getSubRequests() { - return this.subRequests; - } -} -class BatchRequestAssemblePolicy extends coreHttp.BaseRequestPolicy { - constructor(batchRequest, nextPolicy, options) { - super(nextPolicy, options); - this.dummyResponse = { - request: new coreHttp.WebResource(), - status: 200, - headers: new coreHttp.HttpHeaders(), - }; - this.batchRequest = batchRequest; - } - async sendRequest(request) { - await this.batchRequest.appendSubRequestToBody(request); - return this.dummyResponse; // Intercept request from going to wire - } -} -class BatchRequestAssemblePolicyFactory { - constructor(batchRequest) { - this.batchRequest = batchRequest; - } - create(nextPolicy, options) { - return new BatchRequestAssemblePolicy(this.batchRequest, nextPolicy, options); - } -} -class BatchHeaderFilterPolicy extends coreHttp.BaseRequestPolicy { - // The base class has a protected constructor. Adding a public one to enable constructing of this class. 
- /* eslint-disable-next-line @typescript-eslint/no-useless-constructor*/ - constructor(nextPolicy, options) { - super(nextPolicy, options); - } - async sendRequest(request) { - let xMsHeaderName = ""; - for (const header of request.headers.headersArray()) { - if (iEqual(header.name, HeaderConstants.X_MS_VERSION)) { - xMsHeaderName = header.name; - } + if (this.add) { + permissions.push("a"); } - if (xMsHeaderName !== "") { - request.headers.remove(xMsHeaderName); // The subrequests should not have the x-ms-version header. + if (this.create) { + permissions.push("c"); } - return this._nextPolicy.sendRequest(request); - } -} -class BatchHeaderFilterPolicyFactory { - create(nextPolicy, options) { - return new BatchHeaderFilterPolicy(nextPolicy, options); + if (this.update) { + permissions.push("u"); + } + if (this.process) { + permissions.push("p"); + } + if (this.setImmutabilityPolicy) { + permissions.push("i"); + } + if (this.permanentDelete) { + permissions.push("y"); + } + return permissions.join(""); } } // Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. /** - * A BlobBatchClient allows you to make batched requests to the Azure Storage Blob service. + * ONLY AVAILABLE IN NODE.JS RUNTIME. * - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/blob-batch + * This is a helper class to construct a string representing the resources accessible by an AccountSAS. Setting a value + * to true means that any SAS which uses these permissions will grant access to that resource type. Once all the + * values are set, this should be serialized with toString and set as the resources field on an + * {@link AccountSASSignatureValues} object. It is possible to construct the resources string without this class, but + * the order of the resources is particular and this class guarantees correctness. */ -class BlobBatchClient { - constructor(url, credentialOrPipeline, - // Legacy, no fix for eslint error without breaking. Disable it for this interface. - /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/ - options) { - let pipeline; - if (isPipelineLike(credentialOrPipeline)) { - pipeline = credentialOrPipeline; - } - else if (!credentialOrPipeline) { - // no credential provided - pipeline = newPipeline(new AnonymousCredential(), options); - } - else { - pipeline = newPipeline(credentialOrPipeline, options); - } - const storageClientContext = new StorageClientContext(url, pipeline.toServiceClientOptions()); - const path = getURLPath(url); - if (path && path !== "/") { - // Container scoped. - this.serviceOrContainerContext = new Container(storageClientContext); - } - else { - this.serviceOrContainerContext = new Service(storageClientContext); - } +class AccountSASResourceTypes { + constructor() { + /** + * Permission to access service level APIs granted. + */ + this.service = false; + /** + * Permission to access container level APIs (Blob Containers, Tables, Queues, File Shares) granted. + */ + this.container = false; + /** + * Permission to access object level APIs (Blobs, Table Entities, Queue Messages, Files) granted. + */ + this.object = false; } /** - * Creates a {@link BlobBatch}. - * A BlobBatch represents an aggregated set of operations on blobs. + * Creates an {@link AccountSASResourceTypes} from the specified resource types string. This method will throw an + * Error if it encounters a character that does not correspond to a valid resource type. 
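The `toString()` implementation added above exists to guarantee the character order the service expects in an account SAS permissions string. A minimal round-trip sketch, assuming these helpers are consumed through the public `@azure/storage-blob` package that this bundle vendors:

```js
const { AccountSASPermissions } = require("@azure/storage-blob");

// Flag order on the input object does not matter; toString() always emits
// the service-mandated order (r, w, d, x, f, t, l, a, c, u, p, i, y).
const perms = AccountSASPermissions.from({ list: true, read: true, write: true });
console.log(perms.toString()); // "rwl"

// parse() is the inverse; it throws a RangeError on unknown characters.
console.log(AccountSASPermissions.parse("rwdx").deleteVersion); // true
```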
+ * + * @param resourceTypes - */ - createBatch() { - return new BlobBatch(); - } - async deleteBlobs(urlsOrBlobClients, credentialOrOptions, - // Legacy, no fix for eslint error without breaking. Disable it for this interface. - /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/ - options) { - const batch = new BlobBatch(); - for (const urlOrBlobClient of urlsOrBlobClients) { - if (typeof urlOrBlobClient === "string") { - await batch.deleteBlob(urlOrBlobClient, credentialOrOptions, options); - } - else { - await batch.deleteBlob(urlOrBlobClient, credentialOrOptions); - } - } - return this.submitBatch(batch); - } - async setBlobsAccessTier(urlsOrBlobClients, credentialOrTier, tierOrOptions, - // Legacy, no fix for eslint error without breaking. Disable it for this interface. - /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/ - options) { - const batch = new BlobBatch(); - for (const urlOrBlobClient of urlsOrBlobClients) { - if (typeof urlOrBlobClient === "string") { - await batch.setBlobAccessTier(urlOrBlobClient, credentialOrTier, tierOrOptions, options); - } - else { - await batch.setBlobAccessTier(urlOrBlobClient, credentialOrTier, tierOrOptions); + static parse(resourceTypes) { + const accountSASResourceTypes = new AccountSASResourceTypes(); + for (const c of resourceTypes) { + switch (c) { + case "s": + accountSASResourceTypes.service = true; + break; + case "c": + accountSASResourceTypes.container = true; + break; + case "o": + accountSASResourceTypes.object = true; + break; + default: + throw new RangeError(`Invalid resource type: ${c}`); } } - return this.submitBatch(batch); + return accountSASResourceTypes; } /** - * Submit batch request which consists of multiple subrequests. - * - * Get `blobBatchClient` and other details before running the snippets. - * `blobServiceClient.getBlobBatchClient()` gives the `blobBatchClient` - * - * Example usage: - * - * ```js - * let batchRequest = new BlobBatch(); - * await batchRequest.deleteBlob(urlInString0, credential0); - * await batchRequest.deleteBlob(urlInString1, credential1, { - * deleteSnapshots: "include" - * }); - * const batchResp = await blobBatchClient.submitBatch(batchRequest); - * console.log(batchResp.subResponsesSucceededCount); - * ``` - * - * Example using a lease: - * - * ```js - * let batchRequest = new BlobBatch(); - * await batchRequest.setBlobAccessTier(blockBlobClient0, "Cool"); - * await batchRequest.setBlobAccessTier(blockBlobClient1, "Cool", { - * conditions: { leaseId: leaseId } - * }); - * const batchResp = await blobBatchClient.submitBatch(batchRequest); - * console.log(batchResp.subResponsesSucceededCount); - * ``` + * Converts the given resource types to a string. * - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/blob-batch + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas * - * @param batchRequest - A set of Delete or SetTier operations. - * @param options - */ - async submitBatch(batchRequest, options = {}) { - if (!batchRequest || batchRequest.getSubRequests().size === 0) { - throw new RangeError("Batch request should contain one or more sub requests."); - } - const { span, updatedOptions } = createSpan("BlobBatchClient-submitBatch", options); - try { - const batchRequestBody = batchRequest.getHttpRequestBody(); - // ServiceSubmitBatchResponseModel and ContainerSubmitBatchResponse are compatible for now. 
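`AccountSASResourceTypes.parse` accepts only `s`, `c`, and `o`, as the `switch` above shows. A short sketch under the same package assumption:

```js
const { AccountSASResourceTypes } = require("@azure/storage-blob");

const types = AccountSASResourceTypes.parse("sco");
console.log(types.service, types.container, types.object); // true true true

// Unknown characters are rejected rather than silently ignored.
try {
  AccountSASResourceTypes.parse("x");
} catch (e) {
  console.log(e instanceof RangeError); // true
}
```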
- const rawBatchResponse = await this.serviceOrContainerContext.submitBatch(utf8ByteLength(batchRequestBody), batchRequest.getMultiPartContentType(), batchRequestBody, Object.assign(Object.assign({}, options), convertTracingToRequestOptionsBase(updatedOptions))); - // Parse the sub responses result, if logic reaches here(i.e. the batch request succeeded with status code 202). - const batchResponseParser = new BatchResponseParser(rawBatchResponse, batchRequest.getSubRequests()); - const responseSummary = await batchResponseParser.parseBatchResponse(); - const res = { - _response: rawBatchResponse._response, - contentType: rawBatchResponse.contentType, - errorCode: rawBatchResponse.errorCode, - requestId: rawBatchResponse.requestId, - clientRequestId: rawBatchResponse.clientRequestId, - version: rawBatchResponse.version, - subResponses: responseSummary.subResponses, - subResponsesSucceededCount: responseSummary.subResponsesSucceededCount, - subResponsesFailedCount: responseSummary.subResponsesFailedCount, - }; - return res; + toString() { + const resourceTypes = []; + if (this.service) { + resourceTypes.push("s"); } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; + if (this.container) { + resourceTypes.push("c"); } - finally { - span.end(); + if (this.object) { + resourceTypes.push("o"); } + return resourceTypes.join(""); } } +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. /** - * A ContainerClient represents a URL to the Azure Storage container allowing you to manipulate its blobs. + * ONLY AVAILABLE IN NODE.JS RUNTIME. + * + * This is a helper class to construct a string representing the services accessible by an AccountSAS. Setting a value + * to true means that any SAS which uses these permissions will grant access to that service. Once all the + * values are set, this should be serialized with toString and set as the services field on an + * {@link AccountSASSignatureValues} object. It is possible to construct the services string without this class, but + * the order of the services is particular and this class guarantees correctness. */ -class ContainerClient extends StorageClient { - constructor(urlOrConnectionString, credentialOrPipelineOrContainerName, - // Legacy, no fix for eslint error without breaking. Disable it for this interface. - /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/ - options) { - let pipeline; - let url; - options = options || {}; - if (isPipelineLike(credentialOrPipelineOrContainerName)) { - // (url: string, pipeline: Pipeline) - url = urlOrConnectionString; - pipeline = credentialOrPipelineOrContainerName; - } - else if ((coreHttp.isNode && credentialOrPipelineOrContainerName instanceof StorageSharedKeyCredential) || - credentialOrPipelineOrContainerName instanceof AnonymousCredential || - coreHttp.isTokenCredential(credentialOrPipelineOrContainerName)) { - // (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions) - url = urlOrConnectionString; - pipeline = newPipeline(credentialOrPipelineOrContainerName, options); - } - else if (!credentialOrPipelineOrContainerName && - typeof credentialOrPipelineOrContainerName !== "string") { - // (url: string, credential?: StorageSharedKeyCredential | AnonymousCredential | TokenCredential, options?: StoragePipelineOptions) - // The second parameter is undefined. Use anonymous credential. 
- url = urlOrConnectionString; - pipeline = newPipeline(new AnonymousCredential(), options); - } - else if (credentialOrPipelineOrContainerName && - typeof credentialOrPipelineOrContainerName === "string") { - // (connectionString: string, containerName: string, blobName: string, options?: StoragePipelineOptions) - const containerName = credentialOrPipelineOrContainerName; - const extractedCreds = extractConnectionStringParts(urlOrConnectionString); - if (extractedCreds.kind === "AccountConnString") { - if (coreHttp.isNode) { - const sharedKeyCredential = new StorageSharedKeyCredential(extractedCreds.accountName, extractedCreds.accountKey); - url = appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)); - if (!options.proxyOptions) { - options.proxyOptions = coreHttp.getDefaultProxySettings(extractedCreds.proxyUri); - } - pipeline = newPipeline(sharedKeyCredential, options); - } - else { - throw new Error("Account connection string is only supported in Node.js environment"); - } - } - else if (extractedCreds.kind === "SASConnString") { - url = - appendToURLPath(extractedCreds.url, encodeURIComponent(containerName)) + - "?" + - extractedCreds.accountSas; - pipeline = newPipeline(new AnonymousCredential(), options); - } - else { - throw new Error("Connection string must be either an Account connection string or a SAS connection string"); - } - } - else { - throw new Error("Expecting non-empty strings for containerName parameter"); - } - super(url, pipeline); - this._containerName = this.getContainerNameFromUrl(); - this.containerContext = new Container(this.storageClientContext); +class AccountSASServices { + constructor() { + /** + * Permission to access blob resources granted. + */ + this.blob = false; + /** + * Permission to access file resources granted. + */ + this.file = false; + /** + * Permission to access queue resources granted. + */ + this.queue = false; + /** + * Permission to access table resources granted. + */ + this.table = false; } /** - * The name of the container. + * Creates an {@link AccountSASServices} from the specified services string. This method will throw an + * Error if it encounters a character that does not correspond to a valid service. + * + * @param services - */ - get containerName() { - return this._containerName; + static parse(services) { + const accountSASServices = new AccountSASServices(); + for (const c of services) { + switch (c) { + case "b": + accountSASServices.blob = true; + break; + case "f": + accountSASServices.file = true; + break; + case "q": + accountSASServices.queue = true; + break; + case "t": + accountSASServices.table = true; + break; + default: + throw new RangeError(`Invalid service character: ${c}`); + } + } + return accountSASServices; } /** - * Creates a new container under the specified account. If the container with - * the same name already exists, the operation fails. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/create-container - * - * @param options - Options to Container Create operation. - * - * - * Example usage: + * Converts the given services to a string. 
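`AccountSASServices` applies the same parse/serialize pattern to the four storage services. A small sketch, with an illustrative input string:

```js
const { AccountSASServices } = require("@azure/storage-blob");

// "b" = blob, "f" = file, "q" = queue, "t" = table.
const services = AccountSASServices.parse("bq");
console.log(services.blob, services.queue); // true true
console.log(services.file, services.table); // false false
```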
* - * ```js - * const containerClient = blobServiceClient.getContainerClient(""); - * const createContainerResponse = await containerClient.create(); - * console.log("Container was created successfully", createContainerResponse.requestId); - * ``` */ - async create(options = {}) { - const { span, updatedOptions } = createSpan("ContainerClient-create", options); - try { - // Spread operator in destructuring assignments, - // this will filter out unwanted properties from the response object into result object - return await this.containerContext.create(Object.assign(Object.assign({}, options), convertTracingToRequestOptionsBase(updatedOptions))); + toString() { + const services = []; + if (this.blob) { + services.push("b"); } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; + if (this.table) { + services.push("t"); } - finally { - span.end(); + if (this.queue) { + services.push("q"); + } + if (this.file) { + services.push("f"); } + return services.join(""); } - /** - * Creates a new container under the specified account. If the container with - * the same name already exists, it is not changed. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/create-container - * - * @param options - - */ - async createIfNotExists(options = {}) { - var _a, _b; - const { span, updatedOptions } = createSpan("ContainerClient-createIfNotExists", options); - try { - const res = await this.create(updatedOptions); - return Object.assign(Object.assign({ succeeded: true }, res), { _response: res._response }); +} + +// Copyright (c) Microsoft Corporation. +/** + * ONLY AVAILABLE IN NODE.JS RUNTIME. + * + * Generates a {@link SASQueryParameters} object which contains all SAS query parameters needed to make an actual + * REST request. + * + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas + * + * @param accountSASSignatureValues - + * @param sharedKeyCredential - + */ +function generateAccountSASQueryParameters(accountSASSignatureValues, sharedKeyCredential) { + const version = accountSASSignatureValues.version + ? 
accountSASSignatureValues.version + : SERVICE_VERSION; + if (accountSASSignatureValues.permissions && + accountSASSignatureValues.permissions.setImmutabilityPolicy && + version < "2020-08-04") { + throw RangeError("'version' must be >= '2020-08-04' when provided 'i' permission."); + } + if (accountSASSignatureValues.permissions && + accountSASSignatureValues.permissions.deleteVersion && + version < "2019-10-10") { + throw RangeError("'version' must be >= '2019-10-10' when provided 'x' permission."); + } + if (accountSASSignatureValues.permissions && + accountSASSignatureValues.permissions.permanentDelete && + version < "2019-10-10") { + throw RangeError("'version' must be >= '2019-10-10' when provided 'y' permission."); + } + if (accountSASSignatureValues.permissions && + accountSASSignatureValues.permissions.tag && + version < "2019-12-12") { + throw RangeError("'version' must be >= '2019-12-12' when provided 't' permission."); + } + if (accountSASSignatureValues.permissions && + accountSASSignatureValues.permissions.filter && + version < "2019-12-12") { + throw RangeError("'version' must be >= '2019-12-12' when provided 'f' permission."); + } + if (accountSASSignatureValues.encryptionScope && version < "2020-12-06") { + throw RangeError("'version' must be >= '2020-12-06' when provided 'encryptionScope' in SAS."); + } + const parsedPermissions = AccountSASPermissions.parse(accountSASSignatureValues.permissions.toString()); + const parsedServices = AccountSASServices.parse(accountSASSignatureValues.services).toString(); + const parsedResourceTypes = AccountSASResourceTypes.parse(accountSASSignatureValues.resourceTypes).toString(); + let stringToSign; + if (version >= "2020-12-06") { + stringToSign = [ + sharedKeyCredential.accountName, + parsedPermissions, + parsedServices, + parsedResourceTypes, + accountSASSignatureValues.startsOn + ? truncatedISO8061Date(accountSASSignatureValues.startsOn, false) + : "", + truncatedISO8061Date(accountSASSignatureValues.expiresOn, false), + accountSASSignatureValues.ipRange ? ipRangeToString(accountSASSignatureValues.ipRange) : "", + accountSASSignatureValues.protocol ? accountSASSignatureValues.protocol : "", + version, + accountSASSignatureValues.encryptionScope ? accountSASSignatureValues.encryptionScope : "", + "", // Account SAS requires an additional newline character + ].join("\n"); + } + else { + stringToSign = [ + sharedKeyCredential.accountName, + parsedPermissions, + parsedServices, + parsedResourceTypes, + accountSASSignatureValues.startsOn + ? truncatedISO8061Date(accountSASSignatureValues.startsOn, false) + : "", + truncatedISO8061Date(accountSASSignatureValues.expiresOn, false), + accountSASSignatureValues.ipRange ? ipRangeToString(accountSASSignatureValues.ipRange) : "", + accountSASSignatureValues.protocol ? 
accountSASSignatureValues.protocol : "", + version, + "", // Account SAS requires an additional newline character + ].join("\n"); + } + const signature = sharedKeyCredential.computeHMACSHA256(stringToSign); + return new SASQueryParameters(version, signature, parsedPermissions.toString(), parsedServices, parsedResourceTypes, accountSASSignatureValues.protocol, accountSASSignatureValues.startsOn, accountSASSignatureValues.expiresOn, accountSASSignatureValues.ipRange, undefined, undefined, undefined, undefined, undefined, undefined, undefined, undefined, undefined, undefined, accountSASSignatureValues.encryptionScope); +} + +/** + * A BlobServiceClient represents a Client to the Azure Storage Blob service allowing you + * to manipulate blob containers. + */ +class BlobServiceClient extends StorageClient { + constructor(url, credentialOrPipeline, + // Legacy, no fix for eslint error without breaking. Disable it for this interface. + /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/ + options) { + let pipeline; + if (isPipelineLike(credentialOrPipeline)) { + pipeline = credentialOrPipeline; } - catch (e) { - if (((_a = e.details) === null || _a === void 0 ? void 0 : _a.errorCode) === "ContainerAlreadyExists") { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: "Expected exception when creating a container only if it does not already exist.", - }); - return Object.assign(Object.assign({ succeeded: false }, (_b = e.response) === null || _b === void 0 ? void 0 : _b.parsedHeaders), { _response: e.response }); - } - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; + else if ((coreHttp.isNode && credentialOrPipeline instanceof StorageSharedKeyCredential) || + credentialOrPipeline instanceof AnonymousCredential || + coreHttp.isTokenCredential(credentialOrPipeline)) { + pipeline = newPipeline(credentialOrPipeline, options); } - finally { - span.end(); + else { + // The second parameter is undefined. Use anonymous credential + pipeline = newPipeline(new AnonymousCredential(), options); } + super(url, pipeline); + this.serviceContext = new Service(this.storageClientContext); } /** - * Returns true if the Azure container resource represented by this client exists; false otherwise. * - * NOTE: use this function with care since an existing container might be deleted by other clients or - * applications. Vice versa new containers with the same name might be added by other clients or - * applications after this function completes. + * Creates an instance of BlobServiceClient from connection string. * - * @param options - + * @param connectionString - Account connection string or a SAS connection string of an Azure storage account. + * [ Note - Account connection string can only be used in NODE.JS runtime. ] + * Account connection string example - + * `DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=accountKey;EndpointSuffix=core.windows.net` + * SAS connection string example - + * `BlobEndpoint=https://myaccount.blob.core.windows.net/;QueueEndpoint=https://myaccount.queue.core.windows.net/;FileEndpoint=https://myaccount.file.core.windows.net/;TableEndpoint=https://myaccount.table.core.windows.net/;SharedAccessSignature=sasString` + * @param options - Optional. Options to configure the HTTP pipeline. 
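`generateAccountSASQueryParameters` ties the three helpers together: it validates that the requested permissions are allowed by the target service `version` (the `RangeError` checks above), builds the newline-separated string-to-sign, and HMAC-signs it with the account key. A hedged end-to-end sketch; the account name and key are placeholders:

```js
const {
  AccountSASPermissions,
  AccountSASResourceTypes,
  AccountSASServices,
  generateAccountSASQueryParameters,
  StorageSharedKeyCredential,
} = require("@azure/storage-blob");

const credential = new StorageSharedKeyCredential("myaccount", "<account-key>");

const sas = generateAccountSASQueryParameters(
  {
    permissions: AccountSASPermissions.parse("rl"), // read + list only
    services: AccountSASServices.parse("b").toString(), // blob service
    resourceTypes: AccountSASResourceTypes.parse("sco").toString(),
    expiresOn: new Date(Date.now() + 60 * 60 * 1000), // valid for one hour
  },
  credential
).toString();

console.log(`https://myaccount.blob.core.windows.net/?${sas}`);
```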
*/ - async exists(options = {}) { - const { span, updatedOptions } = createSpan("ContainerClient-exists", options); - try { - await this.getProperties({ - abortSignal: options.abortSignal, - tracingOptions: updatedOptions.tracingOptions, - }); - return true; - } - catch (e) { - if (e.statusCode === 404) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: "Expected exception when checking container existence", - }); - return false; + static fromConnectionString(connectionString, + // Legacy, no fix for eslint error without breaking. Disable it for this interface. + /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/ + options) { + options = options || {}; + const extractedCreds = extractConnectionStringParts(connectionString); + if (extractedCreds.kind === "AccountConnString") { + if (coreHttp.isNode) { + const sharedKeyCredential = new StorageSharedKeyCredential(extractedCreds.accountName, extractedCreds.accountKey); + if (!options.proxyOptions) { + options.proxyOptions = coreHttp.getDefaultProxySettings(extractedCreds.proxyUri); + } + const pipeline = newPipeline(sharedKeyCredential, options); + return new BlobServiceClient(extractedCreds.url, pipeline); + } + else { + throw new Error("Account connection string is only supported in Node.js environment"); } - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; } - finally { - span.end(); + else if (extractedCreds.kind === "SASConnString") { + const pipeline = newPipeline(new AnonymousCredential(), options); + return new BlobServiceClient(extractedCreds.url + "?" + extractedCreds.accountSas, pipeline); + } + else { + throw new Error("Connection string must be either an Account connection string or a SAS connection string"); } } /** - * Creates a {@link BlobClient} - * - * @param blobName - A blob name - * @returns A new BlobClient object for the given blob name. - */ - getBlobClient(blobName) { - return new BlobClient(appendToURLPath(this.url, EscapePath(blobName)), this.pipeline); - } - /** - * Creates an {@link AppendBlobClient} - * - * @param blobName - An append blob name - */ - getAppendBlobClient(blobName) { - return new AppendBlobClient(appendToURLPath(this.url, EscapePath(blobName)), this.pipeline); - } - /** - * Creates a {@link BlockBlobClient} - * - * @param blobName - A block blob name + * Creates a {@link ContainerClient} object * + * @param containerName - A container name + * @returns A new ContainerClient object for the given container name. * * Example usage: * * ```js - * const content = "Hello world!"; - * - * const blockBlobClient = containerClient.getBlockBlobClient(""); - * const uploadBlobResponse = await blockBlobClient.upload(content, content.length); + * const containerClient = blobServiceClient.getContainerClient(""); * ``` */ - getBlockBlobClient(blobName) { - return new BlockBlobClient(appendToURLPath(this.url, EscapePath(blobName)), this.pipeline); - } - /** - * Creates a {@link PageBlobClient} - * - * @param blobName - A page blob name - */ - getPageBlobClient(blobName) { - return new PageBlobClient(appendToURLPath(this.url, EscapePath(blobName)), this.pipeline); + getContainerClient(containerName) { + return new ContainerClient(appendToURLPath(this.url, encodeURIComponent(containerName)), this.pipeline); } /** - * Returns all user-defined metadata and system properties for the specified - * container. The data returned does not include the container's list of blobs. 
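Combining `fromConnectionString` with `getContainerClient`; the environment variable and container name below are placeholders, and account-key connection strings only work under Node.js, as the code above enforces:

```js
const { BlobServiceClient } = require("@azure/storage-blob");

const serviceClient = BlobServiceClient.fromConnectionString(
  process.env.AZURE_STORAGE_CONNECTION_STRING // account or SAS connection string
);
const containerClient = serviceClient.getContainerClient("my-container");
console.log(containerClient.url);
```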
- * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-properties - * - * WARNING: The `metadata` object returned in the response will have its keys in lowercase, even if - * they originally contained uppercase characters. This differs from the metadata keys returned by - * the `listContainers` method of {@link BlobServiceClient} using the `includeMetadata` option, which - * will retain their original casing. + * Create a Blob container. * - * @param options - Options to Container Get Properties operation. + * @param containerName - Name of the container to create. + * @param options - Options to configure Container Create operation. + * @returns Container creation response and the corresponding container client. */ - async getProperties(options = {}) { - if (!options.conditions) { - options.conditions = {}; - } - const { span, updatedOptions } = createSpan("ContainerClient-getProperties", options); + async createContainer(containerName, options = {}) { + const { span, updatedOptions } = createSpan("BlobServiceClient-createContainer", options); try { - return await this.containerContext.getProperties(Object.assign(Object.assign({ abortSignal: options.abortSignal }, options.conditions), convertTracingToRequestOptionsBase(updatedOptions))); + const containerClient = this.getContainerClient(containerName); + const containerCreateResponse = await containerClient.create(updatedOptions); + return { + containerClient, + containerCreateResponse, + }; } catch (e) { span.setStatus({ @@ -45252,19 +44354,17 @@ class ContainerClient extends StorageClient { } } /** - * Marks the specified container for deletion. The container and any blobs - * contained within it are later deleted during garbage collection. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container + * Deletes a Blob container. * - * @param options - Options to Container Delete operation. + * @param containerName - Name of the container to delete. + * @param options - Options to configure Container Delete operation. + * @returns Container deletion response. */ - async delete(options = {}) { - if (!options.conditions) { - options.conditions = {}; - } - const { span, updatedOptions } = createSpan("ContainerClient-delete", options); + async deleteContainer(containerName, options = {}) { + const { span, updatedOptions } = createSpan("BlobServiceClient-deleteContainer", options); try { - return await this.containerContext.delete(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, modifiedAccessConditions: options.conditions }, convertTracingToRequestOptionsBase(updatedOptions))); + const containerClient = this.getContainerClient(containerName); + return await containerClient.delete(updatedOptions); } catch (e) { span.setStatus({ @@ -45278,27 +44378,25 @@ class ContainerClient extends StorageClient { } } /** - * Marks the specified container for deletion if it exists. The container and any blobs - * contained within it are later deleted during garbage collection. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container + * Restore a previously deleted Blob container. + * This API is only functional if Container Soft Delete is enabled for the storage account associated with the container. * - * @param options - Options to Container Delete operation. + * @param deletedContainerName - Name of the previously deleted container. 
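`createContainer` and `deleteContainer` are thin conveniences over a `ContainerClient`, as their bodies show. A usage sketch with a placeholder container name:

```js
async function recreateContainer(serviceClient) {
  // createContainer returns both the new ContainerClient and the raw response.
  const { containerClient, containerCreateResponse } =
    await serviceClient.createContainer("my-container");
  console.log(`Created, requestId: ${containerCreateResponse.requestId}`);

  // Deletion only marks the container; its blobs are garbage-collected later.
  await serviceClient.deleteContainer(containerClient.containerName);
}
```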
+ * @param deletedContainerVersion - Version of the previously deleted container, used to uniquely identify the deleted container. + * @param options - Options to configure Container Restore operation. + * @returns Container deletion response. */ - async deleteIfExists(options = {}) { - var _a, _b; - const { span, updatedOptions } = createSpan("ContainerClient-deleteIfExists", options); + async undeleteContainer(deletedContainerName, deletedContainerVersion, options = {}) { + const { span, updatedOptions } = createSpan("BlobServiceClient-undeleteContainer", options); try { - const res = await this.delete(updatedOptions); - return Object.assign(Object.assign({ succeeded: true }, res), { _response: res._response }); + const containerClient = this.getContainerClient(options.destinationContainerName || deletedContainerName); + // Hack to access a protected member. + const containerContext = new Container(containerClient["storageClientContext"]); + const containerUndeleteResponse = await containerContext.restore(Object.assign({ deletedContainerName, + deletedContainerVersion }, updatedOptions)); + return { containerClient, containerUndeleteResponse }; } catch (e) { - if (((_a = e.details) === null || _a === void 0 ? void 0 : _a.errorCode) === "ContainerNotFound") { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: "Expected exception when deleting a container only if it exists.", - }); - return Object.assign(Object.assign({ succeeded: false }, (_b = e.response) === null || _b === void 0 ? void 0 : _b.parsedHeaders), { _response: e.response }); - } span.setStatus({ code: coreTracing.SpanStatusCode.ERROR, message: e.message, @@ -45310,27 +44408,23 @@ class ContainerClient extends StorageClient { } } /** - * Sets one or more user-defined name-value pairs for the specified container. - * - * If no option provided, or no metadata defined in the parameter, the container - * metadata will be removed. - * - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-metadata + * Rename an existing Blob Container. * - * @param metadata - Replace existing metadata with this value. - * If no value provided the existing metadata will be removed. - * @param options - Options to Container Set Metadata operation. + * @param sourceContainerName - The name of the source container. + * @param destinationContainerName - The new name of the container. + * @param options - Options to configure Container Rename operation. */ - async setMetadata(metadata, options = {}) { - if (!options.conditions) { - options.conditions = {}; - } - if (options.conditions.ifUnmodifiedSince) { - throw new RangeError("the IfUnmodifiedSince must have their default values because they are ignored by the blob service"); - } - const { span, updatedOptions } = createSpan("ContainerClient-setMetadata", options); + /* eslint-disable-next-line @typescript-eslint/ban-ts-comment */ + // @ts-ignore Need to hide this interface for now. Make it public and turn on the live tests for it when the service is ready. 
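Restoring a soft-deleted container requires the deleted container's `version`. One way to obtain it, sketched under the assumption that container soft delete is enabled on the account, is listing containers with `includeDeleted`:

```js
async function restoreContainer(serviceClient, name) {
  for await (const item of serviceClient.listContainers({ includeDeleted: true })) {
    if (item.deleted && item.name === name) {
      // item.version uniquely identifies this deleted incarnation.
      await serviceClient.undeleteContainer(item.name, item.version);
      return;
    }
  }
}
```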
+ async renameContainer(sourceContainerName, destinationContainerName, options = {}) { + var _a; + const { span, updatedOptions } = createSpan("BlobServiceClient-renameContainer", options); try { - return await this.containerContext.setMetadata(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions, metadata, modifiedAccessConditions: options.conditions }, convertTracingToRequestOptionsBase(updatedOptions))); + const containerClient = this.getContainerClient(destinationContainerName); + // Hack to access a protected member. + const containerContext = new Container(containerClient["storageClientContext"]); + const containerRenameResponse = await containerContext.rename(sourceContainerName, Object.assign(Object.assign({}, updatedOptions), { sourceLeaseId: (_a = options.sourceCondition) === null || _a === void 0 ? void 0 : _a.leaseId })); + return { containerClient, containerRenameResponse }; } catch (e) { span.setStatus({ @@ -45344,54 +44438,17 @@ class ContainerClient extends StorageClient { } } /** - * Gets the permissions for the specified container. The permissions indicate - * whether container data may be accessed publicly. - * - * WARNING: JavaScript Date will potentially lose precision when parsing startsOn and expiresOn strings. - * For example, new Date("2018-12-31T03:44:23.8827891Z").toISOString() will get "2018-12-31T03:44:23.882Z". - * - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-acl + * Gets the properties of a storage account’s Blob service, including properties + * for Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-service-properties * - * @param options - Options to Container Get Access Policy operation. + * @param options - Options to the Service Get Properties operation. + * @returns Response data for the Service Get Properties operation. 
*/ - async getAccessPolicy(options = {}) { - if (!options.conditions) { - options.conditions = {}; - } - const { span, updatedOptions } = createSpan("ContainerClient-getAccessPolicy", options); + async getProperties(options = {}) { + const { span, updatedOptions } = createSpan("BlobServiceClient-getProperties", options); try { - const response = await this.containerContext.getAccessPolicy(Object.assign({ abortSignal: options.abortSignal, leaseAccessConditions: options.conditions }, convertTracingToRequestOptionsBase(updatedOptions))); - const res = { - _response: response._response, - blobPublicAccess: response.blobPublicAccess, - date: response.date, - etag: response.etag, - errorCode: response.errorCode, - lastModified: response.lastModified, - requestId: response.requestId, - clientRequestId: response.clientRequestId, - signedIdentifiers: [], - version: response.version, - }; - for (const identifier of response) { - let accessPolicy = undefined; - if (identifier.accessPolicy) { - accessPolicy = { - permissions: identifier.accessPolicy.permissions, - }; - if (identifier.accessPolicy.expiresOn) { - accessPolicy.expiresOn = new Date(identifier.accessPolicy.expiresOn); - } - if (identifier.accessPolicy.startsOn) { - accessPolicy.startsOn = new Date(identifier.accessPolicy.startsOn); - } - } - res.signedIdentifiers.push({ - accessPolicy, - id: identifier.id, - }); - } - return res; + return await this.serviceContext.getProperties(Object.assign({ abortSignal: options.abortSignal }, convertTracingToRequestOptionsBase(updatedOptions))); } catch (e) { span.setStatus({ @@ -45405,42 +44462,18 @@ class ContainerClient extends StorageClient { } } /** - * Sets the permissions for the specified container. The permissions indicate - * whether blobs in a container may be accessed publicly. - * - * When you set permissions for a container, the existing permissions are replaced. - * If no access or containerAcl provided, the existing container ACL will be - * removed. - * - * When you establish a stored access policy on a container, it may take up to 30 seconds to take effect. - * During this interval, a shared access signature that is associated with the stored access policy will - * fail with status code 403 (Forbidden), until the access policy becomes active. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-acl + * Sets properties for a storage account’s Blob service endpoint, including properties + * for Storage Analytics, CORS (Cross-Origin Resource Sharing) rules and soft delete settings. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-service-properties * - * @param access - The level of public access to data in the container. - * @param containerAcl - Array of elements each having a unique Id and details of the access policy. - * @param options - Options to Container Set Access Policy operation. + * @param properties - + * @param options - Options to the Service Set Properties operation. + * @returns Response data for the Service Set Properties operation. 
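A quick sketch of reading service-level properties; the fields printed are a small subset of the response:

```js
async function showServiceProperties(serviceClient) {
  const props = await serviceClient.getProperties();
  console.log("Blob soft delete enabled:", props.deleteRetentionPolicy?.enabled);
  console.log("Static website enabled:", props.staticWebsite?.enabled);
}
```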
*/ - async setAccessPolicy(access, containerAcl, options = {}) { - options.conditions = options.conditions || {}; - const { span, updatedOptions } = createSpan("ContainerClient-setAccessPolicy", options); + async setProperties(properties, options = {}) { + const { span, updatedOptions } = createSpan("BlobServiceClient-setProperties", options); try { - const acl = []; - for (const identifier of containerAcl || []) { - acl.push({ - accessPolicy: { - expiresOn: identifier.accessPolicy.expiresOn - ? truncatedISO8061Date(identifier.accessPolicy.expiresOn) - : "", - permissions: identifier.accessPolicy.permissions, - startsOn: identifier.accessPolicy.startsOn - ? truncatedISO8061Date(identifier.accessPolicy.startsOn) - : "", - }, - id: identifier.id, - }); - } - return await this.containerContext.setAccessPolicy(Object.assign({ abortSignal: options.abortSignal, access, containerAcl: acl, leaseAccessConditions: options.conditions, modifiedAccessConditions: options.conditions }, convertTracingToRequestOptionsBase(updatedOptions))); + return await this.serviceContext.setProperties(properties, Object.assign({ abortSignal: options.abortSignal }, convertTracingToRequestOptionsBase(updatedOptions))); } catch (e) { span.setStatus({ @@ -45454,45 +44487,18 @@ class ContainerClient extends StorageClient { } } /** - * Get a {@link BlobLeaseClient} that manages leases on the container. - * - * @param proposeLeaseId - Initial proposed lease Id. - * @returns A new BlobLeaseClient object for managing leases on the container. - */ - getBlobLeaseClient(proposeLeaseId) { - return new BlobLeaseClient(this, proposeLeaseId); - } - /** - * Creates a new block blob, or updates the content of an existing block blob. - * - * Updating an existing block blob overwrites any existing metadata on the blob. - * Partial updates are not supported; the content of the existing blob is - * overwritten with the new content. To perform a partial update of a block blob's, - * use {@link BlockBlobClient.stageBlock} and {@link BlockBlobClient.commitBlockList}. - * - * This is a non-parallel uploading method, please use {@link BlockBlobClient.uploadFile}, - * {@link BlockBlobClient.uploadStream} or {@link BlockBlobClient.uploadBrowserData} for better - * performance with concurrency uploading. - * - * @see https://docs.microsoft.com/rest/api/storageservices/put-blob + * Retrieves statistics related to replication for the Blob service. It is only + * available on the secondary location endpoint when read-access geo-redundant + * replication is enabled for the storage account. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-service-stats * - * @param blobName - Name of the block blob to create or update. - * @param body - Blob, string, ArrayBuffer, ArrayBufferView or a function - * which returns a new Readable stream whose offset is from data source beginning. - * @param contentLength - Length of body in bytes. Use Buffer.byteLength() to calculate body length for a - * string including non non-Base64/Hex-encoded characters. - * @param options - Options to configure the Block Blob Upload operation. - * @returns Block Blob upload response data and the corresponding BlockBlobClient instance. + * @param options - Options to the Service Get Statistics operation. + * @returns Response data for the Service Get Statistics operation. 
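`setProperties` forwards a `BlobServiceProperties` object to the service; elements omitted from the object are left unchanged on the account, so a partial update like the sketch below (enabling blob soft delete) is possible:

```js
async function enableBlobSoftDelete(serviceClient) {
  await serviceClient.setProperties({
    deleteRetentionPolicy: { enabled: true, days: 7 },
  });
}
```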
*/ - async uploadBlockBlob(blobName, body, contentLength, options = {}) { - const { span, updatedOptions } = createSpan("ContainerClient-uploadBlockBlob", options); + async getStatistics(options = {}) { + const { span, updatedOptions } = createSpan("BlobServiceClient-getStatistics", options); try { - const blockBlobClient = this.getBlockBlobClient(blobName); - const response = await blockBlobClient.upload(body, contentLength, updatedOptions); - return { - blockBlobClient, - response, - }; + return await this.serviceContext.getStatistics(Object.assign({ abortSignal: options.abortSignal }, convertTracingToRequestOptionsBase(updatedOptions))); } catch (e) { span.setStatus({ @@ -45506,24 +44512,19 @@ class ContainerClient extends StorageClient { } } /** - * Marks the specified blob or snapshot for deletion. The blob is later deleted - * during garbage collection. Note that in order to delete a blob, you must delete - * all of its snapshots. You can delete both at the same time with the Delete - * Blob operation. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob + * The Get Account Information operation returns the sku name and account kind + * for the specified account. + * The Get Account Information operation is available on service versions beginning + * with version 2018-03-28. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-account-information * - * @param blobName - - * @param options - Options to Blob Delete operation. - * @returns Block blob deletion response data. + * @param options - Options to the Service Get Account Info operation. + * @returns Response data for the Service Get Account Info operation. */ - async deleteBlob(blobName, options = {}) { - const { span, updatedOptions } = createSpan("ContainerClient-deleteBlob", options); + async getAccountInfo(options = {}) { + const { span, updatedOptions } = createSpan("BlobServiceClient-getAccountInfo", options); try { - let blobClient = this.getBlobClient(blobName); - if (options.versionId) { - blobClient = blobClient.withVersion(options.versionId); - } - return await blobClient.delete(updatedOptions); + return await this.serviceContext.getAccountInfo(Object.assign({ abortSignal: options.abortSignal }, convertTracingToRequestOptionsBase(updatedOptions))); } catch (e) { span.setStatus({ @@ -45537,28 +44538,23 @@ class ContainerClient extends StorageClient { } } /** - * listBlobFlatSegment returns a single segment of blobs starting from the - * specified Marker. Use an empty Marker to start enumeration from the beginning. - * After getting a segment, process it, and then call listBlobsFlatSegment again - * (passing the the previously-returned Marker) to get the next segment. - * @see https://docs.microsoft.com/rest/api/storageservices/list-blobs + * Returns a list of the containers under the specified account. + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/list-containers2 * - * @param marker - A string value that identifies the portion of the list to be returned with the next list operation. - * @param options - Options to Container List Blob Flat Segment operation. + * @param marker - A string value that identifies the portion of + * the list of containers to be returned with the next listing operation. The + * operation returns the continuationToken value within the response body if the + * listing operation did not return all containers remaining to be listed + * with the current page. 
The continuationToken value can be used as the value for + * the marker parameter in a subsequent call to request the next page of list + * items. The marker value is opaque to the client. + * @param options - Options to the Service List Container Segment operation. + * @returns Response data for the Service List Container Segment operation. */ - async listBlobFlatSegment(marker, options = {}) { - const { span, updatedOptions } = createSpan("ContainerClient-listBlobFlatSegment", options); + async listContainersSegment(marker, options = {}) { + const { span, updatedOptions } = createSpan("BlobServiceClient-listContainersSegment", options); try { - const response = await this.containerContext.listBlobFlatSegment(Object.assign(Object.assign({ marker }, options), convertTracingToRequestOptionsBase(updatedOptions))); - response.segment.blobItems = []; - if (response.segment["Blob"] !== undefined) { - response.segment.blobItems = ProcessBlobItems(response.segment["Blob"]); - } - const wrappedResponse = Object.assign(Object.assign({}, response), { _response: Object.assign(Object.assign({}, response._response), { parsedBody: ConvertInternalResponseOfListBlobFlat(response._response.parsedBody) }), segment: Object.assign(Object.assign({}, response.segment), { blobItems: response.segment.blobItems.map((blobItemInteral) => { - const blobItem = Object.assign(Object.assign({}, blobItemInteral), { name: BlobNameToString(blobItemInteral.name), tags: toTags(blobItemInteral.blobTags), objectReplicationSourceProperties: parseObjectReplicationRecord(blobItemInteral.objectReplicationMetadata) }); - return blobItem; - }) }) }); - return wrappedResponse; + return await this.serviceContext.listContainersSegment(Object.assign(Object.assign(Object.assign({ abortSignal: options.abortSignal, marker }, options), { include: typeof options.include === "string" ? [options.include] : options.include }), convertTracingToRequestOptionsBase(updatedOptions))); } catch (e) { span.setStatus({ @@ -45572,38 +44568,35 @@ class ContainerClient extends StorageClient { } } /** - * listBlobHierarchySegment returns a single segment of blobs starting from - * the specified Marker. Use an empty Marker to start enumeration from the - * beginning. After getting a segment, process it, and then call listBlobsHierarchicalSegment - * again (passing the the previously-returned Marker) to get the next segment. - * @see https://docs.microsoft.com/rest/api/storageservices/list-blobs + * The Filter Blobs operation enables callers to list blobs across all containers whose tags + * match a given search expression. Filter blobs searches across all containers within a + * storage account but can be scoped within the expression to a single container. * - * @param delimiter - The character or string used to define the virtual hierarchy - * @param marker - A string value that identifies the portion of the list to be returned with the next list operation. - * @param options - Options to Container List Blob Hierarchy Segment operation. + * @param tagFilterSqlExpression - The where parameter enables the caller to query blobs whose tags match a given expression. + * The given expression must evaluate to true for a blob to be returned in the results. + * The[OData - ABNF] filter syntax rule defines the formal grammar for the value of the where query parameter; + * however, only a subset of the OData filter syntax is supported in the Blob service. 
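The two account-level getters above need little setup; note that `getStatistics` is only served from the secondary endpoint of a storage account with read-access geo-redundant replication:

```js
async function accountDetails(serviceClient) {
  const info = await serviceClient.getAccountInfo();
  console.log(`SKU: ${info.skuName}, kind: ${info.accountKind}`);

  // Only valid against the "-secondary" endpoint of an RA-GRS account.
  const stats = await serviceClient.getStatistics();
  console.log("Geo-replication status:", stats.geoReplication?.status);
}
```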
+ * @param marker - A string value that identifies the portion of + * the list of blobs to be returned with the next listing operation. The + * operation returns the continuationToken value within the response body if the + * listing operation did not return all blobs remaining to be listed + * with the current page. The continuationToken value can be used as the value for + * the marker parameter in a subsequent call to request the next page of list + * items. The marker value is opaque to the client. + * @param options - Options to find blobs by tags. */ - async listBlobHierarchySegment(delimiter, marker, options = {}) { - var _a; - const { span, updatedOptions } = createSpan("ContainerClient-listBlobHierarchySegment", options); + async findBlobsByTagsSegment(tagFilterSqlExpression, marker, options = {}) { + const { span, updatedOptions } = createSpan("BlobServiceClient-findBlobsByTagsSegment", options); try { - const response = await this.containerContext.listBlobHierarchySegment(delimiter, Object.assign(Object.assign({ marker }, options), convertTracingToRequestOptionsBase(updatedOptions))); - response.segment.blobItems = []; - if (response.segment["Blob"] !== undefined) { - response.segment.blobItems = ProcessBlobItems(response.segment["Blob"]); - } - response.segment.blobPrefixes = []; - if (response.segment["BlobPrefix"] !== undefined) { - response.segment.blobPrefixes = ProcessBlobPrefixes(response.segment["BlobPrefix"]); - } - const wrappedResponse = Object.assign(Object.assign({}, response), { _response: Object.assign(Object.assign({}, response._response), { parsedBody: ConvertInternalResponseOfListBlobHierarchy(response._response.parsedBody) }), segment: Object.assign(Object.assign({}, response.segment), { blobItems: response.segment.blobItems.map((blobItemInteral) => { - const blobItem = Object.assign(Object.assign({}, blobItemInteral), { name: BlobNameToString(blobItemInteral.name), tags: toTags(blobItemInteral.blobTags), objectReplicationSourceProperties: parseObjectReplicationRecord(blobItemInteral.objectReplicationMetadata) }); - return blobItem; - }), blobPrefixes: (_a = response.segment.blobPrefixes) === null || _a === void 0 ? void 0 : _a.map((blobPrefixInternal) => { - const blobPrefix = { - name: BlobNameToString(blobPrefixInternal.name), - }; - return blobPrefix; - }) }) }); + const response = await this.serviceContext.filterBlobs(Object.assign({ abortSignal: options.abortSignal, where: tagFilterSqlExpression, marker, maxPageSize: options.maxPageSize }, convertTracingToRequestOptionsBase(updatedOptions))); + const wrappedResponse = Object.assign(Object.assign({}, response), { _response: response._response, blobs: response.blobs.map((blob) => { + var _a; + let tagValue = ""; + if (((_a = blob.tags) === null || _a === void 0 ? void 0 : _a.blobTagSet.length) === 1) { + tagValue = blob.tags.blobTagSet[0].value; + } + return Object.assign(Object.assign({}, blob), { tags: toTags(blob.tags), tagValue }); + }) }); return wrappedResponse; } catch (e) { @@ -45618,42 +44611,51 @@ class ContainerClient extends StorageClient { } } /** - * Returns an AsyncIterableIterator for ContainerListBlobFlatSegmentResponse + * Returns an AsyncIterableIterator for ServiceFindBlobsByTagsSegmentResponse. * + * @param tagFilterSqlExpression - The where parameter enables the caller to query blobs whose tags match a given expression. + * The given expression must evaluate to true for a blob to be returned in the results. 
+ * The[OData - ABNF] filter syntax rule defines the formal grammar for the value of the where query parameter; + * however, only a subset of the OData filter syntax is supported in the Blob service. * @param marker - A string value that identifies the portion of * the list of blobs to be returned with the next listing operation. The - * operation returns the ContinuationToken value within the response body if the + * operation returns the continuationToken value within the response body if the * listing operation did not return all blobs remaining to be listed - * with the current page. The ContinuationToken value can be used as the value for + * with the current page. The continuationToken value can be used as the value for * the marker parameter in a subsequent call to request the next page of list * items. The marker value is opaque to the client. - * @param options - Options to list blobs operation. + * @param options - Options to find blobs by tags. */ - listSegments(marker, options = {}) { - return tslib.__asyncGenerator(this, arguments, function* listSegments_1() { - let listBlobsFlatSegmentResponse; + findBlobsByTagsSegments(tagFilterSqlExpression, marker, options = {}) { + return tslib.__asyncGenerator(this, arguments, function* findBlobsByTagsSegments_1() { + let response; if (!!marker || marker === undefined) { do { - listBlobsFlatSegmentResponse = yield tslib.__await(this.listBlobFlatSegment(marker, options)); - marker = listBlobsFlatSegmentResponse.continuationToken; - yield yield tslib.__await(yield tslib.__await(listBlobsFlatSegmentResponse)); + response = yield tslib.__await(this.findBlobsByTagsSegment(tagFilterSqlExpression, marker, options)); + response.blobs = response.blobs || []; + marker = response.continuationToken; + yield yield tslib.__await(response); } while (marker); } }); } /** - * Returns an AsyncIterableIterator of {@link BlobItem} objects + * Returns an AsyncIterableIterator for blobs. * - * @param options - Options to list blobs operation. + * @param tagFilterSqlExpression - The where parameter enables the caller to query blobs whose tags match a given expression. + * The given expression must evaluate to true for a blob to be returned in the results. + * The[OData - ABNF] filter syntax rule defines the formal grammar for the value of the where query parameter; + * however, only a subset of the OData filter syntax is supported in the Blob service. + * @param options - Options to findBlobsByTagsItems. 
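A hedged sketch of the tag filter syntax these methods accept: values are compared as single-quoted literals, multiple predicates combine with `AND`, and an `@container` term can scope the search to one container. The names below are illustrative:

```js
async function findProjectBlobs(serviceClient) {
  const where = `@container='my-container' AND project='alpha'`;
  for await (const blob of serviceClient.findBlobsByTags(where)) {
    // tagValue is populated when exactly one tag matched, per the wrapper above.
    console.log(blob.name, blob.tagValue);
  }
}
```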
     */
-    listItems(options = {}) {
-        return tslib.__asyncGenerator(this, arguments, function* listItems_1() {
+    findBlobsByTagsItems(tagFilterSqlExpression, options = {}) {
+        return tslib.__asyncGenerator(this, arguments, function* findBlobsByTagsItems_1() {
             var e_1, _a;
             let marker;
             try {
-                for (var _b = tslib.__asyncValues(this.listSegments(marker, options)), _c; _c = yield tslib.__await(_b.next()), !_c.done;) {
-                    const listBlobsFlatSegmentResponse = _c.value;
-                    yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(listBlobsFlatSegmentResponse.segment.blobItems)));
+                for (var _b = tslib.__asyncValues(this.findBlobsByTagsSegments(tagFilterSqlExpression, marker, options)), _c; _c = yield tslib.__await(_b.next()), !_c.done;) {
+                    const segment = _c.value;
+                    yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(segment.blobs)));
                 }
             }
             catch (e_1_1) { e_1 = { error: e_1_1 }; }
@@ -45666,19 +44668,19 @@ class ContainerClient extends StorageClient {
         });
     }
     /**
-     * Returns an async iterable iterator to list all the blobs
+     * Returns an async iterable iterator to find all blobs with the specified tags
      * under the specified account.
     *
     * .byPage() returns an async iterable iterator to list the blobs in pages.
     *
+     * @see https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags
+     *
     * Example using `for await` syntax:
     *
     * ```js
-     * // Get the containerClient before you run these snippets,
-     * // Can be obtained from `blobServiceClient.getContainerClient("");`
     * let i = 1;
-     * for await (const blob of containerClient.listBlobsFlat()) {
-     *   console.log(`Blob ${i++}: ${blob.name}`);
+     * for await (const blob of blobServiceClient.findBlobsByTags("tagkey='tagvalue'")) {
+     *   console.log(`Blob ${i++}: ${blob.name}`);
     * }
     * ```
     *
@@ -45686,7 +44688,7 @@ class ContainerClient extends StorageClient {
     *
     * ```js
     * let i = 1;
-     * let iter = containerClient.listBlobsFlat();
+     * const iter = blobServiceClient.findBlobsByTags("tagkey='tagvalue'");
     * let blobItem = await iter.next();
     * while (!blobItem.done) {
     *   console.log(`Blob ${i++}: ${blobItem.value.name}`);
@@ -45699,9 +44701,11 @@ class ContainerClient extends StorageClient {
     * ```js
     * // passing optional maxPageSize in the page settings
     * let i = 1;
-     * for await (const response of containerClient.listBlobsFlat().byPage({ maxPageSize: 20 })) {
-     *   for (const blob of response.segment.blobItems) {
-     *     console.log(`Blob ${i++}: ${blob.name}`);
+     * for await (const response of blobServiceClient.findBlobsByTags("tagkey='tagvalue'").byPage({ maxPageSize: 20 })) {
+     *   if (response.blobs) {
+     *     for (const blob of response.blobs) {
+     *       console.log(`Blob ${i++}: ${blob.name}`);
+     *     }
     *   }
     * }
     * ```
@@ -45710,69 +44714,42 @@ class ContainerClient extends StorageClient {
    *
    * ```js
    * let i = 1;
-     * let iterator = containerClient.listBlobsFlat().byPage({ maxPageSize: 2 });
+     * let iterator = blobServiceClient.findBlobsByTags("tagkey='tagvalue'").byPage({ maxPageSize: 2 });
    * let response = (await iterator.next()).value;
    *
    * // Prints 2 blob names
-     * for (const blob of response.segment.blobItems) {
-     *   console.log(`Blob ${i++}: ${blob.name}`);
+     * if (response.blobs) {
+     *   for (const blob of response.blobs) {
+     *     console.log(`Blob ${i++}: ${blob.name}`);
+     *   }
    * }
    *
    * // Gets next marker
    * let marker = response.continuationToken;
-     *
    * // Passing next marker as continuationToken
-     *
-     * iterator = containerClient.listBlobsFlat().byPage({ continuationToken: marker, maxPageSize: 10 });
+     * iterator = blobServiceClient
+     *
.findBlobsByTags("tagkey='tagvalue'") + * .byPage({ continuationToken: marker, maxPageSize: 10 }); * response = (await iterator.next()).value; * - * // Prints 10 blob names - * for (const blob of response.segment.blobItems) { - * console.log(`Blob ${i++}: ${blob.name}`); + * // Prints blob names + * if (response.blobs) { + * for (const blob of response.blobs) { + * console.log(`Blob ${i++}: ${blob.name}`); + * } * } * ``` * - * @param options - Options to list blobs. - * @returns An asyncIterableIterator that supports paging. + * @param tagFilterSqlExpression - The where parameter enables the caller to query blobs whose tags match a given expression. + * The given expression must evaluate to true for a blob to be returned in the results. + * The [OData - ABNF] filter syntax rule defines the formal grammar for the value of the where query parameter; + * however, only a subset of the OData filter syntax is supported in the Blob service. + * @param options - Options to find blobs by tags. */ - listBlobsFlat(options = {}) { - const include = []; - if (options.includeCopy) { - include.push("copy"); - } - if (options.includeDeleted) { - include.push("deleted"); - } - if (options.includeMetadata) { - include.push("metadata"); - } - if (options.includeSnapshots) { - include.push("snapshots"); - } - if (options.includeVersions) { - include.push("versions"); - } - if (options.includeUncommitedBlobs) { - include.push("uncommittedblobs"); - } - if (options.includeTags) { - include.push("tags"); - } - if (options.includeDeletedWithVersions) { - include.push("deletedwithversions"); - } - if (options.includeImmutabilityPolicy) { - include.push("immutabilitypolicy"); - } - if (options.includeLegalHold) { - include.push("legalhold"); - } - if (options.prefix === "") { - options.prefix = undefined; - } - const updatedOptions = Object.assign(Object.assign({}, options), (include.length > 0 ? { include: include } : {})); + findBlobsByTags(tagFilterSqlExpression, options = {}) { // AsyncIterableIterator to iterate over blobs - const iter = this.listItems(updatedOptions); + const listSegmentOptions = Object.assign({}, options); + const iter = this.findBlobsByTagsItems(tagFilterSqlExpression, listSegmentOptions); return { /** * The next method, part of the iteration protocol @@ -45790,57 +44767,49 @@ class ContainerClient extends StorageClient { * Return an AsyncIterableIterator that works a page at a time */ byPage: (settings = {}) => { - return this.listSegments(settings.continuationToken, Object.assign({ maxPageSize: settings.maxPageSize }, updatedOptions)); + return this.findBlobsByTagsSegments(tagFilterSqlExpression, settings.continuationToken, Object.assign({ maxPageSize: settings.maxPageSize }, listSegmentOptions)); }, }; } /** - * Returns an AsyncIterableIterator for ContainerListBlobHierarchySegmentResponse + * Returns an AsyncIterableIterator for ServiceListContainersSegmentResponses * - * @param delimiter - The character or string used to define the virtual hierarchy * @param marker - A string value that identifies the portion of - * the list of blobs to be returned with the next listing operation. The - * operation returns the ContinuationToken value within the response body if the - * listing operation did not return all blobs remaining to be listed - * with the current page. The ContinuationToken value can be used as the value for - * the marker parameter in a subsequent call to request the next page of list - * items. The marker value is opaque to the client.
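The `tagFilterSqlExpression` parameter documented above accepts only a subset of the OData grammar. A minimal sketch of typical expressions, assuming a `blobServiceClient` instance and placeholder tag, date, and container names:

```js
// Sketch only: tag filter expressions for findBlobsByTags.
// Values compare as strings; AND is supported, OR is not.
const byValue = "tagkey='tagvalue'";
const byRange = `"date" >= '2023-01' AND "date" <= '2023-07'`;
// The special @container key scopes the query to a single container.
const scoped = "@container='mycontainer' AND tagkey='tagvalue'";
for await (const blob of blobServiceClient.findBlobsByTags(scoped)) {
  // tagValue is populated when the filter names exactly one tag
  console.log(blob.name, blob.tagValue);
}
```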
- * @param options - Options to list blobs operation. + * the list of containers to be returned with the next listing operation. The + * operation returns the continuationToken value within the response body if the + * listing operation did not return all containers remaining to be listed + * with the current page. The continuationToken value can be used as the value for + * the marker parameter in a subsequent call to request the next page of list + * items. The marker value is opaque to the client. + * @param options - Options to list containers operation. */ - listHierarchySegments(delimiter, marker, options = {}) { - return tslib.__asyncGenerator(this, arguments, function* listHierarchySegments_1() { - let listBlobsHierarchySegmentResponse; + listSegments(marker, options = {}) { + return tslib.__asyncGenerator(this, arguments, function* listSegments_1() { + let listContainersSegmentResponse; if (!!marker || marker === undefined) { do { - listBlobsHierarchySegmentResponse = yield tslib.__await(this.listBlobHierarchySegment(delimiter, marker, options)); - marker = listBlobsHierarchySegmentResponse.continuationToken; - yield yield tslib.__await(yield tslib.__await(listBlobsHierarchySegmentResponse)); + listContainersSegmentResponse = yield tslib.__await(this.listContainersSegment(marker, options)); + listContainersSegmentResponse.containerItems = + listContainersSegmentResponse.containerItems || []; + marker = listContainersSegmentResponse.continuationToken; + yield yield tslib.__await(yield tslib.__await(listContainersSegmentResponse)); } while (marker); } }); } /** - * Returns an AsyncIterableIterator for {@link BlobPrefix} and {@link BlobItem} objects. + * Returns an AsyncIterableIterator for Container Items * - * @param delimiter - The character or string used to define the virtual hierarchy - * @param options - Options to list blobs operation. + * @param options - Options to list containers operation. */ - listItemsByHierarchy(delimiter, options = {}) { - return tslib.__asyncGenerator(this, arguments, function* listItemsByHierarchy_1() { + listItems(options = {}) { + return tslib.__asyncGenerator(this, arguments, function* listItems_1() { var e_2, _a; let marker; try { - for (var _b = tslib.__asyncValues(this.listHierarchySegments(delimiter, marker, options)), _c; _c = yield tslib.__await(_b.next()), !_c.done;) { - const listBlobsHierarchySegmentResponse = _c.value; - const segment = listBlobsHierarchySegmentResponse.segment; - if (segment.blobPrefixes) { - for (const prefix of segment.blobPrefixes) { - yield yield tslib.__await(Object.assign({ kind: "prefix" }, prefix)); - } - } - for (const blob of segment.blobItems) { - yield yield tslib.__await(Object.assign({ kind: "blob" }, blob)); - } + for (var _b = tslib.__asyncValues(this.listSegments(marker, options)), _c; _c = yield tslib.__await(_b.next()), !_c.done;) { + const segment = _c.value; + yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(segment.containerItems))); } } catch (e_2_1) { e_2 = { error: e_2_1 }; } @@ -45853,129 +44822,101 @@ class ContainerClient extends StorageClient { }); } /** - * Returns an async iterable iterator to list all the blobs by hierarchy. + * Returns an async iterable iterator to list all the containers * under the specified account. * - * .byPage() returns an async iterable iterator to list the blobs by hierarchy in pages. + * .byPage() returns an async iterable iterator to list the containers in pages. 
* * Example using `for await` syntax: * * ```js - * for await (const item of containerClient.listBlobsByHierarchy("/")) { - * if (item.kind === "prefix") { - * console.log(`\tBlobPrefix: ${item.name}`); - * } else { - * console.log(`\tBlobItem: name - ${item.name}`); - * } + * let i = 1; + * for await (const container of blobServiceClient.listContainers()) { + * console.log(`Container ${i++}: ${container.name}`); * } * ``` * * Example using `iter.next()`: * * ```js - * let iter = containerClient.listBlobsByHierarchy("/", { prefix: "prefix1/" }); - * let entity = await iter.next(); - * while (!entity.done) { - * let item = entity.value; - * if (item.kind === "prefix") { - * console.log(`\tBlobPrefix: ${item.name}`); - * } else { - * console.log(`\tBlobItem: name - ${item.name}`); - * } - * entity = await iter.next(); + * let i = 1; + * const iter = blobServiceClient.listContainers(); + * let containerItem = await iter.next(); + * while (!containerItem.done) { + * console.log(`Container ${i++}: ${containerItem.value.name}`); + * containerItem = await iter.next(); * } * ``` * * Example using `byPage()`: * * ```js - * console.log("Listing blobs by hierarchy by page"); - * for await (const response of containerClient.listBlobsByHierarchy("/").byPage()) { - * const segment = response.segment; - * if (segment.blobPrefixes) { - * for (const prefix of segment.blobPrefixes) { - * console.log(`\tBlobPrefix: ${prefix.name}`); + * // passing optional maxPageSize in the page settings + * let i = 1; + * for await (const response of blobServiceClient.listContainers().byPage({ maxPageSize: 20 })) { + * if (response.containerItems) { + * for (const container of response.containerItems) { + * console.log(`Container ${i++}: ${container.name}`); * } * } - * for (const blob of response.segment.blobItems) { - * console.log(`\tBlobItem: name - ${blob.name}`); - * } * } * ``` * - * Example using paging with a max page size: + * Example using paging with a marker: * * ```js - * console.log("Listing blobs by hierarchy by page, specifying a prefix and a max page size"); - * * let i = 1; - * for await (const response of containerClient - * .listBlobsByHierarchy("/", { prefix: "prefix2/sub1/" }) - * .byPage({ maxPageSize: 2 })) { - * console.log(`Page ${i++}`); - * const segment = response.segment; + * let iterator = blobServiceClient.listContainers().byPage({ maxPageSize: 2 }); + * let response = (await iterator.next()).value; * - * if (segment.blobPrefixes) { - * for (const prefix of segment.blobPrefixes) { - * console.log(`\tBlobPrefix: ${prefix.name}`); - * } + * // Prints 2 container names + * if (response.containerItems) { + * for (const container of response.containerItems) { + * console.log(`Container ${i++}: ${container.name}`); * } + * } * - * for (const blob of response.segment.blobItems) { - * console.log(`\tBlobItem: name - ${blob.name}`); + * // Gets next marker + * let marker = response.continuationToken; + * // Passing next marker as continuationToken + * iterator = blobServiceClient + * .listContainers() + * .byPage({ continuationToken: marker, maxPageSize: 10 }); + * response = (await iterator.next()).value; + * + * // Prints 10 container names + * if (response.containerItems) { + * for (const container of response.containerItems) { + * console.log(`Container ${i++}: ${container.name}`); * } * } * ``` * - * @param delimiter - The character or string used to define the virtual hierarchy - * @param options - Options to list blobs operation. + * @param options - Options to list containers. 
+ * @returns An asyncIterableIterator that supports paging. */ - listBlobsByHierarchy(delimiter, options = {}) { - if (delimiter === "") { - throw new RangeError("delimiter should contain one or more characters"); + listContainers(options = {}) { + if (options.prefix === "") { + options.prefix = undefined; } const include = []; - if (options.includeCopy) { - include.push("copy"); - } if (options.includeDeleted) { include.push("deleted"); } if (options.includeMetadata) { include.push("metadata"); } - if (options.includeSnapshots) { - include.push("snapshots"); - } - if (options.includeVersions) { - include.push("versions"); - } - if (options.includeUncommitedBlobs) { - include.push("uncommittedblobs"); - } - if (options.includeTags) { - include.push("tags"); - } - if (options.includeDeletedWithVersions) { - include.push("deletedwithversions"); - } - if (options.includeImmutabilityPolicy) { - include.push("immutabilitypolicy"); - } - if (options.includeLegalHold) { - include.push("legalhold"); - } - if (options.prefix === "") { - options.prefix = undefined; + if (options.includeSystem) { + include.push("system"); } - const updatedOptions = Object.assign(Object.assign({}, options), (include.length > 0 ? { include: include } : {})); - // AsyncIterableIterator to iterate over blob prefixes and blobs - const iter = this.listItemsByHierarchy(delimiter, updatedOptions); + // AsyncIterableIterator to iterate over containers + const listSegmentOptions = Object.assign(Object.assign({}, options), (include.length > 0 ? { include } : {})); + const iter = this.listItems(listSegmentOptions); return { /** * The next method, part of the iteration protocol */ - async next() { + next() { return iter.next(); }, /** @@ -45988,40 +44929,39 @@ class ContainerClient extends StorageClient { * Return an AsyncIterableIterator that works a page at a time */ byPage: (settings = {}) => { - return this.listHierarchySegments(delimiter, settings.continuationToken, Object.assign({ maxPageSize: settings.maxPageSize }, updatedOptions)); + return this.listSegments(settings.continuationToken, Object.assign({ maxPageSize: settings.maxPageSize }, listSegmentOptions)); }, }; } /** - * The Filter Blobs operation enables callers to list blobs in the container whose tags - * match a given search expression. + * ONLY AVAILABLE WHEN USING BEARER TOKEN AUTHENTICATION (TokenCredential). * - * @param tagFilterSqlExpression - The where parameter enables the caller to query blobs whose tags match a given expression. - * The given expression must evaluate to true for a blob to be returned in the results. - * The[OData - ABNF] filter syntax rule defines the formal grammar for the value of the where query parameter; - * however, only a subset of the OData filter syntax is supported in the Blob service. - * @param marker - A string value that identifies the portion of - * the list of blobs to be returned with the next listing operation. The - * operation returns the continuationToken value within the response body if the - * listing operation did not return all blobs remaining to be listed - * with the current page. The continuationToken value can be used as the value for - * the marker parameter in a subsequent call to request the next page of list - * items. The marker value is opaque to the client. - * @param options - Options to find blobs by tags. + * Retrieves a user delegation key for the Blob service. This is only a valid operation when using + * bearer token authentication. 
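A compact companion to the listContainers examples above, combining the documented options; the prefix value is a placeholder, and `includeMetadata` maps onto the include handling in the implementation:

```js
// Sketch only: list containers whose names start with "logs-",
// asking the service to return per-container metadata as well.
for await (const container of blobServiceClient.listContainers({
  prefix: "logs-",       // placeholder name filter
  includeMetadata: true, // pushes "metadata" onto the include list above
})) {
  console.log(container.name, container.metadata);
}
```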
+ * + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-user-delegation-key + * + * @param startsOn - The start time for the user delegation SAS. Must be within 7 days of the current time + * @param expiresOn - The end time for the user delegation SAS. Must be within 7 days of the current time */ - async findBlobsByTagsSegment(tagFilterSqlExpression, marker, options = {}) { - const { span, updatedOptions } = createSpan("ContainerClient-findBlobsByTagsSegment", options); + async getUserDelegationKey(startsOn, expiresOn, options = {}) { + const { span, updatedOptions } = createSpan("BlobServiceClient-getUserDelegationKey", options); try { - const response = await this.containerContext.filterBlobs(Object.assign({ abortSignal: options.abortSignal, where: tagFilterSqlExpression, marker, maxPageSize: options.maxPageSize }, convertTracingToRequestOptionsBase(updatedOptions))); - const wrappedResponse = Object.assign(Object.assign({}, response), { _response: response._response, blobs: response.blobs.map((blob) => { - var _a; - let tagValue = ""; - if (((_a = blob.tags) === null || _a === void 0 ? void 0 : _a.blobTagSet.length) === 1) { - tagValue = blob.tags.blobTagSet[0].value; - } - return Object.assign(Object.assign({}, blob), { tags: toTags(blob.tags), tagValue }); - }) }); - return wrappedResponse; + const response = await this.serviceContext.getUserDelegationKey({ + startsOn: truncatedISO8061Date(startsOn, false), + expiresOn: truncatedISO8061Date(expiresOn, false), + }, Object.assign({ abortSignal: options.abortSignal }, convertTracingToRequestOptionsBase(updatedOptions))); + const userDelegationKey = { + signedObjectId: response.signedObjectId, + signedTenantId: response.signedTenantId, + signedStartsOn: new Date(response.signedStartsOn), + signedExpiresOn: new Date(response.signedExpiresOn), + signedService: response.signedService, + signedVersion: response.signedVersion, + value: response.value, + }; + const res = Object.assign({ _response: response._response, requestId: response.requestId, clientRequestId: response.clientRequestId, version: response.version, date: response.date, errorCode: response.errorCode }, userDelegationKey); + return res; } catch (e) { span.setStatus({ @@ -46035,1527 +44975,1926 @@ class ContainerClient extends StorageClient { } } /** - * Returns an AsyncIterableIterator for ContainerFindBlobsByTagsSegmentResponse. + * Creates a BlobBatchClient object to conduct batch operations. + * + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/blob-batch + * + * @returns A new BlobBatchClient object for this service. + */ + getBlobBatchClient() { + return new BlobBatchClient(this.url, this.pipeline); + } + /** + * Only available for BlobServiceClient constructed with a shared key credential. + * + * Generates a Blob account Shared Access Signature (SAS) URI based on the client properties + * and parameters passed in. The SAS is signed by the shared key credential of the client. + * + * @see https://docs.microsoft.com/en-us/rest/api/storageservices/create-account-sas + * + * @param expiresOn - Optional. The time at which the shared access signature becomes invalid. Default to an hour later if not provided. + * @param permissions - Specifies the list of permissions to be associated with the SAS. + * @param resourceTypes - Specifies the resource types associated with the shared access signature. + * @param options - Optional parameters. 
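The returned key is typically fed to the SAS helpers exported by this same bundle. A hedged sketch, with placeholder account, container, and blob names, of signing a one-hour read-only blob SAS with a user delegation key:

```js
// Sketch only: requires the client to be constructed with a TokenCredential.
const startsOn = new Date();
const expiresOn = new Date(startsOn.getTime() + 3600 * 1000); // within the 7-day limit
const udk = await blobServiceClient.getUserDelegationKey(startsOn, expiresOn);
const sas = generateBlobSASQueryParameters(
  {
    containerName: "mycontainer", // placeholder
    blobName: "myblob",           // placeholder
    permissions: BlobSASPermissions.parse("r"),
    startsOn,
    expiresOn,
  },
  udk,         // user delegation key in place of a shared key credential
  "myaccount"  // placeholder storage account name
).toString();
console.log(sas);
```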
+ * @returns An account SAS URI consisting of the URI to the resource represented by this client, followed by the generated SAS token. + */ + generateAccountSasUrl(expiresOn, permissions = AccountSASPermissions.parse("r"), resourceTypes = "sco", options = {}) { + if (!(this.credential instanceof StorageSharedKeyCredential)) { + throw RangeError("Can only generate the account SAS when the client is initialized with a shared key credential"); + } + if (expiresOn === undefined) { + const now = new Date(); + expiresOn = new Date(now.getTime() + 3600 * 1000); + } + const sas = generateAccountSASQueryParameters(Object.assign({ permissions, + expiresOn, + resourceTypes, services: AccountSASServices.parse("b").toString() }, options), this.credential).toString(); + return appendToURLQuery(this.url, sas); + } +} + +Object.defineProperty(exports, "BaseRequestPolicy", ({ + enumerable: true, + get: function () { return coreHttp.BaseRequestPolicy; } +})); +Object.defineProperty(exports, "HttpHeaders", ({ + enumerable: true, + get: function () { return coreHttp.HttpHeaders; } +})); +Object.defineProperty(exports, "RequestPolicyOptions", ({ + enumerable: true, + get: function () { return coreHttp.RequestPolicyOptions; } +})); +Object.defineProperty(exports, "RestError", ({ + enumerable: true, + get: function () { return coreHttp.RestError; } +})); +Object.defineProperty(exports, "WebResource", ({ + enumerable: true, + get: function () { return coreHttp.WebResource; } +})); +Object.defineProperty(exports, "deserializationPolicy", ({ + enumerable: true, + get: function () { return coreHttp.deserializationPolicy; } +})); +exports.AccountSASPermissions = AccountSASPermissions; +exports.AccountSASResourceTypes = AccountSASResourceTypes; +exports.AccountSASServices = AccountSASServices; +exports.AnonymousCredential = AnonymousCredential; +exports.AnonymousCredentialPolicy = AnonymousCredentialPolicy; +exports.AppendBlobClient = AppendBlobClient; +exports.BlobBatch = BlobBatch; +exports.BlobBatchClient = BlobBatchClient; +exports.BlobClient = BlobClient; +exports.BlobLeaseClient = BlobLeaseClient; +exports.BlobSASPermissions = BlobSASPermissions; +exports.BlobServiceClient = BlobServiceClient; +exports.BlockBlobClient = BlockBlobClient; +exports.ContainerClient = ContainerClient; +exports.ContainerSASPermissions = ContainerSASPermissions; +exports.Credential = Credential; +exports.CredentialPolicy = CredentialPolicy; +exports.PageBlobClient = PageBlobClient; +exports.Pipeline = Pipeline; +exports.SASQueryParameters = SASQueryParameters; +exports.StorageBrowserPolicy = StorageBrowserPolicy; +exports.StorageBrowserPolicyFactory = StorageBrowserPolicyFactory; +exports.StorageOAuthScopes = StorageOAuthScopes; +exports.StorageRetryPolicy = StorageRetryPolicy; +exports.StorageRetryPolicyFactory = StorageRetryPolicyFactory; +exports.StorageSharedKeyCredential = StorageSharedKeyCredential; +exports.StorageSharedKeyCredentialPolicy = StorageSharedKeyCredentialPolicy; +exports.generateAccountSASQueryParameters = generateAccountSASQueryParameters; +exports.generateBlobSASQueryParameters = generateBlobSASQueryParameters; +exports.isPipelineLike = isPipelineLike; +exports.logger = logger; +exports.newPipeline = newPipeline; +//# sourceMappingURL=index.js.map + + +/***/ }), + +/***/ 7171: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file 
except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +var __spreadArray = (this && this.__spreadArray) || function (to, from) { + for (var i = 0, il = from.length, j = to.length; i < il; i++, j++) + to[j] = from[i]; + return to; +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.ContextAPI = void 0; +var NoopContextManager_1 = __nccwpck_require__(4118); +var global_utils_1 = __nccwpck_require__(5135); +var diag_1 = __nccwpck_require__(1877); +var API_NAME = 'context'; +var NOOP_CONTEXT_MANAGER = new NoopContextManager_1.NoopContextManager(); +/** + * Singleton object which represents the entry point to the OpenTelemetry Context API + */ +var ContextAPI = /** @class */ (function () { + /** Empty private constructor prevents end users from constructing a new instance of the API */ + function ContextAPI() { + } + /** Get the singleton instance of the Context API */ + ContextAPI.getInstance = function () { + if (!this._instance) { + this._instance = new ContextAPI(); + } + return this._instance; + }; + /** + * Set the current context manager. + * + * @returns true if the context manager was successfully registered, else false + */ + ContextAPI.prototype.setGlobalContextManager = function (contextManager) { + return global_utils_1.registerGlobal(API_NAME, contextManager, diag_1.DiagAPI.instance()); + }; + /** + * Get the currently active context + */ + ContextAPI.prototype.active = function () { + return this._getContextManager().active(); + }; + /** + * Execute a function with an active context * - * @param tagFilterSqlExpression - The where parameter enables the caller to query blobs whose tags match a given expression. - * The given expression must evaluate to true for a blob to be returned in the results. - * The[OData - ABNF] filter syntax rule defines the formal grammar for the value of the where query parameter; - * however, only a subset of the OData filter syntax is supported in the Blob service. - * @param marker - A string value that identifies the portion of - * the list of blobs to be returned with the next listing operation. The - * operation returns the continuationToken value within the response body if the - * listing operation did not return all blobs remaining to be listed - * with the current page. The continuationToken value can be used as the value for - * the marker parameter in a subsequent call to request the next page of list - * items. The marker value is opaque to the client. - * @param options - Options to find blobs by tags. 
+ * @param context context to be active during function execution + * @param fn function to execute in a context + * @param thisArg optional receiver to be used for calling fn + * @param args optional arguments forwarded to fn */ - findBlobsByTagsSegments(tagFilterSqlExpression, marker, options = {}) { - return tslib.__asyncGenerator(this, arguments, function* findBlobsByTagsSegments_1() { - let response; - if (!!marker || marker === undefined) { - do { - response = yield tslib.__await(this.findBlobsByTagsSegment(tagFilterSqlExpression, marker, options)); - response.blobs = response.blobs || []; - marker = response.continuationToken; - yield yield tslib.__await(response); - } while (marker); - } - }); - } + ContextAPI.prototype.with = function (context, fn, thisArg) { + var _a; + var args = []; + for (var _i = 3; _i < arguments.length; _i++) { + args[_i - 3] = arguments[_i]; + } + return (_a = this._getContextManager()).with.apply(_a, __spreadArray([context, fn, thisArg], args)); + }; /** - * Returns an AsyncIterableIterator for blobs. + * Bind a context to a target function or event emitter * - * @param tagFilterSqlExpression - The where parameter enables the caller to query blobs whose tags match a given expression. - * The given expression must evaluate to true for a blob to be returned in the results. - * The[OData - ABNF] filter syntax rule defines the formal grammar for the value of the where query parameter; - * however, only a subset of the OData filter syntax is supported in the Blob service. - * @param options - Options to findBlobsByTagsItems. + * @param context context to bind to the event emitter or function. Defaults to the currently active context + * @param target function or event emitter to bind */ - findBlobsByTagsItems(tagFilterSqlExpression, options = {}) { - return tslib.__asyncGenerator(this, arguments, function* findBlobsByTagsItems_1() { - var e_3, _a; - let marker; - try { - for (var _b = tslib.__asyncValues(this.findBlobsByTagsSegments(tagFilterSqlExpression, marker, options)), _c; _c = yield tslib.__await(_b.next()), !_c.done;) { - const segment = _c.value; - yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(segment.blobs))); + ContextAPI.prototype.bind = function (context, target) { + return this._getContextManager().bind(context, target); + }; + ContextAPI.prototype._getContextManager = function () { + return global_utils_1.getGlobal(API_NAME) || NOOP_CONTEXT_MANAGER; + }; + /** Disable and remove the global context manager */ + ContextAPI.prototype.disable = function () { + this._getContextManager().disable(); + global_utils_1.unregisterGlobal(API_NAME, diag_1.DiagAPI.instance()); + }; + return ContextAPI; +}()); +exports.ContextAPI = ContextAPI; +//# sourceMappingURL=context.js.map + +/***/ }), + +/***/ 1877: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
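A minimal sketch of the ContextAPI surface implemented above, going through the public @opentelemetry/api entry point rather than the singleton directly:

```js
// Sketch only: derive a context carrying a value and run a callback inside it.
const { context, createContextKey } = require("@opentelemetry/api");
const KEY = createContextKey("example component");
const ctx = context.active().setValue(KEY, 42);
context.with(ctx, () => {
  // active() resolves through the registered (or noop) context manager
  console.log(context.active().getValue(KEY)); // 42
});
```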
+ */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.DiagAPI = void 0; +var ComponentLogger_1 = __nccwpck_require__(7978); +var logLevelLogger_1 = __nccwpck_require__(9639); +var types_1 = __nccwpck_require__(8077); +var global_utils_1 = __nccwpck_require__(5135); +var API_NAME = 'diag'; +/** + * Singleton object which represents the entry point to the OpenTelemetry internal + * diagnostic API + */ +var DiagAPI = /** @class */ (function () { + /** + * Private internal constructor + * @private + */ + function DiagAPI() { + function _logProxy(funcName) { + return function () { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; } + var logger = global_utils_1.getGlobal('diag'); + // shortcut if logger not set + if (!logger) + return; + return logger[funcName].apply(logger, args); + }; + } + // Using self local variable for minification purposes as 'this' cannot be minified + var self = this; + // DiagAPI specific functions + self.setLogger = function (logger, logLevel) { + var _a, _b; + if (logLevel === void 0) { logLevel = types_1.DiagLogLevel.INFO; } + if (logger === self) { + // There isn't much we can do here. + // Logging to the console might break the user application. + // Try to log to self. If a logger was previously registered it will receive the log. + var err = new Error('Cannot use diag as the logger for itself. Please use a DiagLogger implementation like ConsoleDiagLogger or a custom implementation'); + self.error((_a = err.stack) !== null && _a !== void 0 ? _a : err.message); + return false; } - catch (e_3_1) { e_3 = { error: e_3_1 }; } - finally { - try { - if (_c && !_c.done && (_a = _b.return)) yield tslib.__await(_a.call(_b)); - } - finally { if (e_3) throw e_3.error; } + var oldLogger = global_utils_1.getGlobal('diag'); + var newLogger = logLevelLogger_1.createLogLevelDiagLogger(logLevel, logger); + // There already is a logger registered. We'll let it know before overwriting it. + if (oldLogger) { + var stack = (_b = new Error().stack) !== null && _b !== void 0 ? _b : ''; + oldLogger.warn("Current logger will be overwritten from " + stack); + newLogger.warn("Current logger will overwrite one already registered from " + stack); } - }); - } - /** - * Returns an async iterable iterator to find all blobs with specified tag - * under the specified container. - * - * .byPage() returns an async iterable iterator to list the blobs in pages.
- * - * Example using `for await` syntax: - * - * ```js - * let i = 1; - * for await (const blob of containerClient.findBlobsByTags("tagkey='tagvalue'")) { - * console.log(`Blob ${i++}: ${blob.name}`); - * } - * ``` - * - * Example using `iter.next()`: - * - * ```js - * let i = 1; - * const iter = containerClient.findBlobsByTags("tagkey='tagvalue'"); - * let blobItem = await iter.next(); - * while (!blobItem.done) { - * console.log(`Blob ${i++}: ${blobItem.value.name}`); - * blobItem = await iter.next(); - * } - * ``` - * - * Example using `byPage()`: - * - * ```js - * // passing optional maxPageSize in the page settings - * let i = 1; - * for await (const response of containerClient.findBlobsByTags("tagkey='tagvalue'").byPage({ maxPageSize: 20 })) { - * if (response.blobs) { - * for (const blob of response.blobs) { - * console.log(`Blob ${i++}: ${blob.name}`); - * } - * } - * } - * ``` - * - * Example using paging with a marker: - * - * ```js - * let i = 1; - * let iterator = containerClient.findBlobsByTags("tagkey='tagvalue'").byPage({ maxPageSize: 2 }); - * let response = (await iterator.next()).value; - * - * // Prints 2 blob names - * if (response.blobs) { - * for (const blob of response.blobs) { - * console.log(`Blob ${i++}: ${blob.name}`); - * } - * } - * - * // Gets next marker - * let marker = response.continuationToken; - * // Passing next marker as continuationToken - * iterator = containerClient - * .findBlobsByTags("tagkey='tagvalue'") - * .byPage({ continuationToken: marker, maxPageSize: 10 }); - * response = (await iterator.next()).value; - * - * // Prints blob names - * if (response.blobs) { - * for (const blob of response.blobs) { - * console.log(`Blob ${i++}: ${blob.name}`); - * } - * } - * ``` - * - * @param tagFilterSqlExpression - The where parameter enables the caller to query blobs whose tags match a given expression. - * The given expression must evaluate to true for a blob to be returned in the results. - * The[OData - ABNF] filter syntax rule defines the formal grammar for the value of the where query parameter; - * however, only a subset of the OData filter syntax is supported in the Blob service. - * @param options - Options to find blobs by tags. 
- */ - findBlobsByTags(tagFilterSqlExpression, options = {}) { - // AsyncIterableIterator to iterate over blobs - const listSegmentOptions = Object.assign({}, options); - const iter = this.findBlobsByTagsItems(tagFilterSqlExpression, listSegmentOptions); - return { - /** - * The next method, part of the iteration protocol - */ - next() { - return iter.next(); - }, - /** - * The connection to the async iterator, part of the iteration protocol - */ - [Symbol.asyncIterator]() { - return this; - }, - /** - * Return an AsyncIterableIterator that works a page at a time - */ - byPage: (settings = {}) => { - return this.findBlobsByTagsSegments(tagFilterSqlExpression, settings.continuationToken, Object.assign({ maxPageSize: settings.maxPageSize }, listSegmentOptions)); - }, + return global_utils_1.registerGlobal('diag', newLogger, self, true); + }; + self.disable = function () { + global_utils_1.unregisterGlobal(API_NAME, self); + }; + self.createComponentLogger = function (options) { + return new ComponentLogger_1.DiagComponentLogger(options); }; + self.verbose = _logProxy('verbose'); + self.debug = _logProxy('debug'); + self.info = _logProxy('info'); + self.warn = _logProxy('warn'); + self.error = _logProxy('error'); } - getContainerNameFromUrl() { - let containerName; - try { - // URL may look like the following - // "https://myaccount.blob.core.windows.net/mycontainer?sasString"; - // "https://myaccount.blob.core.windows.net/mycontainer"; - // IPv4/IPv6 address hosts, Endpoints - `http://127.0.0.1:10000/devstoreaccount1/containername` - // http://localhost:10001/devstoreaccount1/containername - const parsedUrl = coreHttp.URLBuilder.parse(this.url); - if (parsedUrl.getHost().split(".")[1] === "blob") { - // "https://myaccount.blob.core.windows.net/containername". - // "https://customdomain.com/containername". - // .getPath() -> /containername - containerName = parsedUrl.getPath().split("/")[1]; - } - else if (isIpEndpointStyle(parsedUrl)) { - // IPv4/IPv6 address hosts... Example - http://192.0.0.10:10001/devstoreaccount1/containername - // Single word domain without a [dot] in the endpoint... Example - http://localhost:10001/devstoreaccount1/containername - // .getPath() -> /devstoreaccount1/containername - containerName = parsedUrl.getPath().split("/")[2]; - } - else { - // "https://customdomain.com/containername". - // .getPath() -> /containername - containerName = parsedUrl.getPath().split("/")[1]; - } - // decode the encoded containerName - to get all the special characters that might be present in it - containerName = decodeURIComponent(containerName); - if (!containerName) { - throw new Error("Provided containerName is invalid."); - } - return containerName; - } - catch (error) { - throw new Error("Unable to extract containerName with provided information."); + /** Get the singleton instance of the DiagAPI API */ + DiagAPI.instance = function () { + if (!this._instance) { + this._instance = new DiagAPI(); } + return this._instance; + }; + return DiagAPI; +}()); +exports.DiagAPI = DiagAPI; +//# sourceMappingURL=diag.js.map + +/***/ }), + +/***/ 9909: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
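A minimal sketch of the setLogger flow above, using the console-backed logger that ships with @opentelemetry/api; messages below the chosen level are dropped by createLogLevelDiagLogger:

```js
// Sketch only: register a console diag logger filtered at WARN.
const { diag, DiagConsoleLogger, DiagLogLevel } = require("@opentelemetry/api");
diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.WARN);
diag.warn("passes the WARN threshold");
diag.debug("silently dropped by the level filter");
```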
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.PropagationAPI = void 0; +var global_utils_1 = __nccwpck_require__(5135); +var NoopTextMapPropagator_1 = __nccwpck_require__(2368); +var TextMapPropagator_1 = __nccwpck_require__(865); +var context_helpers_1 = __nccwpck_require__(7682); +var utils_1 = __nccwpck_require__(8136); +var diag_1 = __nccwpck_require__(1877); +var API_NAME = 'propagation'; +var NOOP_TEXT_MAP_PROPAGATOR = new NoopTextMapPropagator_1.NoopTextMapPropagator(); +/** + * Singleton object which represents the entry point to the OpenTelemetry Propagation API + */ +var PropagationAPI = /** @class */ (function () { + /** Empty private constructor prevents end users from constructing a new instance of the API */ + function PropagationAPI() { + this.createBaggage = utils_1.createBaggage; + this.getBaggage = context_helpers_1.getBaggage; + this.setBaggage = context_helpers_1.setBaggage; + this.deleteBaggage = context_helpers_1.deleteBaggage; } + /** Get the singleton instance of the Propagator API */ + PropagationAPI.getInstance = function () { + if (!this._instance) { + this._instance = new PropagationAPI(); + } + return this._instance; + }; /** - * Only available for ContainerClient constructed with a shared key credential. - * - * Generates a Blob Container Service Shared Access Signature (SAS) URI based on the client properties - * and parameters passed in. The SAS is signed by the shared key credential of the client. - * - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas + * Set the current propagator. * - * @param options - Optional parameters. - * @returns The SAS URI consisting of the URI to the resource represented by this client, followed by the generated SAS token. + * @returns true if the propagator was successfully registered, else false */ - generateSasUrl(options) { - return new Promise((resolve) => { - if (!(this.credential instanceof StorageSharedKeyCredential)) { - throw new RangeError("Can only generate the SAS when the client is initialized with a shared key credential"); - } - const sas = generateBlobSASQueryParameters(Object.assign({ containerName: this._containerName }, options), this.credential).toString(); - resolve(appendToURLQuery(this.url, sas)); - }); - } + PropagationAPI.prototype.setGlobalPropagator = function (propagator) { + return global_utils_1.registerGlobal(API_NAME, propagator, diag_1.DiagAPI.instance()); + }; /** - * Creates a BlobBatchClient object to conduct batch operations. 
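A hedged sketch of the inject/extract pair above. With no global propagator registered these calls fall back to the no-op propagator, so a registered propagator (for example the W3C trace-context one from an SDK) is assumed:

```js
// Sketch only: round-trip the active context through a plain-object carrier.
const { propagation, context } = require("@opentelemetry/api");
const carrier = {}; // e.g. outgoing HTTP headers
propagation.inject(context.active(), carrier);
// ...transmit carrier to the remote side...
const remoteCtx = propagation.extract(context.active(), carrier);
console.log(propagation.fields(), carrier);
```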
+ * Inject context into a carrier to be propagated inter-process * - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/blob-batch + * @param context Context carrying tracing data to inject + * @param carrier carrier to inject context into + * @param setter Function used to set values on the carrier + */ + PropagationAPI.prototype.inject = function (context, carrier, setter) { + if (setter === void 0) { setter = TextMapPropagator_1.defaultTextMapSetter; } + return this._getGlobalPropagator().inject(context, carrier, setter); + }; + /** + * Extract context from a carrier * - * @returns A new BlobBatchClient object for this container. + * @param context Context which the newly created context will inherit from + * @param carrier Carrier to extract context from + * @param getter Function used to extract keys from a carrier */ - getBlobBatchClient() { - return new BlobBatchClient(this.url, this.pipeline); - } -} + PropagationAPI.prototype.extract = function (context, carrier, getter) { + if (getter === void 0) { getter = TextMapPropagator_1.defaultTextMapGetter; } + return this._getGlobalPropagator().extract(context, carrier, getter); + }; + /** + * Return a list of all fields which may be used by the propagator. + */ + PropagationAPI.prototype.fields = function () { + return this._getGlobalPropagator().fields(); + }; + /** Remove the global propagator */ + PropagationAPI.prototype.disable = function () { + global_utils_1.unregisterGlobal(API_NAME, diag_1.DiagAPI.instance()); + }; + PropagationAPI.prototype._getGlobalPropagator = function () { + return global_utils_1.getGlobal(API_NAME) || NOOP_TEXT_MAP_PROPAGATOR; + }; + return PropagationAPI; +}()); +exports.PropagationAPI = PropagationAPI; +//# sourceMappingURL=propagation.js.map -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -/** - * ONLY AVAILABLE IN NODE.JS RUNTIME. +/***/ }), + +/***/ 1539: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors * - * This is a helper class to construct a string representing the permissions granted by an AccountSAS. Setting a value - * to true means that any SAS which uses these permissions will grant permissions for that operation. Once all the - * values are set, this should be serialized with toString and set as the permissions field on an - * {@link AccountSASSignatureValues} object. It is possible to construct the permissions string without this class, but - * the order of the permissions is particular and this class guarantees correctness. - */ -class AccountSASPermissions { - constructor() { - /** - * Permission to read resources and list queues and tables granted. - */ - this.read = false; - /** - * Permission to write resources granted. - */ - this.write = false; - /** - * Permission to create blobs and files granted. - */ - this.delete = false; - /** - * Permission to delete versions granted. - */ - this.deleteVersion = false; - /** - * Permission to list blob containers, blobs, shares, directories, and files granted. - */ - this.list = false; - /** - * Permission to add messages, table entities, and append to blobs granted. - */ - this.add = false; - /** - * Permission to create blobs and files granted. - */ - this.create = false; - /** - * Permissions to update messages and table entities granted. - */ - this.update = false; - /** - * Permission to get and delete messages granted. - */ - this.process = false; - /** - * Specfies Tag access granted. 
- */ - this.tag = false; - /** - * Permission to filter blobs. - */ - this.filter = false; - /** - * Permission to set immutability policy. - */ - this.setImmutabilityPolicy = false; - /** - * Specifies that Permanent Delete is permitted. - */ - this.permanentDelete = false; + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.TraceAPI = void 0; +var global_utils_1 = __nccwpck_require__(5135); +var ProxyTracerProvider_1 = __nccwpck_require__(2285); +var spancontext_utils_1 = __nccwpck_require__(9745); +var context_utils_1 = __nccwpck_require__(3326); +var diag_1 = __nccwpck_require__(1877); +var API_NAME = 'trace'; +/** + * Singleton object which represents the entry point to the OpenTelemetry Tracing API + */ +var TraceAPI = /** @class */ (function () { + /** Empty private constructor prevents end users from constructing a new instance of the API */ + function TraceAPI() { + this._proxyTracerProvider = new ProxyTracerProvider_1.ProxyTracerProvider(); + this.wrapSpanContext = spancontext_utils_1.wrapSpanContext; + this.isSpanContextValid = spancontext_utils_1.isSpanContextValid; + this.deleteSpan = context_utils_1.deleteSpan; + this.getSpan = context_utils_1.getSpan; + this.getActiveSpan = context_utils_1.getActiveSpan; + this.getSpanContext = context_utils_1.getSpanContext; + this.setSpan = context_utils_1.setSpan; + this.setSpanContext = context_utils_1.setSpanContext; } + /** Get the singleton instance of the Trace API */ + TraceAPI.getInstance = function () { + if (!this._instance) { + this._instance = new TraceAPI(); + } + return this._instance; + }; /** - * Parse initializes the AccountSASPermissions fields from a string. + * Set the current global tracer. 
* - * @param permissions - + * @returns true if the tracer provider was successfully registered, else false */ - static parse(permissions) { - const accountSASPermissions = new AccountSASPermissions(); - for (const c of permissions) { - switch (c) { - case "r": - accountSASPermissions.read = true; - break; - case "w": - accountSASPermissions.write = true; - break; - case "d": - accountSASPermissions.delete = true; - break; - case "x": - accountSASPermissions.deleteVersion = true; - break; - case "l": - accountSASPermissions.list = true; - break; - case "a": - accountSASPermissions.add = true; - break; - case "c": - accountSASPermissions.create = true; - break; - case "u": - accountSASPermissions.update = true; - break; - case "p": - accountSASPermissions.process = true; - break; - case "t": - accountSASPermissions.tag = true; - break; - case "f": - accountSASPermissions.filter = true; - break; - case "i": - accountSASPermissions.setImmutabilityPolicy = true; - break; - case "y": - accountSASPermissions.permanentDelete = true; - break; - default: - throw new RangeError(`Invalid permission character: ${c}`); - } + TraceAPI.prototype.setGlobalTracerProvider = function (provider) { + var success = global_utils_1.registerGlobal(API_NAME, this._proxyTracerProvider, diag_1.DiagAPI.instance()); + if (success) { + this._proxyTracerProvider.setDelegate(provider); } - return accountSASPermissions; - } + return success; + }; /** - * Creates a {@link AccountSASPermissions} from a raw object which contains same keys as it - * and boolean values for them. - * - * @param permissionLike - + * Returns the global tracer provider. */ - static from(permissionLike) { - const accountSASPermissions = new AccountSASPermissions(); - if (permissionLike.read) { - accountSASPermissions.read = true; - } - if (permissionLike.write) { - accountSASPermissions.write = true; - } - if (permissionLike.delete) { - accountSASPermissions.delete = true; - } - if (permissionLike.deleteVersion) { - accountSASPermissions.deleteVersion = true; - } - if (permissionLike.filter) { - accountSASPermissions.filter = true; - } - if (permissionLike.tag) { - accountSASPermissions.tag = true; - } - if (permissionLike.list) { - accountSASPermissions.list = true; - } - if (permissionLike.add) { - accountSASPermissions.add = true; - } - if (permissionLike.create) { - accountSASPermissions.create = true; - } - if (permissionLike.update) { - accountSASPermissions.update = true; - } - if (permissionLike.process) { - accountSASPermissions.process = true; + TraceAPI.prototype.getTracerProvider = function () { + return global_utils_1.getGlobal(API_NAME) || this._proxyTracerProvider; + }; + /** + * Returns a tracer from the global tracer provider. + */ + TraceAPI.prototype.getTracer = function (name, version) { + return this.getTracerProvider().getTracer(name, version); + }; + /** Remove the global tracer provider */ + TraceAPI.prototype.disable = function () { + global_utils_1.unregisterGlobal(API_NAME, diag_1.DiagAPI.instance()); + this._proxyTracerProvider = new ProxyTracerProvider_1.ProxyTracerProvider(); + }; + return TraceAPI; +}()); +exports.TraceAPI = TraceAPI; +//# sourceMappingURL=trace.js.map + +/***/ }), + +/***/ 7682: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.deleteBaggage = exports.setBaggage = exports.getBaggage = void 0; +var context_1 = __nccwpck_require__(8242); +/** + * Baggage key + */ +var BAGGAGE_KEY = context_1.createContextKey('OpenTelemetry Baggage Key'); +/** + * Retrieve the current baggage from the given context + * + * @param {Context} Context that manage all context values + * @returns {Baggage} Extracted baggage from the context + */ +function getBaggage(context) { + return context.getValue(BAGGAGE_KEY) || undefined; +} +exports.getBaggage = getBaggage; +/** + * Store a baggage in the given context + * + * @param {Context} Context that manage all context values + * @param {Baggage} baggage that will be set in the actual context + */ +function setBaggage(context, baggage) { + return context.setValue(BAGGAGE_KEY, baggage); +} +exports.setBaggage = setBaggage; +/** + * Delete the baggage stored in the given context + * + * @param {Context} Context that manage all context values + */ +function deleteBaggage(context) { + return context.deleteValue(BAGGAGE_KEY); +} +exports.deleteBaggage = deleteBaggage; +//# sourceMappingURL=context-helpers.js.map + +/***/ }), + +/***/ 4811: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.BaggageImpl = void 0; +var BaggageImpl = /** @class */ (function () { + function BaggageImpl(entries) { + this._entries = entries ? 
new Map(entries) : new Map(); + } + BaggageImpl.prototype.getEntry = function (key) { + var entry = this._entries.get(key); + if (!entry) { + return undefined; } - if (permissionLike.setImmutabilityPolicy) { - accountSASPermissions.setImmutabilityPolicy = true; + return Object.assign({}, entry); + }; + BaggageImpl.prototype.getAllEntries = function () { + return Array.from(this._entries.entries()).map(function (_a) { + var k = _a[0], v = _a[1]; + return [k, v]; + }); + }; + BaggageImpl.prototype.setEntry = function (key, entry) { + var newBaggage = new BaggageImpl(this._entries); + newBaggage._entries.set(key, entry); + return newBaggage; + }; + BaggageImpl.prototype.removeEntry = function (key) { + var newBaggage = new BaggageImpl(this._entries); + newBaggage._entries.delete(key); + return newBaggage; + }; + BaggageImpl.prototype.removeEntries = function () { + var keys = []; + for (var _i = 0; _i < arguments.length; _i++) { + keys[_i] = arguments[_i]; } - if (permissionLike.permanentDelete) { - accountSASPermissions.permanentDelete = true; + var newBaggage = new BaggageImpl(this._entries); + for (var _a = 0, keys_1 = keys; _a < keys_1.length; _a++) { + var key = keys_1[_a]; + newBaggage._entries.delete(key); } - return accountSASPermissions; + return newBaggage; + }; + BaggageImpl.prototype.clear = function () { + return new BaggageImpl(); + }; + return BaggageImpl; +}()); +exports.BaggageImpl = BaggageImpl; +//# sourceMappingURL=baggage-impl.js.map + +/***/ }), + +/***/ 3542: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.baggageEntryMetadataSymbol = void 0; +/** + * Symbol used to make BaggageEntryMetadata an opaque type + */ +exports.baggageEntryMetadataSymbol = Symbol('BaggageEntryMetadata'); +//# sourceMappingURL=symbol.js.map + +/***/ }), + +/***/ 1508: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +//# sourceMappingURL=types.js.map + +/***/ }), + +/***/ 8136: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.baggageEntryMetadataFromString = exports.createBaggage = void 0; +var diag_1 = __nccwpck_require__(1877); +var baggage_impl_1 = __nccwpck_require__(4811); +var symbol_1 = __nccwpck_require__(3542); +var diag = diag_1.DiagAPI.instance(); +/** + * Create a new Baggage with optional entries + * + * @param entries An array of baggage entries the new baggage should contain + */ +function createBaggage(entries) { + if (entries === void 0) { entries = {}; } + return new baggage_impl_1.BaggageImpl(new Map(Object.entries(entries))); +} +exports.createBaggage = createBaggage; +/** + * Create a serializable BaggageEntryMetadata object from a string. + * + * @param str string metadata. Format is currently not defined by the spec and has no special meaning. + * + */ +function baggageEntryMetadataFromString(str) { + if (typeof str !== 'string') { + diag.error("Cannot create baggage metadata from unknown type: " + typeof str); + str = ''; } + return { + __TYPE__: symbol_1.baggageEntryMetadataSymbol, + toString: function () { + return str; + }, + }; +} +exports.baggageEntryMetadataFromString = baggageEntryMetadataFromString; +//# sourceMappingURL=utils.js.map + +/***/ }), + +/***/ 1109: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +//# sourceMappingURL=Attributes.js.map + +/***/ }), + +/***/ 4447: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
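A minimal sketch tying together createBaggage and the context helpers from module 7682 above, via the propagation API that re-exports them; the entry key and value are placeholders:

```js
// Sketch only: baggage is immutable, so each set/remove returns a new instance.
const { propagation, context } = require("@opentelemetry/api");
const bag = propagation.createBaggage({ "user.id": { value: "123" } });
const ctx = propagation.setBaggage(context.active(), bag);
console.log(propagation.getBaggage(ctx).getEntry("user.id").value); // "123"
```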
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +//# sourceMappingURL=Exception.js.map + +/***/ }), + +/***/ 2358: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +//# sourceMappingURL=Time.js.map + +/***/ }), + +/***/ 4118: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +var __spreadArray = (this && this.__spreadArray) || function (to, from) { + for (var i = 0, il = from.length, j = to.length; i < il; i++, j++) + to[j] = from[i]; + return to; +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.NoopContextManager = void 0; +var context_1 = __nccwpck_require__(8242); +var NoopContextManager = /** @class */ (function () { + function NoopContextManager() { + } + NoopContextManager.prototype.active = function () { + return context_1.ROOT_CONTEXT; + }; + NoopContextManager.prototype.with = function (_context, fn, thisArg) { + var args = []; + for (var _i = 3; _i < arguments.length; _i++) { + args[_i - 3] = arguments[_i]; + } + return fn.call.apply(fn, __spreadArray([thisArg], args)); + }; + NoopContextManager.prototype.bind = function (_context, target) { + return target; + }; + NoopContextManager.prototype.enable = function () { + return this; + }; + NoopContextManager.prototype.disable = function () { + return this; + }; + return NoopContextManager; +}()); +exports.NoopContextManager = NoopContextManager; +//# sourceMappingURL=NoopContextManager.js.map + +/***/ }), + +/***/ 8242: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.ROOT_CONTEXT = exports.createContextKey = void 0; +/** Get a key to uniquely identify a context value */ +function createContextKey(description) { + // The specification states that for the same input, multiple calls should + // return different keys. Due to the nature of the JS dependency management + // system, this creates problems where multiple versions of some package + // could hold different keys for the same property. 
+ // + // Therefore, we use Symbol.for which returns the same key for the same input. + return Symbol.for(description); +} +exports.createContextKey = createContextKey; +var BaseContext = /** @class */ (function () { /** - * Produces the SAS permissions string for an Azure Storage account. - * Call this method to set AccountSASSignatureValues Permissions field. - * - * Using this method will guarantee the resource types are in - * an order accepted by the service. - * - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas + * Construct a new context which inherits values from an optional parent context. * + * @param parentContext a context from which to inherit values */ - toString() { - // The order of the characters should be as specified here to ensure correctness: - // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas - // Use a string array instead of string concatenating += operator for performance - const permissions = []; - if (this.read) { - permissions.push("r"); - } - if (this.write) { - permissions.push("w"); - } - if (this.delete) { - permissions.push("d"); - } - if (this.deleteVersion) { - permissions.push("x"); - } - if (this.filter) { - permissions.push("f"); - } - if (this.tag) { - permissions.push("t"); - } - if (this.list) { - permissions.push("l"); - } - if (this.add) { - permissions.push("a"); - } - if (this.create) { - permissions.push("c"); + function BaseContext(parentContext) { + // for minification + var self = this; + self._currentContext = parentContext ? new Map(parentContext) : new Map(); + self.getValue = function (key) { return self._currentContext.get(key); }; + self.setValue = function (key, value) { + var context = new BaseContext(self._currentContext); + context._currentContext.set(key, value); + return context; + }; + self.deleteValue = function (key) { + var context = new BaseContext(self._currentContext); + context._currentContext.delete(key); + return context; + }; + } + return BaseContext; +}()); +/** The root context is used as the default parent context when there is no active context */ +exports.ROOT_CONTEXT = new BaseContext(); +//# sourceMappingURL=context.js.map + +/***/ }), + +/***/ 6504: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +//# sourceMappingURL=types.js.map + +/***/ }), + +/***/ 7978: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.DiagComponentLogger = void 0; +var global_utils_1 = __nccwpck_require__(5135); +/** + * Component Logger which is meant to be used as part of any component which + * will add automatically additional namespace in front of the log message. + * It will then forward all message to global diag logger + * @example + * const cLogger = diag.createComponentLogger({ namespace: '@opentelemetry/instrumentation-http' }); + * cLogger.debug('test'); + * // @opentelemetry/instrumentation-http test + */ +var DiagComponentLogger = /** @class */ (function () { + function DiagComponentLogger(props) { + this._namespace = props.namespace || 'DiagComponentLogger'; + } + DiagComponentLogger.prototype.debug = function () { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; } - if (this.update) { - permissions.push("u"); + return logProxy('debug', this._namespace, args); + }; + DiagComponentLogger.prototype.error = function () { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; } - if (this.process) { - permissions.push("p"); + return logProxy('error', this._namespace, args); + }; + DiagComponentLogger.prototype.info = function () { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; } - if (this.setImmutabilityPolicy) { - permissions.push("i"); + return logProxy('info', this._namespace, args); + }; + DiagComponentLogger.prototype.warn = function () { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; } - if (this.permanentDelete) { - permissions.push("y"); + return logProxy('warn', this._namespace, args); + }; + DiagComponentLogger.prototype.verbose = function () { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; } - return permissions.join(""); + return logProxy('verbose', this._namespace, args); + }; + return DiagComponentLogger; +}()); +exports.DiagComponentLogger = DiagComponentLogger; +function logProxy(funcName, namespace, args) { + var logger = global_utils_1.getGlobal('diag'); + // shortcut if logger not set + if (!logger) { + return; } + args.unshift(namespace); + return logger[funcName].apply(logger, args); } +//# sourceMappingURL=ComponentLogger.js.map -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -/** - * ONLY AVAILABLE IN NODE.JS RUNTIME. +/***/ }), + +/***/ 3041: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors * - * This is a helper class to construct a string representing the resources accessible by an AccountSAS. Setting a value - * to true means that any SAS which uses these permissions will grant access to that resource type. Once all the - * values are set, this should be serialized with toString and set as the resources field on an - * {@link AccountSASSignatureValues} object. 
It is possible to construct the resources string without this class, but - * the order of the resources is particular and this class guarantees correctness. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ -class AccountSASResourceTypes { - constructor() { - /** - * Permission to access service level APIs granted. - */ - this.service = false; - /** - * Permission to access container level APIs (Blob Containers, Tables, Queues, File Shares) granted. - */ - this.container = false; - /** - * Permission to access object level APIs (Blobs, Table Entities, Queue Messages, Files) granted. - */ - this.object = false; - } - /** - * Creates an {@link AccountSASResourceTypes} from the specified resource types string. This method will throw an - * Error if it encounters a character that does not correspond to a valid resource type. - * - * @param resourceTypes - - */ - static parse(resourceTypes) { - const accountSASResourceTypes = new AccountSASResourceTypes(); - for (const c of resourceTypes) { - switch (c) { - case "s": - accountSASResourceTypes.service = true; - break; - case "c": - accountSASResourceTypes.container = true; - break; - case "o": - accountSASResourceTypes.object = true; - break; - default: - throw new RangeError(`Invalid resource type: ${c}`); - } - } - return accountSASResourceTypes; - } - /** - * Converts the given resource types to a string. - * - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas - * - */ - toString() { - const resourceTypes = []; - if (this.service) { - resourceTypes.push("s"); - } - if (this.container) { - resourceTypes.push("c"); +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.DiagConsoleLogger = void 0; +var consoleMap = [ + { n: 'error', c: 'error' }, + { n: 'warn', c: 'warn' }, + { n: 'info', c: 'info' }, + { n: 'debug', c: 'debug' }, + { n: 'verbose', c: 'trace' }, +]; +/** + * A simple Immutable Console based diagnostic logger which will output any messages to the Console. 
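+ * A minimal usage sketch (assuming the `diag.setLogger` entrypoint exported by this
+ * same API package, which wraps the given logger in a level filter):
+ * ```js
+ * const { diag, DiagConsoleLogger, DiagLogLevel } = require('@opentelemetry/api');
+ * // Send the API's internal diagnostics to the console, WARN severity and above.
+ * diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.WARN);
+ * ```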
+ * If you want to limit the amount of logging to a specific level or lower use the + * {@link createLogLevelDiagLogger} + */ +var DiagConsoleLogger = /** @class */ (function () { + function DiagConsoleLogger() { + function _consoleFunc(funcName) { + return function () { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; + } + if (console) { + // Some environments only expose the console when the F12 developer console is open + // eslint-disable-next-line no-console + var theFunc = console[funcName]; + if (typeof theFunc !== 'function') { + // Not all environments support all functions + // eslint-disable-next-line no-console + theFunc = console.log; + } + // One last final check + if (typeof theFunc === 'function') { + return theFunc.apply(console, args); + } + } + }; } - if (this.object) { - resourceTypes.push("o"); + for (var i = 0; i < consoleMap.length; i++) { + this[consoleMap[i].n] = _consoleFunc(consoleMap[i].c); } - return resourceTypes.join(""); } -} + return DiagConsoleLogger; +}()); +exports.DiagConsoleLogger = DiagConsoleLogger; +//# sourceMappingURL=consoleLogger.js.map -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. -/** - * ONLY AVAILABLE IN NODE.JS RUNTIME. +/***/ }), + +/***/ 1634: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors * - * This is a helper class to construct a string representing the services accessible by an AccountSAS. Setting a value - * to true means that any SAS which uses these permissions will grant access to that service. Once all the - * values are set, this should be serialized with toString and set as the services field on an - * {@link AccountSASSignatureValues} object. It is possible to construct the services string without this class, but - * the order of the services is particular and this class guarantees correctness. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ -class AccountSASServices { - constructor() { - /** - * Permission to access blob resources granted. - */ - this.blob = false; - /** - * Permission to access file resources granted. - */ - this.file = false; - /** - * Permission to access queue resources granted. - */ - this.queue = false; - /** - * Permission to access table resources granted. - */ - this.table = false; +var __createBinding = (this && this.__createBinding) || (Object.create ? 
(function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __exportStar = (this && this.__exportStar) || function(m, exports) { + for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p); +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +__exportStar(__nccwpck_require__(3041), exports); +__exportStar(__nccwpck_require__(8077), exports); +//# sourceMappingURL=index.js.map + +/***/ }), + +/***/ 9639: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.createLogLevelDiagLogger = void 0; +var types_1 = __nccwpck_require__(8077); +function createLogLevelDiagLogger(maxLevel, logger) { + if (maxLevel < types_1.DiagLogLevel.NONE) { + maxLevel = types_1.DiagLogLevel.NONE; } - /** - * Creates an {@link AccountSASServices} from the specified services string. This method will throw an - * Error if it encounters a character that does not correspond to a valid service. - * - * @param services - - */ - static parse(services) { - const accountSASServices = new AccountSASServices(); - for (const c of services) { - switch (c) { - case "b": - accountSASServices.blob = true; - break; - case "f": - accountSASServices.file = true; - break; - case "q": - accountSASServices.queue = true; - break; - case "t": - accountSASServices.table = true; - break; - default: - throw new RangeError(`Invalid service character: ${c}`); - } + else if (maxLevel > types_1.DiagLogLevel.ALL) { + maxLevel = types_1.DiagLogLevel.ALL; + } + // In case the logger is null or undefined + logger = logger || {}; + function _filterFunc(funcName, theLevel) { + var theFunc = logger[funcName]; + if (typeof theFunc === 'function' && maxLevel >= theLevel) { + return theFunc.bind(logger); } - return accountSASServices; + return function () { }; } + return { + error: _filterFunc('error', types_1.DiagLogLevel.ERROR), + warn: _filterFunc('warn', types_1.DiagLogLevel.WARN), + info: _filterFunc('info', types_1.DiagLogLevel.INFO), + debug: _filterFunc('debug', types_1.DiagLogLevel.DEBUG), + verbose: _filterFunc('verbose', types_1.DiagLogLevel.VERBOSE), + }; +} +exports.createLogLevelDiagLogger = createLogLevelDiagLogger; +//# sourceMappingURL=logLevelLogger.js.map + +/***/ }), + +/***/ 8077: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.DiagLogLevel = void 0; +/** + * Defines the available internal logging levels for the diagnostic logger, the numeric values + * of the levels are defined to match the original values from the initial LogLevel to avoid + * compatibility/migration issues for any implementation that assume the numeric ordering. + */ +var DiagLogLevel; +(function (DiagLogLevel) { + /** Diagnostic Logging level setting to disable all logging (except and forced logs) */ + DiagLogLevel[DiagLogLevel["NONE"] = 0] = "NONE"; + /** Identifies an error scenario */ + DiagLogLevel[DiagLogLevel["ERROR"] = 30] = "ERROR"; + /** Identifies a warning scenario */ + DiagLogLevel[DiagLogLevel["WARN"] = 50] = "WARN"; + /** General informational log message */ + DiagLogLevel[DiagLogLevel["INFO"] = 60] = "INFO"; + /** General debug log message */ + DiagLogLevel[DiagLogLevel["DEBUG"] = 70] = "DEBUG"; /** - * Converts the given services to a string. - * + * Detailed trace level logging should only be used for development, should only be set + * in a development environment. */ - toString() { - const services = []; - if (this.blob) { - services.push("b"); - } - if (this.table) { - services.push("t"); - } - if (this.queue) { - services.push("q"); - } - if (this.file) { - services.push("f"); - } - return services.join(""); - } -} + DiagLogLevel[DiagLogLevel["VERBOSE"] = 80] = "VERBOSE"; + /** Used to set the logging level to include all logging */ + DiagLogLevel[DiagLogLevel["ALL"] = 9999] = "ALL"; +})(DiagLogLevel = exports.DiagLogLevel || (exports.DiagLogLevel = {})); +//# sourceMappingURL=types.js.map -// Copyright (c) Microsoft Corporation. +/***/ }), + +/***/ 5163: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +var __createBinding = (this && this.__createBinding) || (Object.create ? 
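/* __createBinding: re-export property k of module m as k2 on o; a live getter when Object.create exists, a plain copy otherwise. */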
(function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __exportStar = (this && this.__exportStar) || function(m, exports) { + for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p); +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.diag = exports.propagation = exports.trace = exports.context = exports.INVALID_SPAN_CONTEXT = exports.INVALID_TRACEID = exports.INVALID_SPANID = exports.isValidSpanId = exports.isValidTraceId = exports.isSpanContextValid = exports.createTraceState = exports.baggageEntryMetadataFromString = void 0; +__exportStar(__nccwpck_require__(1508), exports); +var utils_1 = __nccwpck_require__(8136); +Object.defineProperty(exports, "baggageEntryMetadataFromString", ({ enumerable: true, get: function () { return utils_1.baggageEntryMetadataFromString; } })); +__exportStar(__nccwpck_require__(4447), exports); +__exportStar(__nccwpck_require__(2358), exports); +__exportStar(__nccwpck_require__(1109), exports); +__exportStar(__nccwpck_require__(1634), exports); +__exportStar(__nccwpck_require__(865), exports); +__exportStar(__nccwpck_require__(7492), exports); +__exportStar(__nccwpck_require__(4023), exports); +__exportStar(__nccwpck_require__(3503), exports); +__exportStar(__nccwpck_require__(2285), exports); +__exportStar(__nccwpck_require__(9671), exports); +__exportStar(__nccwpck_require__(3209), exports); +__exportStar(__nccwpck_require__(5769), exports); +__exportStar(__nccwpck_require__(1424), exports); +__exportStar(__nccwpck_require__(4416), exports); +__exportStar(__nccwpck_require__(955), exports); +__exportStar(__nccwpck_require__(8845), exports); +__exportStar(__nccwpck_require__(6905), exports); +__exportStar(__nccwpck_require__(8384), exports); +var utils_2 = __nccwpck_require__(2615); +Object.defineProperty(exports, "createTraceState", ({ enumerable: true, get: function () { return utils_2.createTraceState; } })); +__exportStar(__nccwpck_require__(891), exports); +__exportStar(__nccwpck_require__(3168), exports); +__exportStar(__nccwpck_require__(1823), exports); +var spancontext_utils_1 = __nccwpck_require__(9745); +Object.defineProperty(exports, "isSpanContextValid", ({ enumerable: true, get: function () { return spancontext_utils_1.isSpanContextValid; } })); +Object.defineProperty(exports, "isValidTraceId", ({ enumerable: true, get: function () { return spancontext_utils_1.isValidTraceId; } })); +Object.defineProperty(exports, "isValidSpanId", ({ enumerable: true, get: function () { return spancontext_utils_1.isValidSpanId; } })); +var invalid_span_constants_1 = __nccwpck_require__(1760); +Object.defineProperty(exports, "INVALID_SPANID", ({ enumerable: true, get: function () { return invalid_span_constants_1.INVALID_SPANID; } })); +Object.defineProperty(exports, "INVALID_TRACEID", ({ enumerable: true, get: function () { return invalid_span_constants_1.INVALID_TRACEID; } })); +Object.defineProperty(exports, "INVALID_SPAN_CONTEXT", ({ enumerable: true, get: function () { return invalid_span_constants_1.INVALID_SPAN_CONTEXT; } })); +__exportStar(__nccwpck_require__(8242), exports); +__exportStar(__nccwpck_require__(6504), exports); +var context_1 = __nccwpck_require__(7171); +/** Entrypoint for context API */ +exports.context = context_1.ContextAPI.getInstance(); +var trace_1 = 
__nccwpck_require__(1539); +/** Entrypoint for trace API */ +exports.trace = trace_1.TraceAPI.getInstance(); +var propagation_1 = __nccwpck_require__(9909); +/** Entrypoint for propagation API */ +exports.propagation = propagation_1.PropagationAPI.getInstance(); +var diag_1 = __nccwpck_require__(1877); /** - * ONLY AVAILABLE IN NODE.JS RUNTIME. + * Entrypoint for Diag API. + * Defines Diagnostic handler used for internal diagnostic logging operations. + * The default provides a Noop DiagLogger implementation which may be changed via the + * diag.setLogger(logger: DiagLogger) function. + */ +exports.diag = diag_1.DiagAPI.instance(); +exports["default"] = { + trace: exports.trace, + context: exports.context, + propagation: exports.propagation, + diag: exports.diag, +}; +//# sourceMappingURL=index.js.map + +/***/ }), + +/***/ 5135: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors * - * Generates a {@link SASQueryParameters} object which contains all SAS query parameters needed to make an actual - * REST request. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas + * https://www.apache.org/licenses/LICENSE-2.0 * - * @param accountSASSignatureValues - - * @param sharedKeyCredential - + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ -function generateAccountSASQueryParameters(accountSASSignatureValues, sharedKeyCredential) { - const version = accountSASSignatureValues.version - ? accountSASSignatureValues.version - : SERVICE_VERSION; - if (accountSASSignatureValues.permissions && - accountSASSignatureValues.permissions.setImmutabilityPolicy && - version < "2020-08-04") { - throw RangeError("'version' must be >= '2020-08-04' when provided 'i' permission."); - } - if (accountSASSignatureValues.permissions && - accountSASSignatureValues.permissions.deleteVersion && - version < "2019-10-10") { - throw RangeError("'version' must be >= '2019-10-10' when provided 'x' permission."); - } - if (accountSASSignatureValues.permissions && - accountSASSignatureValues.permissions.permanentDelete && - version < "2019-10-10") { - throw RangeError("'version' must be >= '2019-10-10' when provided 'y' permission."); - } - if (accountSASSignatureValues.permissions && - accountSASSignatureValues.permissions.tag && - version < "2019-12-12") { - throw RangeError("'version' must be >= '2019-12-12' when provided 't' permission."); - } - if (accountSASSignatureValues.permissions && - accountSASSignatureValues.permissions.filter && - version < "2019-12-12") { - throw RangeError("'version' must be >= '2019-12-12' when provided 'f' permission."); +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.unregisterGlobal = exports.getGlobal = exports.registerGlobal = void 0; +var platform_1 = __nccwpck_require__(9957); +var version_1 = __nccwpck_require__(8996); +var semver_1 = __nccwpck_require__(1522); +var major = version_1.VERSION.split('.')[0]; +var GLOBAL_OPENTELEMETRY_API_KEY = Symbol.for("opentelemetry.js.api." 
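/* resolves to e.g. Symbol.for("opentelemetry.js.api.1") for any 1.x VERSION, i.e. one shared global slot per major version */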
+ major); +var _global = platform_1._globalThis; +function registerGlobal(type, instance, diag, allowOverride) { + var _a; + if (allowOverride === void 0) { allowOverride = false; } + var api = (_global[GLOBAL_OPENTELEMETRY_API_KEY] = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) !== null && _a !== void 0 ? _a : { + version: version_1.VERSION, + }); + if (!allowOverride && api[type]) { + // already registered an API of this type + var err = new Error("@opentelemetry/api: Attempted duplicate registration of API: " + type); + diag.error(err.stack || err.message); + return false; } - if (accountSASSignatureValues.encryptionScope && version < "2020-12-06") { - throw RangeError("'version' must be >= '2020-12-06' when provided 'encryptionScope' in SAS."); + if (api.version !== version_1.VERSION) { + // All registered APIs must be of the same version exactly + var err = new Error('@opentelemetry/api: All API registration versions must match'); + diag.error(err.stack || err.message); + return false; } - const parsedPermissions = AccountSASPermissions.parse(accountSASSignatureValues.permissions.toString()); - const parsedServices = AccountSASServices.parse(accountSASSignatureValues.services).toString(); - const parsedResourceTypes = AccountSASResourceTypes.parse(accountSASSignatureValues.resourceTypes).toString(); - let stringToSign; - if (version >= "2020-12-06") { - stringToSign = [ - sharedKeyCredential.accountName, - parsedPermissions, - parsedServices, - parsedResourceTypes, - accountSASSignatureValues.startsOn - ? truncatedISO8061Date(accountSASSignatureValues.startsOn, false) - : "", - truncatedISO8061Date(accountSASSignatureValues.expiresOn, false), - accountSASSignatureValues.ipRange ? ipRangeToString(accountSASSignatureValues.ipRange) : "", - accountSASSignatureValues.protocol ? accountSASSignatureValues.protocol : "", - version, - accountSASSignatureValues.encryptionScope ? accountSASSignatureValues.encryptionScope : "", - "", // Account SAS requires an additional newline character - ].join("\n"); + api[type] = instance; + diag.debug("@opentelemetry/api: Registered a global for " + type + " v" + version_1.VERSION + "."); + return true; +} +exports.registerGlobal = registerGlobal; +function getGlobal(type) { + var _a, _b; + var globalVersion = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _a === void 0 ? void 0 : _a.version; + if (!globalVersion || !semver_1.isCompatible(globalVersion)) { + return; } - else { - stringToSign = [ - sharedKeyCredential.accountName, - parsedPermissions, - parsedServices, - parsedResourceTypes, - accountSASSignatureValues.startsOn - ? truncatedISO8061Date(accountSASSignatureValues.startsOn, false) - : "", - truncatedISO8061Date(accountSASSignatureValues.expiresOn, false), - accountSASSignatureValues.ipRange ? ipRangeToString(accountSASSignatureValues.ipRange) : "", - accountSASSignatureValues.protocol ? accountSASSignatureValues.protocol : "", - version, - "", // Account SAS requires an additional newline character - ].join("\n"); + return (_b = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _b === void 0 ? 
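/* transpiled optional chaining, effectively _global[GLOBAL_OPENTELEMETRY_API_KEY]?.[type] */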
void 0 : _b[type]; +} +exports.getGlobal = getGlobal; +function unregisterGlobal(type, diag) { + diag.debug("@opentelemetry/api: Unregistering a global for " + type + " v" + version_1.VERSION + "."); + var api = _global[GLOBAL_OPENTELEMETRY_API_KEY]; + if (api) { + delete api[type]; } - const signature = sharedKeyCredential.computeHMACSHA256(stringToSign); - return new SASQueryParameters(version, signature, parsedPermissions.toString(), parsedServices, parsedResourceTypes, accountSASSignatureValues.protocol, accountSASSignatureValues.startsOn, accountSASSignatureValues.expiresOn, accountSASSignatureValues.ipRange, undefined, undefined, undefined, undefined, undefined, undefined, undefined, undefined, undefined, undefined, accountSASSignatureValues.encryptionScope); } +exports.unregisterGlobal = unregisterGlobal; +//# sourceMappingURL=global-utils.js.map -/** - * A BlobServiceClient represents a Client to the Azure Storage Blob service allowing you - * to manipulate blob containers. +/***/ }), + +/***/ 1522: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ -class BlobServiceClient extends StorageClient { - constructor(url, credentialOrPipeline, - // Legacy, no fix for eslint error without breaking. Disable it for this interface. - /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/ - options) { - let pipeline; - if (isPipelineLike(credentialOrPipeline)) { - pipeline = credentialOrPipeline; - } - else if ((coreHttp.isNode && credentialOrPipeline instanceof StorageSharedKeyCredential) || - credentialOrPipeline instanceof AnonymousCredential || - coreHttp.isTokenCredential(credentialOrPipeline)) { - pipeline = newPipeline(credentialOrPipeline, options); - } - else { - // The second parameter is undefined. Use anonymous credential - pipeline = newPipeline(new AnonymousCredential(), options); - } - super(url, pipeline); - this.serviceContext = new Service(this.storageClientContext); - } - /** - * - * Creates an instance of BlobServiceClient from connection string. - * - * @param connectionString - Account connection string or a SAS connection string of an Azure storage account. - * [ Note - Account connection string can only be used in NODE.JS runtime. ] - * Account connection string example - - * `DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=accountKey;EndpointSuffix=core.windows.net` - * SAS connection string example - - * `BlobEndpoint=https://myaccount.blob.core.windows.net/;QueueEndpoint=https://myaccount.queue.core.windows.net/;FileEndpoint=https://myaccount.file.core.windows.net/;TableEndpoint=https://myaccount.table.core.windows.net/;SharedAccessSignature=sasString` - * @param options - Optional. Options to configure the HTTP pipeline. - */ - static fromConnectionString(connectionString, - // Legacy, no fix for eslint error without breaking. Disable it for this interface. 
- /* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/ - options) { - options = options || {}; - const extractedCreds = extractConnectionStringParts(connectionString); - if (extractedCreds.kind === "AccountConnString") { - if (coreHttp.isNode) { - const sharedKeyCredential = new StorageSharedKeyCredential(extractedCreds.accountName, extractedCreds.accountKey); - if (!options.proxyOptions) { - options.proxyOptions = coreHttp.getDefaultProxySettings(extractedCreds.proxyUri); - } - const pipeline = newPipeline(sharedKeyCredential, options); - return new BlobServiceClient(extractedCreds.url, pipeline); - } - else { - throw new Error("Account connection string is only supported in Node.js environment"); - } - } - else if (extractedCreds.kind === "SASConnString") { - const pipeline = newPipeline(new AnonymousCredential(), options); - return new BlobServiceClient(extractedCreds.url + "?" + extractedCreds.accountSas, pipeline); - } - else { - throw new Error("Connection string must be either an Account connection string or a SAS connection string"); - } - } - /** - * Creates a {@link ContainerClient} object - * - * @param containerName - A container name - * @returns A new ContainerClient object for the given container name. - * - * Example usage: - * - * ```js - * const containerClient = blobServiceClient.getContainerClient(""); - * ``` - */ - getContainerClient(containerName) { - return new ContainerClient(appendToURLPath(this.url, encodeURIComponent(containerName)), this.pipeline); - } - /** - * Create a Blob container. - * - * @param containerName - Name of the container to create. - * @param options - Options to configure Container Create operation. - * @returns Container creation response and the corresponding container client. - */ - async createContainer(containerName, options = {}) { - const { span, updatedOptions } = createSpan("BlobServiceClient-createContainer", options); - try { - const containerClient = this.getContainerClient(containerName); - const containerCreateResponse = await containerClient.create(updatedOptions); - return { - containerClient, - containerCreateResponse, - }; - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } - } - /** - * Deletes a Blob container. - * - * @param containerName - Name of the container to delete. - * @param options - Options to configure Container Delete operation. - * @returns Container deletion response. - */ - async deleteContainer(containerName, options = {}) { - const { span, updatedOptions } = createSpan("BlobServiceClient-deleteContainer", options); - try { - const containerClient = this.getContainerClient(containerName); - return await containerClient.delete(updatedOptions); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } - } - /** - * Restore a previously deleted Blob container. - * This API is only functional if Container Soft Delete is enabled for the storage account associated with the container. - * - * @param deletedContainerName - Name of the previously deleted container. - * @param deletedContainerVersion - Version of the previously deleted container, used to uniquely identify the deleted container. - * @param options - Options to configure Container Restore operation. - * @returns Container deletion response. 
- */ - async undeleteContainer(deletedContainerName, deletedContainerVersion, options = {}) { - const { span, updatedOptions } = createSpan("BlobServiceClient-undeleteContainer", options); - try { - const containerClient = this.getContainerClient(options.destinationContainerName || deletedContainerName); - // Hack to access a protected member. - const containerContext = new Container(containerClient["storageClientContext"]); - const containerUndeleteResponse = await containerContext.restore(Object.assign({ deletedContainerName, - deletedContainerVersion }, updatedOptions)); - return { containerClient, containerUndeleteResponse }; - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } - } - /** - * Rename an existing Blob Container. - * - * @param sourceContainerName - The name of the source container. - * @param destinationContainerName - The new name of the container. - * @param options - Options to configure Container Rename operation. - */ - /* eslint-disable-next-line @typescript-eslint/ban-ts-comment */ - // @ts-ignore Need to hide this interface for now. Make it public and turn on the live tests for it when the service is ready. - async renameContainer(sourceContainerName, destinationContainerName, options = {}) { - var _a; - const { span, updatedOptions } = createSpan("BlobServiceClient-renameContainer", options); - try { - const containerClient = this.getContainerClient(destinationContainerName); - // Hack to access a protected member. - const containerContext = new Container(containerClient["storageClientContext"]); - const containerRenameResponse = await containerContext.rename(sourceContainerName, Object.assign(Object.assign({}, updatedOptions), { sourceLeaseId: (_a = options.sourceCondition) === null || _a === void 0 ? void 0 : _a.leaseId })); - return { containerClient, containerRenameResponse }; - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } - } - /** - * Gets the properties of a storage account’s Blob service, including properties - * for Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-service-properties - * - * @param options - Options to the Service Get Properties operation. - * @returns Response data for the Service Get Properties operation. - */ - async getProperties(options = {}) { - const { span, updatedOptions } = createSpan("BlobServiceClient-getProperties", options); - try { - return await this.serviceContext.getProperties(Object.assign({ abortSignal: options.abortSignal }, convertTracingToRequestOptionsBase(updatedOptions))); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.isCompatible = exports._makeCompatibilityCheck = void 0; +var version_1 = __nccwpck_require__(8996); +var re = /^(\d+)\.(\d+)\.(\d+)(-(.+))?$/; +/** + * Create a function to test an API version to see if it is compatible with the provided ownVersion. 
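+ * For illustration (hypothetical version literals; the exact rules are the semantics
+ * listed below):
+ * ```js
+ * const check = _makeCompatibilityCheck('1.4.0');
+ * check('1.4.0'); // true:  exact match
+ * check('1.6.2'); // true:  same major, global minor >= own minor
+ * check('1.3.0'); // false: global minor is older than own
+ * check('2.0.0'); // false: major versions differ
+ * ```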
+ * + * The returned function has the following semantics: + * - Exact match is always compatible + * - Major versions must match exactly + * - 1.x package cannot use global 2.x package + * - 2.x package cannot use global 1.x package + * - The minor version of the API module requesting access to the global API must be less than or equal to the minor version of this API + * - 1.3 package may use 1.4 global because the later global contains all functions 1.3 expects + * - 1.4 package may NOT use 1.3 global because it may try to call functions which don't exist on 1.3 + * - If the major version is 0, the minor version is treated as the major and the patch is treated as the minor + * - Patch and build tag differences are not considered at this time + * + * @param ownVersion version which should be checked against + */ +function _makeCompatibilityCheck(ownVersion) { + var acceptedVersions = new Set([ownVersion]); + var rejectedVersions = new Set(); + var myVersionMatch = ownVersion.match(re); + if (!myVersionMatch) { + // we cannot guarantee compatibility so we always return noop + return function () { return false; }; } - /** - * Sets properties for a storage account’s Blob service endpoint, including properties - * for Storage Analytics, CORS (Cross-Origin Resource Sharing) rules and soft delete settings. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-service-properties - * - * @param properties - - * @param options - Options to the Service Set Properties operation. - * @returns Response data for the Service Set Properties operation. - */ - async setProperties(properties, options = {}) { - const { span, updatedOptions } = createSpan("BlobServiceClient-setProperties", options); - try { - return await this.serviceContext.setProperties(properties, Object.assign({ abortSignal: options.abortSignal }, convertTracingToRequestOptionsBase(updatedOptions))); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } + var ownVersionParsed = { + major: +myVersionMatch[1], + minor: +myVersionMatch[2], + patch: +myVersionMatch[3], + prerelease: myVersionMatch[4], + }; + // if ownVersion has a prerelease tag, versions must match exactly + if (ownVersionParsed.prerelease != null) { + return function isExactmatch(globalVersion) { + return globalVersion === ownVersion; + }; } - /** - * Retrieves statistics related to replication for the Blob service. It is only - * available on the secondary location endpoint when read-access geo-redundant - * replication is enabled for the storage account. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-service-stats - * - * @param options - Options to the Service Get Statistics operation. - * @returns Response data for the Service Get Statistics operation. - */ - async getStatistics(options = {}) { - const { span, updatedOptions } = createSpan("BlobServiceClient-getStatistics", options); - try { - return await this.serviceContext.getStatistics(Object.assign({ abortSignal: options.abortSignal }, convertTracingToRequestOptionsBase(updatedOptions))); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } + function _reject(v) { + rejectedVersions.add(v); + return false; } - /** - * The Get Account Information operation returns the sku name and account kind - * for the specified account. 
- * The Get Account Information operation is available on service versions beginning - * with version 2018-03-28. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-account-information - * - * @param options - Options to the Service Get Account Info operation. - * @returns Response data for the Service Get Account Info operation. - */ - async getAccountInfo(options = {}) { - const { span, updatedOptions } = createSpan("BlobServiceClient-getAccountInfo", options); - try { - return await this.serviceContext.getAccountInfo(Object.assign({ abortSignal: options.abortSignal }, convertTracingToRequestOptionsBase(updatedOptions))); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; - } - finally { - span.end(); - } + function _accept(v) { + acceptedVersions.add(v); + return true; } - /** - * Returns a list of the containers under the specified account. - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/list-containers2 - * - * @param marker - A string value that identifies the portion of - * the list of containers to be returned with the next listing operation. The - * operation returns the continuationToken value within the response body if the - * listing operation did not return all containers remaining to be listed - * with the current page. The continuationToken value can be used as the value for - * the marker parameter in a subsequent call to request the next page of list - * items. The marker value is opaque to the client. - * @param options - Options to the Service List Container Segment operation. - * @returns Response data for the Service List Container Segment operation. - */ - async listContainersSegment(marker, options = {}) { - const { span, updatedOptions } = createSpan("BlobServiceClient-listContainersSegment", options); - try { - return await this.serviceContext.listContainersSegment(Object.assign(Object.assign(Object.assign({ abortSignal: options.abortSignal, marker }, options), { include: typeof options.include === "string" ? [options.include] : options.include }), convertTracingToRequestOptionsBase(updatedOptions))); - } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; + return function isCompatible(globalVersion) { + if (acceptedVersions.has(globalVersion)) { + return true; } - finally { - span.end(); + if (rejectedVersions.has(globalVersion)) { + return false; } - } - /** - * The Filter Blobs operation enables callers to list blobs across all containers whose tags - * match a given search expression. Filter blobs searches across all containers within a - * storage account but can be scoped within the expression to a single container. - * - * @param tagFilterSqlExpression - The where parameter enables the caller to query blobs whose tags match a given expression. - * The given expression must evaluate to true for a blob to be returned in the results. - * The[OData - ABNF] filter syntax rule defines the formal grammar for the value of the where query parameter; - * however, only a subset of the OData filter syntax is supported in the Blob service. - * @param marker - A string value that identifies the portion of - * the list of blobs to be returned with the next listing operation. The - * operation returns the continuationToken value within the response body if the - * listing operation did not return all blobs remaining to be listed - * with the current page. 
The continuationToken value can be used as the value for - * the marker parameter in a subsequent call to request the next page of list - * items. The marker value is opaque to the client. - * @param options - Options to find blobs by tags. - */ - async findBlobsByTagsSegment(tagFilterSqlExpression, marker, options = {}) { - const { span, updatedOptions } = createSpan("BlobServiceClient-findBlobsByTagsSegment", options); - try { - const response = await this.serviceContext.filterBlobs(Object.assign({ abortSignal: options.abortSignal, where: tagFilterSqlExpression, marker, maxPageSize: options.maxPageSize }, convertTracingToRequestOptionsBase(updatedOptions))); - const wrappedResponse = Object.assign(Object.assign({}, response), { _response: response._response, blobs: response.blobs.map((blob) => { - var _a; - let tagValue = ""; - if (((_a = blob.tags) === null || _a === void 0 ? void 0 : _a.blobTagSet.length) === 1) { - tagValue = blob.tags.blobTagSet[0].value; - } - return Object.assign(Object.assign({}, blob), { tags: toTags(blob.tags), tagValue }); - }) }); - return wrappedResponse; + var globalVersionMatch = globalVersion.match(re); + if (!globalVersionMatch) { + // cannot parse other version + // we cannot guarantee compatibility so we always noop + return _reject(globalVersion); } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; + var globalVersionParsed = { + major: +globalVersionMatch[1], + minor: +globalVersionMatch[2], + patch: +globalVersionMatch[3], + prerelease: globalVersionMatch[4], + }; + // if globalVersion has a prerelease tag, versions must match exactly + if (globalVersionParsed.prerelease != null) { + return _reject(globalVersion); } - finally { - span.end(); + // major versions must match + if (ownVersionParsed.major !== globalVersionParsed.major) { + return _reject(globalVersion); } - } - /** - * Returns an AsyncIterableIterator for ServiceFindBlobsByTagsSegmentResponse. - * - * @param tagFilterSqlExpression - The where parameter enables the caller to query blobs whose tags match a given expression. - * The given expression must evaluate to true for a blob to be returned in the results. - * The[OData - ABNF] filter syntax rule defines the formal grammar for the value of the where query parameter; - * however, only a subset of the OData filter syntax is supported in the Blob service. - * @param marker - A string value that identifies the portion of - * the list of blobs to be returned with the next listing operation. The - * operation returns the continuationToken value within the response body if the - * listing operation did not return all blobs remaining to be listed - * with the current page. The continuationToken value can be used as the value for - * the marker parameter in a subsequent call to request the next page of list - * items. The marker value is opaque to the client. - * @param options - Options to find blobs by tags. - */ - findBlobsByTagsSegments(tagFilterSqlExpression, marker, options = {}) { - return tslib.__asyncGenerator(this, arguments, function* findBlobsByTagsSegments_1() { - let response; - if (!!marker || marker === undefined) { - do { - response = yield tslib.__await(this.findBlobsByTagsSegment(tagFilterSqlExpression, marker, options)); - response.blobs = response.blobs || []; - marker = response.continuationToken; - yield yield tslib.__await(response); - } while (marker); - } - }); - } - /** - * Returns an AsyncIterableIterator for blobs. 
- * - * @param tagFilterSqlExpression - The where parameter enables the caller to query blobs whose tags match a given expression. - * The given expression must evaluate to true for a blob to be returned in the results. - * The[OData - ABNF] filter syntax rule defines the formal grammar for the value of the where query parameter; - * however, only a subset of the OData filter syntax is supported in the Blob service. - * @param options - Options to findBlobsByTagsItems. - */ - findBlobsByTagsItems(tagFilterSqlExpression, options = {}) { - return tslib.__asyncGenerator(this, arguments, function* findBlobsByTagsItems_1() { - var e_1, _a; - let marker; - try { - for (var _b = tslib.__asyncValues(this.findBlobsByTagsSegments(tagFilterSqlExpression, marker, options)), _c; _c = yield tslib.__await(_b.next()), !_c.done;) { - const segment = _c.value; - yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(segment.blobs))); - } - } - catch (e_1_1) { e_1 = { error: e_1_1 }; } - finally { - try { - if (_c && !_c.done && (_a = _b.return)) yield tslib.__await(_a.call(_b)); - } - finally { if (e_1) throw e_1.error; } - } - }); - } - /** - * Returns an async iterable iterator to find all blobs with specified tag - * under the specified account. - * - * .byPage() returns an async iterable iterator to list the blobs in pages. - * - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-service-properties - * - * Example using `for await` syntax: - * - * ```js - * let i = 1; - * for await (const blob of blobServiceClient.findBlobsByTags("tagkey='tagvalue'")) { - * console.log(`Blob ${i++}: ${container.name}`); - * } - * ``` - * - * Example using `iter.next()`: - * - * ```js - * let i = 1; - * const iter = blobServiceClient.findBlobsByTags("tagkey='tagvalue'"); - * let blobItem = await iter.next(); - * while (!blobItem.done) { - * console.log(`Blob ${i++}: ${blobItem.value.name}`); - * blobItem = await iter.next(); - * } - * ``` - * - * Example using `byPage()`: - * - * ```js - * // passing optional maxPageSize in the page settings - * let i = 1; - * for await (const response of blobServiceClient.findBlobsByTags("tagkey='tagvalue'").byPage({ maxPageSize: 20 })) { - * if (response.blobs) { - * for (const blob of response.blobs) { - * console.log(`Blob ${i++}: ${blob.name}`); - * } - * } - * } - * ``` - * - * Example using paging with a marker: - * - * ```js - * let i = 1; - * let iterator = blobServiceClient.findBlobsByTags("tagkey='tagvalue'").byPage({ maxPageSize: 2 }); - * let response = (await iterator.next()).value; - * - * // Prints 2 blob names - * if (response.blobs) { - * for (const blob of response.blobs) { - * console.log(`Blob ${i++}: ${blob.name}`); - * } - * } - * - * // Gets next marker - * let marker = response.continuationToken; - * // Passing next marker as continuationToken - * iterator = blobServiceClient - * .findBlobsByTags("tagkey='tagvalue'") - * .byPage({ continuationToken: marker, maxPageSize: 10 }); - * response = (await iterator.next()).value; - * - * // Prints blob names - * if (response.blobs) { - * for (const blob of response.blobs) { - * console.log(`Blob ${i++}: ${blob.name}`); - * } - * } - * ``` - * - * @param tagFilterSqlExpression - The where parameter enables the caller to query blobs whose tags match a given expression. - * The given expression must evaluate to true for a blob to be returned in the results. 
- * The[OData - ABNF] filter syntax rule defines the formal grammar for the value of the where query parameter; - * however, only a subset of the OData filter syntax is supported in the Blob service. - * @param options - Options to find blobs by tags. - */ - findBlobsByTags(tagFilterSqlExpression, options = {}) { - // AsyncIterableIterator to iterate over blobs - const listSegmentOptions = Object.assign({}, options); - const iter = this.findBlobsByTagsItems(tagFilterSqlExpression, listSegmentOptions); - return { - /** - * The next method, part of the iteration protocol - */ - next() { - return iter.next(); - }, - /** - * The connection to the async iterator, part of the iteration protocol - */ - [Symbol.asyncIterator]() { - return this; - }, - /** - * Return an AsyncIterableIterator that works a page at a time - */ - byPage: (settings = {}) => { - return this.findBlobsByTagsSegments(tagFilterSqlExpression, settings.continuationToken, Object.assign({ maxPageSize: settings.maxPageSize }, listSegmentOptions)); - }, - }; - } - /** - * Returns an AsyncIterableIterator for ServiceListContainersSegmentResponses - * - * @param marker - A string value that identifies the portion of - * the list of containers to be returned with the next listing operation. The - * operation returns the continuationToken value within the response body if the - * listing operation did not return all containers remaining to be listed - * with the current page. The continuationToken value can be used as the value for - * the marker parameter in a subsequent call to request the next page of list - * items. The marker value is opaque to the client. - * @param options - Options to list containers operation. - */ - listSegments(marker, options = {}) { - return tslib.__asyncGenerator(this, arguments, function* listSegments_1() { - let listContainersSegmentResponse; - if (!!marker || marker === undefined) { - do { - listContainersSegmentResponse = yield tslib.__await(this.listContainersSegment(marker, options)); - listContainersSegmentResponse.containerItems = - listContainersSegmentResponse.containerItems || []; - marker = listContainersSegmentResponse.continuationToken; - yield yield tslib.__await(yield tslib.__await(listContainersSegmentResponse)); - } while (marker); - } - }); - } - /** - * Returns an AsyncIterableIterator for Container Items - * - * @param options - Options to list containers operation. - */ - listItems(options = {}) { - return tslib.__asyncGenerator(this, arguments, function* listItems_1() { - var e_2, _a; - let marker; - try { - for (var _b = tslib.__asyncValues(this.listSegments(marker, options)), _c; _c = yield tslib.__await(_b.next()), !_c.done;) { - const segment = _c.value; - yield tslib.__await(yield* tslib.__asyncDelegator(tslib.__asyncValues(segment.containerItems))); - } - } - catch (e_2_1) { e_2 = { error: e_2_1 }; } - finally { - try { - if (_c && !_c.done && (_a = _b.return)) yield tslib.__await(_a.call(_b)); - } - finally { if (e_2) throw e_2.error; } + if (ownVersionParsed.major === 0) { + if (ownVersionParsed.minor === globalVersionParsed.minor && + ownVersionParsed.patch <= globalVersionParsed.patch) { + return _accept(globalVersion); } - }); + return _reject(globalVersion); + } + if (ownVersionParsed.minor <= globalVersionParsed.minor) { + return _accept(globalVersion); + } + return _reject(globalVersion); + }; +} +exports._makeCompatibilityCheck = _makeCompatibilityCheck; +/** + * Test an API version to see if it is compatible with this API. 
+ * + * - Exact match is always compatible + * - Major versions must match exactly + * - 1.x package cannot use global 2.x package + * - 2.x package cannot use global 1.x package + * - The minor version of the API module requesting access to the global API must be less than or equal to the minor version of this API + * - 1.3 package may use 1.4 global because the later global contains all functions 1.3 expects + * - 1.4 package may NOT use 1.3 global because it may try to call functions which don't exist on 1.3 + * - If the major version is 0, the minor version is treated as the major and the patch is treated as the minor + * - Patch and build tag differences are not considered at this time + * + * @param version version of the API requesting an instance of the global API + */ +exports.isCompatible = _makeCompatibilityCheck(version_1.VERSION); +//# sourceMappingURL=semver.js.map + +/***/ }), + +/***/ 9957: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __exportStar = (this && this.__exportStar) || function(m, exports) { + for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p); +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +__exportStar(__nccwpck_require__(7200), exports); +//# sourceMappingURL=index.js.map + +/***/ }), + +/***/ 9406: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports._globalThis = void 0; +/** only globals that common to node and browsers are allowed */ +// eslint-disable-next-line node/no-unsupported-features/es-builtins +exports._globalThis = typeof globalThis === 'object' ? 
globalThis : global; +//# sourceMappingURL=globalThis.js.map + +/***/ }), + +/***/ 7200: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __exportStar = (this && this.__exportStar) || function(m, exports) { + for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p); +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +__exportStar(__nccwpck_require__(9406), exports); +//# sourceMappingURL=index.js.map + +/***/ }), + +/***/ 2368: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.NoopTextMapPropagator = void 0; +/** + * No-op implementations of {@link TextMapPropagator}. + */ +var NoopTextMapPropagator = /** @class */ (function () { + function NoopTextMapPropagator() { } - /** - * Returns an async iterable iterator to list all the containers - * under the specified account. - * - * .byPage() returns an async iterable iterator to list the containers in pages. 
- * - * Example using `for await` syntax: - * - * ```js - * let i = 1; - * for await (const container of blobServiceClient.listContainers()) { - * console.log(`Container ${i++}: ${container.name}`); - * } - * ``` - * - * Example using `iter.next()`: - * - * ```js - * let i = 1; - * const iter = blobServiceClient.listContainers(); - * let containerItem = await iter.next(); - * while (!containerItem.done) { - * console.log(`Container ${i++}: ${containerItem.value.name}`); - * containerItem = await iter.next(); - * } - * ``` - * - * Example using `byPage()`: - * - * ```js - * // passing optional maxPageSize in the page settings - * let i = 1; - * for await (const response of blobServiceClient.listContainers().byPage({ maxPageSize: 20 })) { - * if (response.containerItems) { - * for (const container of response.containerItems) { - * console.log(`Container ${i++}: ${container.name}`); - * } - * } - * } - * ``` - * - * Example using paging with a marker: - * - * ```js - * let i = 1; - * let iterator = blobServiceClient.listContainers().byPage({ maxPageSize: 2 }); - * let response = (await iterator.next()).value; - * - * // Prints 2 container names - * if (response.containerItems) { - * for (const container of response.containerItems) { - * console.log(`Container ${i++}: ${container.name}`); - * } - * } - * - * // Gets next marker - * let marker = response.continuationToken; - * // Passing next marker as continuationToken - * iterator = blobServiceClient - * .listContainers() - * .byPage({ continuationToken: marker, maxPageSize: 10 }); - * response = (await iterator.next()).value; - * - * // Prints 10 container names - * if (response.containerItems) { - * for (const container of response.containerItems) { - * console.log(`Container ${i++}: ${container.name}`); - * } - * } - * ``` - * - * @param options - Options to list containers. - * @returns An asyncIterableIterator that supports paging. - */ - listContainers(options = {}) { - if (options.prefix === "") { - options.prefix = undefined; - } - const include = []; - if (options.includeDeleted) { - include.push("deleted"); + /** Noop inject function does nothing */ + NoopTextMapPropagator.prototype.inject = function (_context, _carrier) { }; + /** Noop extract function does nothing and returns the input context */ + NoopTextMapPropagator.prototype.extract = function (context, _carrier) { + return context; + }; + NoopTextMapPropagator.prototype.fields = function () { + return []; + }; + return NoopTextMapPropagator; +}()); +exports.NoopTextMapPropagator = NoopTextMapPropagator; +//# sourceMappingURL=NoopTextMapPropagator.js.map + +/***/ }), + +/***/ 865: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.defaultTextMapSetter = exports.defaultTextMapGetter = void 0; +exports.defaultTextMapGetter = { + get: function (carrier, key) { + if (carrier == null) { + return undefined; } - if (options.includeMetadata) { - include.push("metadata"); + return carrier[key]; + }, + keys: function (carrier) { + if (carrier == null) { + return []; } - if (options.includeSystem) { - include.push("system"); + return Object.keys(carrier); + }, +}; +exports.defaultTextMapSetter = { + set: function (carrier, key, value) { + if (carrier == null) { + return; } - // AsyncIterableIterator to iterate over containers - const listSegmentOptions = Object.assign(Object.assign({}, options), (include.length > 0 ? { include } : {})); - const iter = this.listItems(listSegmentOptions); - return { - /** - * The next method, part of the iteration protocol - */ - next() { - return iter.next(); - }, - /** - * The connection to the async iterator, part of the iteration protocol - */ - [Symbol.asyncIterator]() { - return this; - }, - /** - * Return an AsyncIterableIterator that works a page at a time - */ - byPage: (settings = {}) => { - return this.listSegments(settings.continuationToken, Object.assign({ maxPageSize: settings.maxPageSize }, listSegmentOptions)); - }, - }; + carrier[key] = value; + }, +}; +//# sourceMappingURL=TextMapPropagator.js.map + +/***/ }), + +/***/ 1462: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.NonRecordingSpan = void 0; +var invalid_span_constants_1 = __nccwpck_require__(1760); +/** + * The NonRecordingSpan is the default {@link Span} that is used when no Span + * implementation is available. All operations are no-op including context + * propagation. + */ +var NonRecordingSpan = /** @class */ (function () { + function NonRecordingSpan(_spanContext) { + if (_spanContext === void 0) { _spanContext = invalid_span_constants_1.INVALID_SPAN_CONTEXT; } + this._spanContext = _spanContext; } - /** - * ONLY AVAILABLE WHEN USING BEARER TOKEN AUTHENTICATION (TokenCredential). - * - * Retrieves a user delegation key for the Blob service. This is only a valid operation when using - * bearer token authentication. - * - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-user-delegation-key - * - * @param startsOn - The start time for the user delegation SAS. Must be within 7 days of the current time - * @param expiresOn - The end time for the user delegation SAS. 
Must be within 7 days of the current time - */ - async getUserDelegationKey(startsOn, expiresOn, options = {}) { - const { span, updatedOptions } = createSpan("BlobServiceClient-getUserDelegationKey", options); - try { - const response = await this.serviceContext.getUserDelegationKey({ - startsOn: truncatedISO8061Date(startsOn, false), - expiresOn: truncatedISO8061Date(expiresOn, false), - }, Object.assign({ abortSignal: options.abortSignal }, convertTracingToRequestOptionsBase(updatedOptions))); - const userDelegationKey = { - signedObjectId: response.signedObjectId, - signedTenantId: response.signedTenantId, - signedStartsOn: new Date(response.signedStartsOn), - signedExpiresOn: new Date(response.signedExpiresOn), - signedService: response.signedService, - signedVersion: response.signedVersion, - value: response.value, - }; - const res = Object.assign({ _response: response._response, requestId: response.requestId, clientRequestId: response.clientRequestId, version: response.version, date: response.date, errorCode: response.errorCode }, userDelegationKey); - return res; + // Returns a SpanContext. + NonRecordingSpan.prototype.spanContext = function () { + return this._spanContext; + }; + // By default does nothing + NonRecordingSpan.prototype.setAttribute = function (_key, _value) { + return this; + }; + // By default does nothing + NonRecordingSpan.prototype.setAttributes = function (_attributes) { + return this; + }; + // By default does nothing + NonRecordingSpan.prototype.addEvent = function (_name, _attributes) { + return this; + }; + // By default does nothing + NonRecordingSpan.prototype.setStatus = function (_status) { + return this; + }; + // By default does nothing + NonRecordingSpan.prototype.updateName = function (_name) { + return this; + }; + // By default does nothing + NonRecordingSpan.prototype.end = function (_endTime) { }; + // isRecording always returns false for NonRecordingSpan. + NonRecordingSpan.prototype.isRecording = function () { + return false; + }; + // By default does nothing + NonRecordingSpan.prototype.recordException = function (_exception, _time) { }; + return NonRecordingSpan; +}()); +exports.NonRecordingSpan = NonRecordingSpan; +//# sourceMappingURL=NonRecordingSpan.js.map + +/***/ }), + +/***/ 7606: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.NoopTracer = void 0; +var context_1 = __nccwpck_require__(7171); +var context_utils_1 = __nccwpck_require__(3326); +var NonRecordingSpan_1 = __nccwpck_require__(1462); +var spancontext_utils_1 = __nccwpck_require__(9745); +var context = context_1.ContextAPI.getInstance(); +/** + * No-op implementations of {@link Tracer}. + */ +var NoopTracer = /** @class */ (function () { + function NoopTracer() { + } + // startSpan starts a noop span. 
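+    // Editorial sketch of the expected behaviour (not part of the original bundle):
+    //   const span = new NoopTracer().startSpan('op'); // a NonRecordingSpan
+    //   span.isRecording();                            // false
+    // A parent context is only consulted so a valid SpanContext keeps propagating.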
+ NoopTracer.prototype.startSpan = function (name, options, context) { + var root = Boolean(options === null || options === void 0 ? void 0 : options.root); + if (root) { + return new NonRecordingSpan_1.NonRecordingSpan(); } - catch (e) { - span.setStatus({ - code: coreTracing.SpanStatusCode.ERROR, - message: e.message, - }); - throw e; + var parentFromContext = context && context_utils_1.getSpanContext(context); + if (isSpanContext(parentFromContext) && + spancontext_utils_1.isSpanContextValid(parentFromContext)) { + return new NonRecordingSpan_1.NonRecordingSpan(parentFromContext); } - finally { - span.end(); + else { + return new NonRecordingSpan_1.NonRecordingSpan(); } - } - /** - * Creates a BlobBatchClient object to conduct batch operations. - * - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/blob-batch - * - * @returns A new BlobBatchClient object for this service. - */ - getBlobBatchClient() { - return new BlobBatchClient(this.url, this.pipeline); - } - /** - * Only available for BlobServiceClient constructed with a shared key credential. - * - * Generates a Blob account Shared Access Signature (SAS) URI based on the client properties - * and parameters passed in. The SAS is signed by the shared key credential of the client. - * - * @see https://docs.microsoft.com/en-us/rest/api/storageservices/create-account-sas - * - * @param expiresOn - Optional. The time at which the shared access signature becomes invalid. Default to an hour later if not provided. - * @param permissions - Specifies the list of permissions to be associated with the SAS. - * @param resourceTypes - Specifies the resource types associated with the shared access signature. - * @param options - Optional parameters. - * @returns An account SAS URI consisting of the URI to the resource represented by this client, followed by the generated SAS token. - */ - generateAccountSasUrl(expiresOn, permissions = AccountSASPermissions.parse("r"), resourceTypes = "sco", options = {}) { - if (!(this.credential instanceof StorageSharedKeyCredential)) { - throw RangeError("Can only generate the account SAS when the client is initialized with a shared key credential"); + }; + NoopTracer.prototype.startActiveSpan = function (name, arg2, arg3, arg4) { + var opts; + var ctx; + var fn; + if (arguments.length < 2) { + return; } - if (expiresOn === undefined) { - const now = new Date(); - expiresOn = new Date(now.getTime() + 3600 * 1000); + else if (arguments.length === 2) { + fn = arg2; } - const sas = generateAccountSASQueryParameters(Object.assign({ permissions, - expiresOn, - resourceTypes, services: AccountSASServices.parse("b").toString() }, options), this.credential).toString(); - return appendToURLQuery(this.url, sas); - } + else if (arguments.length === 3) { + opts = arg2; + fn = arg3; + } + else { + opts = arg2; + ctx = arg3; + fn = arg4; + } + var parentContext = ctx !== null && ctx !== void 0 ? 
ctx : context.active(); + var span = this.startSpan(name, opts, parentContext); + var contextWithSpanSet = context_utils_1.setSpan(parentContext, span); + return context.with(contextWithSpanSet, fn, undefined, span); + }; + return NoopTracer; +}()); +exports.NoopTracer = NoopTracer; +function isSpanContext(spanContext) { + return (typeof spanContext === 'object' && + typeof spanContext['spanId'] === 'string' && + typeof spanContext['traceId'] === 'string' && + typeof spanContext['traceFlags'] === 'number'); } +//# sourceMappingURL=NoopTracer.js.map -Object.defineProperty(exports, "BaseRequestPolicy", ({ - enumerable: true, - get: function () { return coreHttp.BaseRequestPolicy; } -})); -Object.defineProperty(exports, "HttpHeaders", ({ - enumerable: true, - get: function () { return coreHttp.HttpHeaders; } -})); -Object.defineProperty(exports, "RequestPolicyOptions", ({ - enumerable: true, - get: function () { return coreHttp.RequestPolicyOptions; } -})); -Object.defineProperty(exports, "RestError", ({ - enumerable: true, - get: function () { return coreHttp.RestError; } -})); -Object.defineProperty(exports, "WebResource", ({ - enumerable: true, - get: function () { return coreHttp.WebResource; } -})); -Object.defineProperty(exports, "deserializationPolicy", ({ - enumerable: true, - get: function () { return coreHttp.deserializationPolicy; } -})); -exports.AccountSASPermissions = AccountSASPermissions; -exports.AccountSASResourceTypes = AccountSASResourceTypes; -exports.AccountSASServices = AccountSASServices; -exports.AnonymousCredential = AnonymousCredential; -exports.AnonymousCredentialPolicy = AnonymousCredentialPolicy; -exports.AppendBlobClient = AppendBlobClient; -exports.BlobBatch = BlobBatch; -exports.BlobBatchClient = BlobBatchClient; -exports.BlobClient = BlobClient; -exports.BlobLeaseClient = BlobLeaseClient; -exports.BlobSASPermissions = BlobSASPermissions; -exports.BlobServiceClient = BlobServiceClient; -exports.BlockBlobClient = BlockBlobClient; -exports.ContainerClient = ContainerClient; -exports.ContainerSASPermissions = ContainerSASPermissions; -exports.Credential = Credential; -exports.CredentialPolicy = CredentialPolicy; -exports.PageBlobClient = PageBlobClient; -exports.Pipeline = Pipeline; -exports.SASQueryParameters = SASQueryParameters; -exports.StorageBrowserPolicy = StorageBrowserPolicy; -exports.StorageBrowserPolicyFactory = StorageBrowserPolicyFactory; -exports.StorageOAuthScopes = StorageOAuthScopes; -exports.StorageRetryPolicy = StorageRetryPolicy; -exports.StorageRetryPolicyFactory = StorageRetryPolicyFactory; -exports.StorageSharedKeyCredential = StorageSharedKeyCredential; -exports.StorageSharedKeyCredentialPolicy = StorageSharedKeyCredentialPolicy; -exports.generateAccountSASQueryParameters = generateAccountSASQueryParameters; -exports.generateBlobSASQueryParameters = generateBlobSASQueryParameters; -exports.isPipelineLike = isPipelineLike; -exports.logger = logger; -exports.newPipeline = newPipeline; -//# sourceMappingURL=index.js.map +/***/ }), + +/***/ 3259: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.NoopTracerProvider = void 0; +var NoopTracer_1 = __nccwpck_require__(7606); +/** + * An implementation of the {@link TracerProvider} which returns an impotent + * Tracer for all calls to `getTracer`. + * + * All operations are no-op. + */ +var NoopTracerProvider = /** @class */ (function () { + function NoopTracerProvider() { + } + NoopTracerProvider.prototype.getTracer = function (_name, _version, _options) { + return new NoopTracer_1.NoopTracer(); + }; + return NoopTracerProvider; +}()); +exports.NoopTracerProvider = NoopTracerProvider; +//# sourceMappingURL=NoopTracerProvider.js.map /***/ }), -/***/ 7171: -/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { +/***/ 3503: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; @@ -47574,88 +46913,115 @@ exports.newPipeline = newPipeline; * See the License for the specific language governing permissions and * limitations under the License. */ -var __spreadArray = (this && this.__spreadArray) || function (to, from) { - for (var i = 0, il = from.length, j = to.length; i < il; i++, j++) - to[j] = from[i]; - return to; -}; Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.ContextAPI = void 0; -var NoopContextManager_1 = __nccwpck_require__(4118); -var global_utils_1 = __nccwpck_require__(5135); -var diag_1 = __nccwpck_require__(1877); -var API_NAME = 'context'; -var NOOP_CONTEXT_MANAGER = new NoopContextManager_1.NoopContextManager(); +exports.ProxyTracer = void 0; +var NoopTracer_1 = __nccwpck_require__(7606); +var NOOP_TRACER = new NoopTracer_1.NoopTracer(); /** - * Singleton object which represents the entry point to the OpenTelemetry Context API + * Proxy tracer provided by the proxy tracer provider */ -var ContextAPI = /** @class */ (function () { - /** Empty private constructor prevents end users from constructing a new instance of the API */ - function ContextAPI() { +var ProxyTracer = /** @class */ (function () { + function ProxyTracer(_provider, name, version, options) { + this._provider = _provider; + this.name = name; + this.version = version; + this.options = options; } - /** Get the singleton instance of the Context API */ - ContextAPI.getInstance = function () { - if (!this._instance) { - this._instance = new ContextAPI(); - } - return this._instance; + ProxyTracer.prototype.startSpan = function (name, options, context) { + return this._getTracer().startSpan(name, options, context); }; - /** - * Set the current context manager. 
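-     *
-     * A minimal usage sketch (editorial; the context manager implementation
-     * named here is hypothetical):
-     *
-     * ```js
-     * const ok = api.context.setGlobalContextManager(new MyContextManager());
-     * ```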
- * - * @returns true if the context manager was successfully registered, else false - */ - ContextAPI.prototype.setGlobalContextManager = function (contextManager) { - return global_utils_1.registerGlobal(API_NAME, contextManager, diag_1.DiagAPI.instance()); + ProxyTracer.prototype.startActiveSpan = function (_name, _options, _context, _fn) { + var tracer = this._getTracer(); + return Reflect.apply(tracer.startActiveSpan, tracer, arguments); }; /** - * Get the currently active context + * Try to get a tracer from the proxy tracer provider. + * If the proxy tracer provider has no delegate, return a noop tracer. */ - ContextAPI.prototype.active = function () { - return this._getContextManager().active(); + ProxyTracer.prototype._getTracer = function () { + if (this._delegate) { + return this._delegate; + } + var tracer = this._provider.getDelegateTracer(this.name, this.version, this.options); + if (!tracer) { + return NOOP_TRACER; + } + this._delegate = tracer; + return this._delegate; }; + return ProxyTracer; +}()); +exports.ProxyTracer = ProxyTracer; +//# sourceMappingURL=ProxyTracer.js.map + +/***/ }), + +/***/ 2285: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.ProxyTracerProvider = void 0; +var ProxyTracer_1 = __nccwpck_require__(3503); +var NoopTracerProvider_1 = __nccwpck_require__(3259); +var NOOP_TRACER_PROVIDER = new NoopTracerProvider_1.NoopTracerProvider(); +/** + * Tracer provider which provides {@link ProxyTracer}s. + * + * Before a delegate is set, tracers provided are NoOp. + * When a delegate is set, traces are provided from the delegate. + * When a delegate is set after tracers have already been provided, + * all tracers already provided will use the provided delegate implementation. + */ +var ProxyTracerProvider = /** @class */ (function () { + function ProxyTracerProvider() { + } /** - * Execute a function with an active context - * - * @param context context to be active during function execution - * @param fn function to execute in a context - * @param thisArg optional receiver to be used for calling fn - * @param args optional arguments forwarded to fn + * Get a {@link ProxyTracer} */ - ContextAPI.prototype.with = function (context, fn, thisArg) { + ProxyTracerProvider.prototype.getTracer = function (name, version, options) { var _a; - var args = []; - for (var _i = 3; _i < arguments.length; _i++) { - args[_i - 3] = arguments[_i]; - } - return (_a = this._getContextManager()).with.apply(_a, __spreadArray([context, fn, thisArg], args)); + return ((_a = this.getDelegateTracer(name, version, options)) !== null && _a !== void 0 ? _a : new ProxyTracer_1.ProxyTracer(this, name, version, options)); + }; + ProxyTracerProvider.prototype.getDelegate = function () { + var _a; + return (_a = this._delegate) !== null && _a !== void 0 ? 
_a : NOOP_TRACER_PROVIDER; }; /** - * Bind a context to a target function or event emitter - * - * @param context context to bind to the event emitter or function. Defaults to the currently active context - * @param target function or event emitter to bind + * Set the delegate tracer provider */ - ContextAPI.prototype.bind = function (context, target) { - return this._getContextManager().bind(context, target); - }; - ContextAPI.prototype._getContextManager = function () { - return global_utils_1.getGlobal(API_NAME) || NOOP_CONTEXT_MANAGER; + ProxyTracerProvider.prototype.setDelegate = function (delegate) { + this._delegate = delegate; }; - /** Disable and remove the global context manager */ - ContextAPI.prototype.disable = function () { - this._getContextManager().disable(); - global_utils_1.unregisterGlobal(API_NAME, diag_1.DiagAPI.instance()); + ProxyTracerProvider.prototype.getDelegateTracer = function (name, version, options) { + var _a; + return (_a = this._delegate) === null || _a === void 0 ? void 0 : _a.getTracer(name, version, options); }; - return ContextAPI; + return ProxyTracerProvider; }()); -exports.ContextAPI = ContextAPI; -//# sourceMappingURL=context.js.map +exports.ProxyTracerProvider = ProxyTracerProvider; +//# sourceMappingURL=ProxyTracerProvider.js.map /***/ }), -/***/ 1877: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { +/***/ 9671: +/***/ ((__unused_webpack_module, exports) => { "use strict"; @@ -47675,86 +47041,110 @@ exports.ContextAPI = ContextAPI; * limitations under the License. */ Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.DiagAPI = void 0; -var ComponentLogger_1 = __nccwpck_require__(7978); -var logLevelLogger_1 = __nccwpck_require__(9639); -var types_1 = __nccwpck_require__(8077); -var global_utils_1 = __nccwpck_require__(5135); -var API_NAME = 'diag'; +//# sourceMappingURL=Sampler.js.map + +/***/ }), + +/***/ 3209: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.SamplingDecision = void 0; /** - * Singleton object which represents the entry point to the OpenTelemetry internal - * diagnostic API + * @deprecated use the one declared in @opentelemetry/sdk-trace-base instead. + * A sampling decision that determines how a {@link Span} will be recorded + * and collected. */ -var DiagAPI = /** @class */ (function () { +var SamplingDecision; +(function (SamplingDecision) { /** - * Private internal constructor - * @private + * `Span.isRecording() === false`, span will not be recorded and all events + * and attributes will be dropped. 
*/ - function DiagAPI() { - function _logProxy(funcName) { - return function () { - var args = []; - for (var _i = 0; _i < arguments.length; _i++) { - args[_i] = arguments[_i]; - } - var logger = global_utils_1.getGlobal('diag'); - // shortcut if logger not set - if (!logger) - return; - return logger[funcName].apply(logger, args); - }; - } - // Using self local variable for minification purposes as 'this' cannot be minified - var self = this; - // DiagAPI specific functions - self.setLogger = function (logger, logLevel) { - var _a, _b; - if (logLevel === void 0) { logLevel = types_1.DiagLogLevel.INFO; } - if (logger === self) { - // There isn't much we can do here. - // Logging to the console might break the user application. - // Try to log to self. If a logger was previously registered it will receive the log. - var err = new Error('Cannot use diag as the logger for itself. Please use a DiagLogger implementation like ConsoleDiagLogger or a custom implementation'); - self.error((_a = err.stack) !== null && _a !== void 0 ? _a : err.message); - return false; - } - var oldLogger = global_utils_1.getGlobal('diag'); - var newLogger = logLevelLogger_1.createLogLevelDiagLogger(logLevel, logger); - // There already is an logger registered. We'll let it know before overwriting it. - if (oldLogger) { - var stack = (_b = new Error().stack) !== null && _b !== void 0 ? _b : ''; - oldLogger.warn("Current logger will be overwritten from " + stack); - newLogger.warn("Current logger will overwrite one already registered from " + stack); - } - return global_utils_1.registerGlobal('diag', newLogger, self, true); - }; - self.disable = function () { - global_utils_1.unregisterGlobal(API_NAME, self); - }; - self.createComponentLogger = function (options) { - return new ComponentLogger_1.DiagComponentLogger(options); - }; - self.verbose = _logProxy('verbose'); - self.debug = _logProxy('debug'); - self.info = _logProxy('info'); - self.warn = _logProxy('warn'); - self.error = _logProxy('error'); - } - /** Get the singleton instance of the DiagAPI API */ - DiagAPI.instance = function () { - if (!this._instance) { - this._instance = new DiagAPI(); - } - return this._instance; - }; - return DiagAPI; -}()); -exports.DiagAPI = DiagAPI; -//# sourceMappingURL=diag.js.map + SamplingDecision[SamplingDecision["NOT_RECORD"] = 0] = "NOT_RECORD"; + /** + * `Span.isRecording() === true`, but `Sampled` flag in {@link TraceFlags} + * MUST NOT be set. + */ + SamplingDecision[SamplingDecision["RECORD"] = 1] = "RECORD"; + /** + * `Span.isRecording() === true` AND `Sampled` flag in {@link TraceFlags} + * MUST be set. + */ + SamplingDecision[SamplingDecision["RECORD_AND_SAMPLED"] = 2] = "RECORD_AND_SAMPLED"; +})(SamplingDecision = exports.SamplingDecision || (exports.SamplingDecision = {})); +//# sourceMappingURL=SamplingResult.js.map /***/ }), -/***/ 9909: +/***/ 955: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +//# sourceMappingURL=SpanOptions.js.map + +/***/ }), + +/***/ 7492: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +//# sourceMappingURL=attributes.js.map + +/***/ }), + +/***/ 3326: /***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; @@ -47775,85 +47165,188 @@ exports.DiagAPI = DiagAPI; * limitations under the License. */ Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.PropagationAPI = void 0; -var global_utils_1 = __nccwpck_require__(5135); -var NoopTextMapPropagator_1 = __nccwpck_require__(2368); -var TextMapPropagator_1 = __nccwpck_require__(865); -var context_helpers_1 = __nccwpck_require__(7682); -var utils_1 = __nccwpck_require__(8136); -var diag_1 = __nccwpck_require__(1877); -var API_NAME = 'propagation'; -var NOOP_TEXT_MAP_PROPAGATOR = new NoopTextMapPropagator_1.NoopTextMapPropagator(); +exports.getSpanContext = exports.setSpanContext = exports.deleteSpan = exports.setSpan = exports.getActiveSpan = exports.getSpan = void 0; +var context_1 = __nccwpck_require__(8242); +var NonRecordingSpan_1 = __nccwpck_require__(1462); +var context_2 = __nccwpck_require__(7171); /** - * Singleton object which represents the entry point to the OpenTelemetry Propagation API + * span key */ -var PropagationAPI = /** @class */ (function () { - /** Empty private constructor prevents end users from constructing a new instance of the API */ - function PropagationAPI() { - this.createBaggage = utils_1.createBaggage; - this.getBaggage = context_helpers_1.getBaggage; - this.setBaggage = context_helpers_1.setBaggage; - this.deleteBaggage = context_helpers_1.deleteBaggage; +var SPAN_KEY = context_1.createContextKey('OpenTelemetry Context Key SPAN'); +/** + * Return the span if one exists + * + * @param context context to get span from + */ +function getSpan(context) { + return context.getValue(SPAN_KEY) || undefined; +} +exports.getSpan = getSpan; +/** + * Gets the span from the current context, if one exists. 
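+ *
+ * A small usage sketch (editorial; assumes some caller has already made a
+ * span active in the current context):
+ *
+ * ```js
+ * const span = getActiveSpan();
+ * if (span) span.addEvent('checkpoint');
+ * ```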
+ */ +function getActiveSpan() { + return getSpan(context_2.ContextAPI.getInstance().active()); +} +exports.getActiveSpan = getActiveSpan; +/** + * Set the span on a context + * + * @param context context to use as parent + * @param span span to set active + */ +function setSpan(context, span) { + return context.setValue(SPAN_KEY, span); +} +exports.setSpan = setSpan; +/** + * Remove current span stored in the context + * + * @param context context to delete span from + */ +function deleteSpan(context) { + return context.deleteValue(SPAN_KEY); +} +exports.deleteSpan = deleteSpan; +/** + * Wrap span context in a NoopSpan and set as span in a new + * context + * + * @param context context to set active span on + * @param spanContext span context to be wrapped + */ +function setSpanContext(context, spanContext) { + return setSpan(context, new NonRecordingSpan_1.NonRecordingSpan(spanContext)); +} +exports.setSpanContext = setSpanContext; +/** + * Get the span context of the span if it exists. + * + * @param context context to get values from + */ +function getSpanContext(context) { + var _a; + return (_a = getSpan(context)) === null || _a === void 0 ? void 0 : _a.spanContext(); +} +exports.getSpanContext = getSpanContext; +//# sourceMappingURL=context-utils.js.map + +/***/ }), + +/***/ 2110: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.TraceStateImpl = void 0; +var tracestate_validators_1 = __nccwpck_require__(4864); +var MAX_TRACE_STATE_ITEMS = 32; +var MAX_TRACE_STATE_LEN = 512; +var LIST_MEMBERS_SEPARATOR = ','; +var LIST_MEMBER_KEY_VALUE_SPLITTER = '='; +/** + * TraceState must be a class and not a simple object type because of the spec + * requirement (https://www.w3.org/TR/trace-context/#tracestate-field). + * + * Here is the list of allowed mutations: + * - New key-value pair should be added into the beginning of the list + * - The value of any key can be updated. Modified keys MUST be moved to the + * beginning of the list. + */ +var TraceStateImpl = /** @class */ (function () { + function TraceStateImpl(rawTraceState) { + this._internalState = new Map(); + if (rawTraceState) + this._parse(rawTraceState); } - /** Get the singleton instance of the Propagator API */ - PropagationAPI.getInstance = function () { - if (!this._instance) { - this._instance = new PropagationAPI(); + TraceStateImpl.prototype.set = function (key, value) { + // TODO: Benchmark the different approaches(map vs list) and + // use the faster one. + var traceState = this._clone(); + if (traceState._internalState.has(key)) { + traceState._internalState.delete(key); } - return this._instance; - }; - /** - * Set the current propagator. 
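-     *
-     * Sketch (editorial; the propagator implementation named here is
-     * hypothetical):
-     *
-     * ```js
-     * api.propagation.setGlobalPropagator(new MyTraceContextPropagator());
-     * ```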
- * - * @returns true if the propagator was successfully registered, else false - */ - PropagationAPI.prototype.setGlobalPropagator = function (propagator) { - return global_utils_1.registerGlobal(API_NAME, propagator, diag_1.DiagAPI.instance()); - }; - /** - * Inject context into a carrier to be propagated inter-process - * - * @param context Context carrying tracing data to inject - * @param carrier carrier to inject context into - * @param setter Function used to set values on the carrier - */ - PropagationAPI.prototype.inject = function (context, carrier, setter) { - if (setter === void 0) { setter = TextMapPropagator_1.defaultTextMapSetter; } - return this._getGlobalPropagator().inject(context, carrier, setter); + traceState._internalState.set(key, value); + return traceState; }; - /** - * Extract context from a carrier - * - * @param context Context which the newly created context will inherit from - * @param carrier Carrier to extract context from - * @param getter Function used to extract keys from a carrier - */ - PropagationAPI.prototype.extract = function (context, carrier, getter) { - if (getter === void 0) { getter = TextMapPropagator_1.defaultTextMapGetter; } - return this._getGlobalPropagator().extract(context, carrier, getter); + TraceStateImpl.prototype.unset = function (key) { + var traceState = this._clone(); + traceState._internalState.delete(key); + return traceState; }; - /** - * Return a list of all fields which may be used by the propagator. - */ - PropagationAPI.prototype.fields = function () { - return this._getGlobalPropagator().fields(); + TraceStateImpl.prototype.get = function (key) { + return this._internalState.get(key); }; - /** Remove the global propagator */ - PropagationAPI.prototype.disable = function () { - global_utils_1.unregisterGlobal(API_NAME, diag_1.DiagAPI.instance()); + TraceStateImpl.prototype.serialize = function () { + var _this = this; + return this._keys() + .reduce(function (agg, key) { + agg.push(key + LIST_MEMBER_KEY_VALUE_SPLITTER + _this.get(key)); + return agg; + }, []) + .join(LIST_MEMBERS_SEPARATOR); }; - PropagationAPI.prototype._getGlobalPropagator = function () { - return global_utils_1.getGlobal(API_NAME) || NOOP_TEXT_MAP_PROPAGATOR; + TraceStateImpl.prototype._parse = function (rawTraceState) { + if (rawTraceState.length > MAX_TRACE_STATE_LEN) + return; + this._internalState = rawTraceState + .split(LIST_MEMBERS_SEPARATOR) + .reverse() // Store in reverse so new keys (.set(...)) will be placed at the beginning + .reduce(function (agg, part) { + var listMember = part.trim(); // Optional Whitespace (OWS) handling + var i = listMember.indexOf(LIST_MEMBER_KEY_VALUE_SPLITTER); + if (i !== -1) { + var key = listMember.slice(0, i); + var value = listMember.slice(i + 1, part.length); + if (tracestate_validators_1.validateKey(key) && tracestate_validators_1.validateValue(value)) { + agg.set(key, value); + } + else { + // TODO: Consider to add warning log + } + } + return agg; + }, new Map()); + // Because of the reverse() requirement, trunc must be done after map is created + if (this._internalState.size > MAX_TRACE_STATE_ITEMS) { + this._internalState = new Map(Array.from(this._internalState.entries()) + .reverse() // Use reverse same as original tracestate parse chain + .slice(0, MAX_TRACE_STATE_ITEMS)); + } }; - return PropagationAPI; + TraceStateImpl.prototype._keys = function () { + return Array.from(this._internalState.keys()).reverse(); + }; + TraceStateImpl.prototype._clone = function () { + var traceState = new 
TraceStateImpl(); + traceState._internalState = new Map(this._internalState); + return traceState; + }; + return TraceStateImpl; }()); -exports.PropagationAPI = PropagationAPI; -//# sourceMappingURL=propagation.js.map +exports.TraceStateImpl = TraceStateImpl; +//# sourceMappingURL=tracestate-impl.js.map /***/ }), -/***/ 1539: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { +/***/ 4864: +/***/ ((__unused_webpack_module, exports) => { "use strict"; @@ -47873,73 +47366,39 @@ exports.PropagationAPI = PropagationAPI; * limitations under the License. */ Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.TraceAPI = void 0; -var global_utils_1 = __nccwpck_require__(5135); -var ProxyTracerProvider_1 = __nccwpck_require__(2285); -var spancontext_utils_1 = __nccwpck_require__(9745); -var context_utils_1 = __nccwpck_require__(3326); -var diag_1 = __nccwpck_require__(1877); -var API_NAME = 'trace'; +exports.validateValue = exports.validateKey = void 0; +var VALID_KEY_CHAR_RANGE = '[_0-9a-z-*/]'; +var VALID_KEY = "[a-z]" + VALID_KEY_CHAR_RANGE + "{0,255}"; +var VALID_VENDOR_KEY = "[a-z0-9]" + VALID_KEY_CHAR_RANGE + "{0,240}@[a-z]" + VALID_KEY_CHAR_RANGE + "{0,13}"; +var VALID_KEY_REGEX = new RegExp("^(?:" + VALID_KEY + "|" + VALID_VENDOR_KEY + ")$"); +var VALID_VALUE_BASE_REGEX = /^[ -~]{0,255}[!-~]$/; +var INVALID_VALUE_COMMA_EQUAL_REGEX = /,|=/; /** - * Singleton object which represents the entry point to the OpenTelemetry Tracing API + * Key is opaque string up to 256 characters printable. It MUST begin with a + * lowercase letter, and can only contain lowercase letters a-z, digits 0-9, + * underscores _, dashes -, asterisks *, and forward slashes /. + * For multi-tenant vendor scenarios, an at sign (@) can be used to prefix the + * vendor name. Vendors SHOULD set the tenant ID at the beginning of the key. + * see https://www.w3.org/TR/trace-context/#key */ -var TraceAPI = /** @class */ (function () { - /** Empty private constructor prevents end users from constructing a new instance of the API */ - function TraceAPI() { - this._proxyTracerProvider = new ProxyTracerProvider_1.ProxyTracerProvider(); - this.wrapSpanContext = spancontext_utils_1.wrapSpanContext; - this.isSpanContextValid = spancontext_utils_1.isSpanContextValid; - this.deleteSpan = context_utils_1.deleteSpan; - this.getSpan = context_utils_1.getSpan; - this.getActiveSpan = context_utils_1.getActiveSpan; - this.getSpanContext = context_utils_1.getSpanContext; - this.setSpan = context_utils_1.setSpan; - this.setSpanContext = context_utils_1.setSpanContext; - } - /** Get the singleton instance of the Trace API */ - TraceAPI.getInstance = function () { - if (!this._instance) { - this._instance = new TraceAPI(); - } - return this._instance; - }; - /** - * Set the current global tracer. - * - * @returns true if the tracer provider was successfully registered, else false - */ - TraceAPI.prototype.setGlobalTracerProvider = function (provider) { - var success = global_utils_1.registerGlobal(API_NAME, this._proxyTracerProvider, diag_1.DiagAPI.instance()); - if (success) { - this._proxyTracerProvider.setDelegate(provider); - } - return success; - }; - /** - * Returns the global tracer provider. - */ - TraceAPI.prototype.getTracerProvider = function () { - return global_utils_1.getGlobal(API_NAME) || this._proxyTracerProvider; - }; - /** - * Returns a tracer from the global tracer provider. 
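-     *
-     * For example (editorial sketch):
-     *
-     * ```js
-     * const tracer = api.trace.getTracer('my-instrumentation', '1.0.0');
-     * const span = tracer.startSpan('operation');
-     * span.end();
-     * ```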
- */ - TraceAPI.prototype.getTracer = function (name, version) { - return this.getTracerProvider().getTracer(name, version); - }; - /** Remove the global tracer provider */ - TraceAPI.prototype.disable = function () { - global_utils_1.unregisterGlobal(API_NAME, diag_1.DiagAPI.instance()); - this._proxyTracerProvider = new ProxyTracerProvider_1.ProxyTracerProvider(); - }; - return TraceAPI; -}()); -exports.TraceAPI = TraceAPI; -//# sourceMappingURL=trace.js.map +function validateKey(key) { + return VALID_KEY_REGEX.test(key); +} +exports.validateKey = validateKey; +/** + * Value is opaque string up to 256 characters printable ASCII RFC0020 + * characters (i.e., the range 0x20 to 0x7E) except comma , and =. + */ +function validateValue(value) { + return (VALID_VALUE_BASE_REGEX.test(value) && + !INVALID_VALUE_COMMA_EQUAL_REGEX.test(value)); +} +exports.validateValue = validateValue; +//# sourceMappingURL=tracestate-validators.js.map /***/ }), -/***/ 7682: +/***/ 2615: /***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; @@ -47960,47 +47419,18 @@ exports.TraceAPI = TraceAPI; * limitations under the License. */ Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.deleteBaggage = exports.setBaggage = exports.getBaggage = void 0; -var context_1 = __nccwpck_require__(8242); -/** - * Baggage key - */ -var BAGGAGE_KEY = context_1.createContextKey('OpenTelemetry Baggage Key'); -/** - * Retrieve the current baggage from the given context - * - * @param {Context} Context that manage all context values - * @returns {Baggage} Extracted baggage from the context - */ -function getBaggage(context) { - return context.getValue(BAGGAGE_KEY) || undefined; -} -exports.getBaggage = getBaggage; -/** - * Store a baggage in the given context - * - * @param {Context} Context that manage all context values - * @param {Baggage} baggage that will be set in the actual context - */ -function setBaggage(context, baggage) { - return context.setValue(BAGGAGE_KEY, baggage); -} -exports.setBaggage = setBaggage; -/** - * Delete the baggage stored in the given context - * - * @param {Context} Context that manage all context values - */ -function deleteBaggage(context) { - return context.deleteValue(BAGGAGE_KEY); +exports.createTraceState = void 0; +var tracestate_impl_1 = __nccwpck_require__(2110); +function createTraceState(rawTraceState) { + return new tracestate_impl_1.TraceStateImpl(rawTraceState); } -exports.deleteBaggage = deleteBaggage; -//# sourceMappingURL=context-helpers.js.map +exports.createTraceState = createTraceState; +//# sourceMappingURL=utils.js.map /***/ }), -/***/ 4811: -/***/ ((__unused_webpack_module, exports) => { +/***/ 1760: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; @@ -48020,57 +47450,20 @@ exports.deleteBaggage = deleteBaggage; * limitations under the License. */ Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.BaggageImpl = void 0; -var BaggageImpl = /** @class */ (function () { - function BaggageImpl(entries) { - this._entries = entries ? 
new Map(entries) : new Map(); - } - BaggageImpl.prototype.getEntry = function (key) { - var entry = this._entries.get(key); - if (!entry) { - return undefined; - } - return Object.assign({}, entry); - }; - BaggageImpl.prototype.getAllEntries = function () { - return Array.from(this._entries.entries()).map(function (_a) { - var k = _a[0], v = _a[1]; - return [k, v]; - }); - }; - BaggageImpl.prototype.setEntry = function (key, entry) { - var newBaggage = new BaggageImpl(this._entries); - newBaggage._entries.set(key, entry); - return newBaggage; - }; - BaggageImpl.prototype.removeEntry = function (key) { - var newBaggage = new BaggageImpl(this._entries); - newBaggage._entries.delete(key); - return newBaggage; - }; - BaggageImpl.prototype.removeEntries = function () { - var keys = []; - for (var _i = 0; _i < arguments.length; _i++) { - keys[_i] = arguments[_i]; - } - var newBaggage = new BaggageImpl(this._entries); - for (var _a = 0, keys_1 = keys; _a < keys_1.length; _a++) { - var key = keys_1[_a]; - newBaggage._entries.delete(key); - } - return newBaggage; - }; - BaggageImpl.prototype.clear = function () { - return new BaggageImpl(); - }; - return BaggageImpl; -}()); -exports.BaggageImpl = BaggageImpl; -//# sourceMappingURL=baggage-impl.js.map +exports.INVALID_SPAN_CONTEXT = exports.INVALID_TRACEID = exports.INVALID_SPANID = void 0; +var trace_flags_1 = __nccwpck_require__(6905); +exports.INVALID_SPANID = '0000000000000000'; +exports.INVALID_TRACEID = '00000000000000000000000000000000'; +exports.INVALID_SPAN_CONTEXT = { + traceId: exports.INVALID_TRACEID, + spanId: exports.INVALID_SPANID, + traceFlags: trace_flags_1.TraceFlags.NONE, +}; +//# sourceMappingURL=invalid-span-constants.js.map /***/ }), -/***/ 3542: +/***/ 4023: /***/ ((__unused_webpack_module, exports) => { "use strict"; @@ -48091,16 +47484,11 @@ exports.BaggageImpl = BaggageImpl; * limitations under the License. */ Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.baggageEntryMetadataSymbol = void 0; -/** - * Symbol used to make BaggageEntryMetadata an opaque type - */ -exports.baggageEntryMetadataSymbol = Symbol('BaggageEntryMetadata'); -//# sourceMappingURL=symbol.js.map +//# sourceMappingURL=link.js.map /***/ }), -/***/ 1508: +/***/ 4416: /***/ ((__unused_webpack_module, exports) => { "use strict"; @@ -48121,12 +47509,12 @@ exports.baggageEntryMetadataSymbol = Symbol('BaggageEntryMetadata'); * limitations under the License. */ Object.defineProperty(exports, "__esModule", ({ value: true })); -//# sourceMappingURL=types.js.map +//# sourceMappingURL=span.js.map /***/ }), -/***/ 8136: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { +/***/ 5769: +/***/ ((__unused_webpack_module, exports) => { "use strict"; @@ -48146,49 +47534,17 @@ Object.defineProperty(exports, "__esModule", ({ value: true })); * limitations under the License. 
*/ Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.baggageEntryMetadataFromString = exports.createBaggage = void 0; -var diag_1 = __nccwpck_require__(1877); -var baggage_impl_1 = __nccwpck_require__(4811); -var symbol_1 = __nccwpck_require__(3542); -var diag = diag_1.DiagAPI.instance(); -/** - * Create a new Baggage with optional entries - * - * @param entries An array of baggage entries the new baggage should contain - */ -function createBaggage(entries) { - if (entries === void 0) { entries = {}; } - return new baggage_impl_1.BaggageImpl(new Map(Object.entries(entries))); -} -exports.createBaggage = createBaggage; -/** - * Create a serializable BaggageEntryMetadata object from a string. - * - * @param str string metadata. Format is currently not defined by the spec and has no special meaning. - * - */ -function baggageEntryMetadataFromString(str) { - if (typeof str !== 'string') { - diag.error("Cannot create baggage metadata from unknown type: " + typeof str); - str = ''; - } - return { - __TYPE__: symbol_1.baggageEntryMetadataSymbol, - toString: function () { - return str; - }, - }; -} -exports.baggageEntryMetadataFromString = baggageEntryMetadataFromString; -//# sourceMappingURL=utils.js.map +//# sourceMappingURL=span_context.js.map /***/ }), -/***/ 1109: +/***/ 1424: /***/ ((__unused_webpack_module, exports) => { "use strict"; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.SpanKind = void 0; /* * Copyright The OpenTelemetry Authors * @@ -48204,16 +47560,44 @@ exports.baggageEntryMetadataFromString = baggageEntryMetadataFromString; * See the License for the specific language governing permissions and * limitations under the License. */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -//# sourceMappingURL=Attributes.js.map +var SpanKind; +(function (SpanKind) { + /** Default value. Indicates that the span is used internally. */ + SpanKind[SpanKind["INTERNAL"] = 0] = "INTERNAL"; + /** + * Indicates that the span covers server-side handling of an RPC or other + * remote request. + */ + SpanKind[SpanKind["SERVER"] = 1] = "SERVER"; + /** + * Indicates that the span covers the client-side wrapper around an RPC or + * other remote request. + */ + SpanKind[SpanKind["CLIENT"] = 2] = "CLIENT"; + /** + * Indicates that the span describes producer sending a message to a + * broker. Unlike client and server, there is no direct critical path latency + * relationship between producer and consumer spans. + */ + SpanKind[SpanKind["PRODUCER"] = 3] = "PRODUCER"; + /** + * Indicates that the span describes consumer receiving a message from a + * broker. Unlike client and server, there is no direct critical path latency + * relationship between producer and consumer spans. + */ + SpanKind[SpanKind["CONSUMER"] = 4] = "CONSUMER"; +})(SpanKind = exports.SpanKind || (exports.SpanKind = {})); +//# sourceMappingURL=span_kind.js.map /***/ }), -/***/ 4447: -/***/ ((__unused_webpack_module, exports) => { +/***/ 9745: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.wrapSpanContext = exports.isSpanContextValid = exports.isValidSpanId = exports.isValidTraceId = void 0; /* * Copyright The OpenTelemetry Authors * @@ -48229,26 +47613,77 @@ Object.defineProperty(exports, "__esModule", ({ value: true })); * See the License for the specific language governing permissions and * limitations under the License. 
*/ -Object.defineProperty(exports, "__esModule", ({ value: true })); -//# sourceMappingURL=Exception.js.map +var invalid_span_constants_1 = __nccwpck_require__(1760); +var NonRecordingSpan_1 = __nccwpck_require__(1462); +var VALID_TRACEID_REGEX = /^([0-9a-f]{32})$/i; +var VALID_SPANID_REGEX = /^[0-9a-f]{16}$/i; +function isValidTraceId(traceId) { + return VALID_TRACEID_REGEX.test(traceId) && traceId !== invalid_span_constants_1.INVALID_TRACEID; +} +exports.isValidTraceId = isValidTraceId; +function isValidSpanId(spanId) { + return VALID_SPANID_REGEX.test(spanId) && spanId !== invalid_span_constants_1.INVALID_SPANID; +} +exports.isValidSpanId = isValidSpanId; +/** + * Returns true if this {@link SpanContext} is valid. + * @return true if this {@link SpanContext} is valid. + */ +function isSpanContextValid(spanContext) { + return (isValidTraceId(spanContext.traceId) && isValidSpanId(spanContext.spanId)); +} +exports.isSpanContextValid = isSpanContextValid; +/** + * Wrap the given {@link SpanContext} in a new non-recording {@link Span} + * + * @param spanContext span context to be wrapped + * @returns a new non-recording {@link Span} with the provided context + */ +function wrapSpanContext(spanContext) { + return new NonRecordingSpan_1.NonRecordingSpan(spanContext); +} +exports.wrapSpanContext = wrapSpanContext; +//# sourceMappingURL=spancontext-utils.js.map /***/ }), -/***/ 2358: +/***/ 8845: /***/ ((__unused_webpack_module, exports) => { "use strict"; Object.defineProperty(exports, "__esModule", ({ value: true })); -//# sourceMappingURL=Time.js.map +exports.SpanStatusCode = void 0; +/** + * An enumeration of status codes. + */ +var SpanStatusCode; +(function (SpanStatusCode) { + /** + * The default status. + */ + SpanStatusCode[SpanStatusCode["UNSET"] = 0] = "UNSET"; + /** + * The operation has been validated by an Application developer or + * Operator to have completed successfully. + */ + SpanStatusCode[SpanStatusCode["OK"] = 1] = "OK"; + /** + * The operation contains an error. + */ + SpanStatusCode[SpanStatusCode["ERROR"] = 2] = "ERROR"; +})(SpanStatusCode = exports.SpanStatusCode || (exports.SpanStatusCode = {})); +//# sourceMappingURL=status.js.map /***/ }), -/***/ 4118: -/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { +/***/ 6905: +/***/ ((__unused_webpack_module, exports) => { "use strict"; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.TraceFlags = void 0; /* * Copyright The OpenTelemetry Authors * @@ -48264,44 +47699,18 @@ Object.defineProperty(exports, "__esModule", ({ value: true })); * See the License for the specific language governing permissions and * limitations under the License. 
*/ -var __spreadArray = (this && this.__spreadArray) || function (to, from) { - for (var i = 0, il = from.length, j = to.length; i < il; i++, j++) - to[j] = from[i]; - return to; -}; -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.NoopContextManager = void 0; -var context_1 = __nccwpck_require__(8242); -var NoopContextManager = /** @class */ (function () { - function NoopContextManager() { - } - NoopContextManager.prototype.active = function () { - return context_1.ROOT_CONTEXT; - }; - NoopContextManager.prototype.with = function (_context, fn, thisArg) { - var args = []; - for (var _i = 3; _i < arguments.length; _i++) { - args[_i - 3] = arguments[_i]; - } - return fn.call.apply(fn, __spreadArray([thisArg], args)); - }; - NoopContextManager.prototype.bind = function (_context, target) { - return target; - }; - NoopContextManager.prototype.enable = function () { - return this; - }; - NoopContextManager.prototype.disable = function () { - return this; - }; - return NoopContextManager; -}()); -exports.NoopContextManager = NoopContextManager; -//# sourceMappingURL=NoopContextManager.js.map +var TraceFlags; +(function (TraceFlags) { + /** Represents no flag set. */ + TraceFlags[TraceFlags["NONE"] = 0] = "NONE"; + /** Bit to represent whether trace is sampled in trace flags. */ + TraceFlags[TraceFlags["SAMPLED"] = 1] = "SAMPLED"; +})(TraceFlags = exports.TraceFlags || (exports.TraceFlags = {})); +//# sourceMappingURL=trace_flags.js.map /***/ }), -/***/ 8242: +/***/ 8384: /***/ ((__unused_webpack_module, exports) => { "use strict"; @@ -48322,49 +47731,36 @@ exports.NoopContextManager = NoopContextManager; * limitations under the License. */ Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.ROOT_CONTEXT = exports.createContextKey = void 0; -/** Get a key to uniquely identify a context value */ -function createContextKey(description) { - // The specification states that for the same input, multiple calls should - // return different keys. Due to the nature of the JS dependency management - // system, this creates problems where multiple versions of some package - // could hold different keys for the same property. - // - // Therefore, we use Symbol.for which returns the same key for the same input. - return Symbol.for(description); -} -exports.createContextKey = createContextKey; -var BaseContext = /** @class */ (function () { - /** - * Construct a new context which inherits values from an optional parent context. - * - * @param parentContext a context from which to inherit values - */ - function BaseContext(parentContext) { - // for minification - var self = this; - self._currentContext = parentContext ? 
new Map(parentContext) : new Map(); - self.getValue = function (key) { return self._currentContext.get(key); }; - self.setValue = function (key, value) { - var context = new BaseContext(self._currentContext); - context._currentContext.set(key, value); - return context; - }; - self.deleteValue = function (key) { - var context = new BaseContext(self._currentContext); - context._currentContext.delete(key); - return context; - }; - } - return BaseContext; -}()); -/** The root context is used as the default parent context when there is no active context */ -exports.ROOT_CONTEXT = new BaseContext(); -//# sourceMappingURL=context.js.map +//# sourceMappingURL=trace_state.js.map + +/***/ }), + +/***/ 3168: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/* + * Copyright The OpenTelemetry Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +//# sourceMappingURL=tracer.js.map /***/ }), -/***/ 6504: +/***/ 1823: /***/ ((__unused_webpack_module, exports) => { "use strict"; @@ -48385,12 +47781,12 @@ exports.ROOT_CONTEXT = new BaseContext(); * limitations under the License. */ Object.defineProperty(exports, "__esModule", ({ value: true })); -//# sourceMappingURL=types.js.map +//# sourceMappingURL=tracer_options.js.map /***/ }), -/***/ 7978: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { +/***/ 891: +/***/ ((__unused_webpack_module, exports) => { "use strict"; @@ -48410,73 +47806,11 @@ Object.defineProperty(exports, "__esModule", ({ value: true })); * limitations under the License. */ Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.DiagComponentLogger = void 0; -var global_utils_1 = __nccwpck_require__(5135); -/** - * Component Logger which is meant to be used as part of any component which - * will add automatically additional namespace in front of the log message. 
- * It will then forward all message to global diag logger - * @example - * const cLogger = diag.createComponentLogger({ namespace: '@opentelemetry/instrumentation-http' }); - * cLogger.debug('test'); - * // @opentelemetry/instrumentation-http test - */ -var DiagComponentLogger = /** @class */ (function () { - function DiagComponentLogger(props) { - this._namespace = props.namespace || 'DiagComponentLogger'; - } - DiagComponentLogger.prototype.debug = function () { - var args = []; - for (var _i = 0; _i < arguments.length; _i++) { - args[_i] = arguments[_i]; - } - return logProxy('debug', this._namespace, args); - }; - DiagComponentLogger.prototype.error = function () { - var args = []; - for (var _i = 0; _i < arguments.length; _i++) { - args[_i] = arguments[_i]; - } - return logProxy('error', this._namespace, args); - }; - DiagComponentLogger.prototype.info = function () { - var args = []; - for (var _i = 0; _i < arguments.length; _i++) { - args[_i] = arguments[_i]; - } - return logProxy('info', this._namespace, args); - }; - DiagComponentLogger.prototype.warn = function () { - var args = []; - for (var _i = 0; _i < arguments.length; _i++) { - args[_i] = arguments[_i]; - } - return logProxy('warn', this._namespace, args); - }; - DiagComponentLogger.prototype.verbose = function () { - var args = []; - for (var _i = 0; _i < arguments.length; _i++) { - args[_i] = arguments[_i]; - } - return logProxy('verbose', this._namespace, args); - }; - return DiagComponentLogger; -}()); -exports.DiagComponentLogger = DiagComponentLogger; -function logProxy(funcName, namespace, args) { - var logger = global_utils_1.getGlobal('diag'); - // shortcut if logger not set - if (!logger) { - return; - } - args.unshift(namespace); - return logger[funcName].apply(logger, args); -} -//# sourceMappingURL=ComponentLogger.js.map +//# sourceMappingURL=tracer_provider.js.map /***/ }), -/***/ 3041: +/***/ 8996: /***/ ((__unused_webpack_module, exports) => { "use strict"; @@ -48497,10931 +47831,11689 @@ function logProxy(funcName, namespace, args) { * limitations under the License. */ Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.DiagConsoleLogger = void 0; -var consoleMap = [ - { n: 'error', c: 'error' }, - { n: 'warn', c: 'warn' }, - { n: 'info', c: 'info' }, - { n: 'debug', c: 'debug' }, - { n: 'verbose', c: 'trace' }, -]; +exports.VERSION = void 0; +// this is autogenerated file, see scripts/version-update.js +exports.VERSION = '1.2.0'; +//# sourceMappingURL=version.js.map + +/***/ }), + +/***/ 4812: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +module.exports = +{ + parallel : __nccwpck_require__(8210), + serial : __nccwpck_require__(445), + serialOrdered : __nccwpck_require__(3578) +}; + + +/***/ }), + +/***/ 1700: +/***/ ((module) => { + +// API +module.exports = abort; + /** - * A simple Immutable Console based diagnostic logger which will output any messages to the Console. 
- * If you want to limit the amount of logging to a specific level or lower use the - * {@link createLogLevelDiagLogger} + * Aborts leftover active jobs + * + * @param {object} state - current state object */ -var DiagConsoleLogger = /** @class */ (function () { - function DiagConsoleLogger() { - function _consoleFunc(funcName) { - return function () { - var args = []; - for (var _i = 0; _i < arguments.length; _i++) { - args[_i] = arguments[_i]; - } - if (console) { - // Some environments only expose the console when the F12 developer console is open - // eslint-disable-next-line no-console - var theFunc = console[funcName]; - if (typeof theFunc !== 'function') { - // Not all environments support all functions - // eslint-disable-next-line no-console - theFunc = console.log; - } - // One last final check - if (typeof theFunc === 'function') { - return theFunc.apply(console, args); - } - } - }; - } - for (var i = 0; i < consoleMap.length; i++) { - this[consoleMap[i].n] = _consoleFunc(consoleMap[i].c); - } +function abort(state) +{ + Object.keys(state.jobs).forEach(clean.bind(state)); + + // reset leftover jobs + state.jobs = {}; +} + +/** + * Cleans up leftover job by invoking abort function for the provided job id + * + * @this state + * @param {string|number} key - job id to abort + */ +function clean(key) +{ + if (typeof this.jobs[key] == 'function') + { + this.jobs[key](); + } +} + + +/***/ }), + +/***/ 2794: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +var defer = __nccwpck_require__(5295); + +// API +module.exports = async; + +/** + * Runs provided callback asynchronously + * even if callback itself is not + * + * @param {function} callback - callback to invoke + * @returns {function} - augmented callback + */ +function async(callback) +{ + var isAsync = false; + + // check if async happened + defer(function() { isAsync = true; }); + + return function async_callback(err, result) + { + if (isAsync) + { + callback(err, result); + } + else + { + defer(function nextTick_callback() + { + callback(err, result); + }); + } + }; +} + + +/***/ }), + +/***/ 5295: +/***/ ((module) => { + +module.exports = defer; + +/** + * Runs provided function on next iteration of the event loop + * + * @param {function} fn - function to run + */ +function defer(fn) +{ + var nextTick = typeof setImmediate == 'function' + ? setImmediate + : ( + typeof process == 'object' && typeof process.nextTick == 'function' + ? process.nextTick + : null + ); + + if (nextTick) + { + nextTick(fn); + } + else + { + setTimeout(fn, 0); + } +} + + +/***/ }), + +/***/ 9023: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +var async = __nccwpck_require__(2794) + , abort = __nccwpck_require__(1700) + ; + +// API +module.exports = iterate; + +/** + * Iterates over each job object + * + * @param {array|object} list - array or object (named list) to iterate over + * @param {function} iterator - iterator to run + * @param {object} state - current job status + * @param {function} callback - invoked when all elements processed + */ +function iterate(list, iterator, state, callback) +{ + // store current index + var key = state['keyedList'] ? 
state['keyedList'][state.index] : state.index; + + state.jobs[key] = runJob(iterator, key, list[key], function(error, output) + { + // don't repeat yourself + // skip secondary callbacks + if (!(key in state.jobs)) + { + return; + } + + // clean up jobs + delete state.jobs[key]; + + if (error) + { + // don't process rest of the results + // stop still active jobs + // and reset the list + abort(state); + } + else + { + state.results[key] = output; + } + + // return salvaged results + callback(error, state.results); + }); +} + +/** + * Runs iterator over provided job element + * + * @param {function} iterator - iterator to invoke + * @param {string|number} key - key/index of the element in the list of jobs + * @param {mixed} item - job description + * @param {function} callback - invoked after iterator is done with the job + * @returns {function|mixed} - job abort function or something else + */ +function runJob(iterator, key, item, callback) +{ + var aborter; + + // allow shortcut if iterator expects only two arguments + if (iterator.length == 2) + { + aborter = iterator(item, async(callback)); + } + // otherwise go with full three arguments + else + { + aborter = iterator(item, key, async(callback)); + } + + return aborter; +} + + +/***/ }), + +/***/ 2474: +/***/ ((module) => { + +// API +module.exports = state; + +/** + * Creates initial state object + * for iteration over list + * + * @param {array|object} list - list to iterate over + * @param {function|null} sortMethod - function to use for keys sort, + * or `null` to keep them as is + * @returns {object} - initial state object + */ +function state(list, sortMethod) +{ + var isNamedList = !Array.isArray(list) + , initState = + { + index : 0, + keyedList: isNamedList || sortMethod ? Object.keys(list) : null, + jobs : {}, + results : isNamedList ? {} : [], + size : isNamedList ? Object.keys(list).length : list.length + } + ; + + if (sortMethod) + { + // sort array keys based on it's values + // sort object's keys just on own merit + initState.keyedList.sort(isNamedList ? 
sortMethod : function(a, b) + { + return sortMethod(list[a], list[b]); + }); + } + + return initState; +} + + +/***/ }), + +/***/ 7942: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +var abort = __nccwpck_require__(1700) + , async = __nccwpck_require__(2794) + ; + +// API +module.exports = terminator; + +/** + * Terminates jobs in the attached state context + * + * @this AsyncKitState# + * @param {function} callback - final callback to invoke after termination + */ +function terminator(callback) +{ + if (!Object.keys(this.jobs).length) + { + return; + } + + // fast forward iteration index + this.index = this.size; + + // abort jobs + abort(this); + + // send back results we have so far + async(callback)(null, this.results); +} + + +/***/ }), + +/***/ 8210: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +var iterate = __nccwpck_require__(9023) + , initState = __nccwpck_require__(2474) + , terminator = __nccwpck_require__(7942) + ; + +// Public API +module.exports = parallel; + +/** + * Runs iterator over provided array elements in parallel + * + * @param {array|object} list - array or object (named list) to iterate over + * @param {function} iterator - iterator to run + * @param {function} callback - invoked when all elements processed + * @returns {function} - jobs terminator + */ +function parallel(list, iterator, callback) +{ + var state = initState(list); + + while (state.index < (state['keyedList'] || list).length) + { + iterate(list, iterator, state, function(error, result) + { + if (error) + { + callback(error, result); + return; + } + + // looks like it's the last one + if (Object.keys(state.jobs).length === 0) + { + callback(null, state.results); + return; + } + }); + + state.index++; + } + + return terminator.bind(state, callback); +} + + +/***/ }), + +/***/ 445: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +var serialOrdered = __nccwpck_require__(3578); + +// Public API +module.exports = serial; + +/** + * Runs iterator over provided array elements in series + * + * @param {array|object} list - array or object (named list) to iterate over + * @param {function} iterator - iterator to run + * @param {function} callback - invoked when all elements processed + * @returns {function} - jobs terminator + */ +function serial(list, iterator, callback) +{ + return serialOrdered(list, iterator, null, callback); +} + + +/***/ }), + +/***/ 3578: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +var iterate = __nccwpck_require__(9023) + , initState = __nccwpck_require__(2474) + , terminator = __nccwpck_require__(7942) + ; + +// Public API +module.exports = serialOrdered; +// sorting helpers +module.exports.ascending = ascending; +module.exports.descending = descending; + +/** + * Runs iterator over provided sorted array elements in series + * + * @param {array|object} list - array or object (named list) to iterate over + * @param {function} iterator - iterator to run + * @param {function} sortMethod - custom sort function + * @param {function} callback - invoked when all elements processed + * @returns {function} - jobs terminator + */ +function serialOrdered(list, iterator, sortMethod, callback) +{ + var state = initState(list, sortMethod); + + iterate(list, iterator, state, function iteratorHandler(error, result) + { + if (error) + { + callback(error, result); + return; } - return DiagConsoleLogger; -}()); -exports.DiagConsoleLogger = DiagConsoleLogger; -//# sourceMappingURL=consoleLogger.js.map 
-/***/ }), + state.index++; -/***/ 1634: -/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + // are we there yet? + if (state.index < (state['keyedList'] || list).length) + { + iterate(list, iterator, state, iteratorHandler); + return; + } -"use strict"; + // done here + callback(null, state.results); + }); + + return terminator.bind(state, callback); +} /* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * -- Sort methods + */ + +/** + * sort helper to sort array elements in ascending order * - * https://www.apache.org/licenses/LICENSE-2.0 + * @param {mixed} a - an item to compare + * @param {mixed} b - an item to compare + * @returns {number} - comparison result + */ +function ascending(a, b) +{ + return a < b ? -1 : a > b ? 1 : 0; +} + +/** + * sort helper to sort array elements in descending order * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * @param {mixed} a - an item to compare + * @param {mixed} b - an item to compare + * @returns {number} - comparison result */ -var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); -}) : (function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - o[k2] = m[k]; -})); -var __exportStar = (this && this.__exportStar) || function(m, exports) { - for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p); -}; -Object.defineProperty(exports, "__esModule", ({ value: true })); -__exportStar(__nccwpck_require__(3041), exports); -__exportStar(__nccwpck_require__(8077), exports); -//# sourceMappingURL=index.js.map +function descending(a, b) +{ + return -1 * ascending(a, b); +} + /***/ }), -/***/ 9639: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { +/***/ 9417: +/***/ ((module) => { "use strict"; -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.createLogLevelDiagLogger = void 0; -var types_1 = __nccwpck_require__(8077); -function createLogLevelDiagLogger(maxLevel, logger) { - if (maxLevel < types_1.DiagLogLevel.NONE) { - maxLevel = types_1.DiagLogLevel.NONE; - } - else if (maxLevel > types_1.DiagLogLevel.ALL) { - maxLevel = types_1.DiagLogLevel.ALL; +module.exports = balanced; +function balanced(a, b, str) { + if (a instanceof RegExp) a = maybeMatch(a, str); + if (b instanceof RegExp) b = maybeMatch(b, str); + + var r = range(a, b, str); + + return r && { + start: r[0], + end: r[1], + pre: str.slice(0, r[0]), + body: str.slice(r[0] + a.length, r[1]), + post: str.slice(r[1] + b.length) + }; +} + +function maybeMatch(reg, str) { + var m = str.match(reg); + return m ? m[0] : null; +} + +balanced.range = range; +function range(a, b, str) { + var begs, beg, left, right, result; + var ai = str.indexOf(a); + var bi = str.indexOf(b, ai + 1); + var i = ai; + + if (ai >= 0 && bi > 0) { + if(a===b) { + return [ai, bi]; } - // In case the logger is null or undefined - logger = logger || {}; - function _filterFunc(funcName, theLevel) { - var theFunc = logger[funcName]; - if (typeof theFunc === 'function' && maxLevel >= theLevel) { - return theFunc.bind(logger); + begs = []; + left = str.length; + + while (i >= 0 && !result) { + if (i == ai) { + begs.push(i); + ai = str.indexOf(a, i + 1); + } else if (begs.length == 1) { + result = [ begs.pop(), bi ]; + } else { + beg = begs.pop(); + if (beg < left) { + left = beg; + right = bi; } - return function () { }; + + bi = str.indexOf(b, i + 1); + } + + i = ai < bi && ai >= 0 ? ai : bi; } - return { - error: _filterFunc('error', types_1.DiagLogLevel.ERROR), - warn: _filterFunc('warn', types_1.DiagLogLevel.WARN), - info: _filterFunc('info', types_1.DiagLogLevel.INFO), - debug: _filterFunc('debug', types_1.DiagLogLevel.DEBUG), - verbose: _filterFunc('verbose', types_1.DiagLogLevel.VERBOSE), - }; + + if (begs.length) { + result = [ left, right ]; + } + } + + return result; } -exports.createLogLevelDiagLogger = createLogLevelDiagLogger; -//# sourceMappingURL=logLevelLogger.js.map + /***/ }), -/***/ 8077: -/***/ ((__unused_webpack_module, exports) => { +/***/ 3717: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { -"use strict"; +var concatMap = __nccwpck_require__(6891); +var balanced = __nccwpck_require__(9417); -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.DiagLogLevel = void 0; -/** - * Defines the available internal logging levels for the diagnostic logger, the numeric values - * of the levels are defined to match the original values from the initial LogLevel to avoid - * compatibility/migration issues for any implementation that assume the numeric ordering. 
- */ -var DiagLogLevel; -(function (DiagLogLevel) { - /** Diagnostic Logging level setting to disable all logging (except and forced logs) */ - DiagLogLevel[DiagLogLevel["NONE"] = 0] = "NONE"; - /** Identifies an error scenario */ - DiagLogLevel[DiagLogLevel["ERROR"] = 30] = "ERROR"; - /** Identifies a warning scenario */ - DiagLogLevel[DiagLogLevel["WARN"] = 50] = "WARN"; - /** General informational log message */ - DiagLogLevel[DiagLogLevel["INFO"] = 60] = "INFO"; - /** General debug log message */ - DiagLogLevel[DiagLogLevel["DEBUG"] = 70] = "DEBUG"; - /** - * Detailed trace level logging should only be used for development, should only be set - * in a development environment. - */ - DiagLogLevel[DiagLogLevel["VERBOSE"] = 80] = "VERBOSE"; - /** Used to set the logging level to include all logging */ - DiagLogLevel[DiagLogLevel["ALL"] = 9999] = "ALL"; -})(DiagLogLevel = exports.DiagLogLevel || (exports.DiagLogLevel = {})); -//# sourceMappingURL=types.js.map +module.exports = expandTop; -/***/ }), +var escSlash = '\0SLASH'+Math.random()+'\0'; +var escOpen = '\0OPEN'+Math.random()+'\0'; +var escClose = '\0CLOSE'+Math.random()+'\0'; +var escComma = '\0COMMA'+Math.random()+'\0'; +var escPeriod = '\0PERIOD'+Math.random()+'\0'; -/***/ 5163: -/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { +function numeric(str) { + return parseInt(str, 10) == str + ? parseInt(str, 10) + : str.charCodeAt(0); +} -"use strict"; +function escapeBraces(str) { + return str.split('\\\\').join(escSlash) + .split('\\{').join(escOpen) + .split('\\}').join(escClose) + .split('\\,').join(escComma) + .split('\\.').join(escPeriod); +} -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -var __createBinding = (this && this.__createBinding) || (Object.create ? 
(function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); -}) : (function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - o[k2] = m[k]; -})); -var __exportStar = (this && this.__exportStar) || function(m, exports) { - for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p); -}; -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.diag = exports.propagation = exports.trace = exports.context = exports.INVALID_SPAN_CONTEXT = exports.INVALID_TRACEID = exports.INVALID_SPANID = exports.isValidSpanId = exports.isValidTraceId = exports.isSpanContextValid = exports.createTraceState = exports.baggageEntryMetadataFromString = void 0; -__exportStar(__nccwpck_require__(1508), exports); -var utils_1 = __nccwpck_require__(8136); -Object.defineProperty(exports, "baggageEntryMetadataFromString", ({ enumerable: true, get: function () { return utils_1.baggageEntryMetadataFromString; } })); -__exportStar(__nccwpck_require__(4447), exports); -__exportStar(__nccwpck_require__(2358), exports); -__exportStar(__nccwpck_require__(1109), exports); -__exportStar(__nccwpck_require__(1634), exports); -__exportStar(__nccwpck_require__(865), exports); -__exportStar(__nccwpck_require__(7492), exports); -__exportStar(__nccwpck_require__(4023), exports); -__exportStar(__nccwpck_require__(3503), exports); -__exportStar(__nccwpck_require__(2285), exports); -__exportStar(__nccwpck_require__(9671), exports); -__exportStar(__nccwpck_require__(3209), exports); -__exportStar(__nccwpck_require__(5769), exports); -__exportStar(__nccwpck_require__(1424), exports); -__exportStar(__nccwpck_require__(4416), exports); -__exportStar(__nccwpck_require__(955), exports); -__exportStar(__nccwpck_require__(8845), exports); -__exportStar(__nccwpck_require__(6905), exports); -__exportStar(__nccwpck_require__(8384), exports); -var utils_2 = __nccwpck_require__(2615); -Object.defineProperty(exports, "createTraceState", ({ enumerable: true, get: function () { return utils_2.createTraceState; } })); -__exportStar(__nccwpck_require__(891), exports); -__exportStar(__nccwpck_require__(3168), exports); -__exportStar(__nccwpck_require__(1823), exports); -var spancontext_utils_1 = __nccwpck_require__(9745); -Object.defineProperty(exports, "isSpanContextValid", ({ enumerable: true, get: function () { return spancontext_utils_1.isSpanContextValid; } })); -Object.defineProperty(exports, "isValidTraceId", ({ enumerable: true, get: function () { return spancontext_utils_1.isValidTraceId; } })); -Object.defineProperty(exports, "isValidSpanId", ({ enumerable: true, get: function () { return spancontext_utils_1.isValidSpanId; } })); -var invalid_span_constants_1 = __nccwpck_require__(1760); -Object.defineProperty(exports, "INVALID_SPANID", ({ enumerable: true, get: function () { return invalid_span_constants_1.INVALID_SPANID; } })); -Object.defineProperty(exports, "INVALID_TRACEID", ({ enumerable: true, get: function () { return invalid_span_constants_1.INVALID_TRACEID; } })); -Object.defineProperty(exports, "INVALID_SPAN_CONTEXT", ({ enumerable: true, get: function () { return invalid_span_constants_1.INVALID_SPAN_CONTEXT; } })); -__exportStar(__nccwpck_require__(8242), exports); -__exportStar(__nccwpck_require__(6504), exports); -var context_1 = __nccwpck_require__(7171); -/** Entrypoint for context API */ -exports.context = context_1.ContextAPI.getInstance(); -var trace_1 = 
__nccwpck_require__(1539); -/** Entrypoint for trace API */ -exports.trace = trace_1.TraceAPI.getInstance(); -var propagation_1 = __nccwpck_require__(9909); -/** Entrypoint for propagation API */ -exports.propagation = propagation_1.PropagationAPI.getInstance(); -var diag_1 = __nccwpck_require__(1877); -/** - * Entrypoint for Diag API. - * Defines Diagnostic handler used for internal diagnostic logging operations. - * The default provides a Noop DiagLogger implementation which may be changed via the - * diag.setLogger(logger: DiagLogger) function. - */ -exports.diag = diag_1.DiagAPI.instance(); -exports["default"] = { - trace: exports.trace, - context: exports.context, - propagation: exports.propagation, - diag: exports.diag, -}; -//# sourceMappingURL=index.js.map +function unescapeBraces(str) { + return str.split(escSlash).join('\\') + .split(escOpen).join('{') + .split(escClose).join('}') + .split(escComma).join(',') + .split(escPeriod).join('.'); +} -/***/ }), -/***/ 5135: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { +// Basically just str.split(","), but handling cases +// where we have nested braced sections, which should be +// treated as individual members, like {a,{b,c},d} +function parseCommaParts(str) { + if (!str) + return ['']; -"use strict"; + var parts = []; + var m = balanced('{', '}', str); -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.unregisterGlobal = exports.getGlobal = exports.registerGlobal = void 0; -var platform_1 = __nccwpck_require__(9957); -var version_1 = __nccwpck_require__(8996); -var semver_1 = __nccwpck_require__(1522); -var major = version_1.VERSION.split('.')[0]; -var GLOBAL_OPENTELEMETRY_API_KEY = Symbol.for("opentelemetry.js.api." + major); -var _global = platform_1._globalThis; -function registerGlobal(type, instance, diag, allowOverride) { - var _a; - if (allowOverride === void 0) { allowOverride = false; } - var api = (_global[GLOBAL_OPENTELEMETRY_API_KEY] = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) !== null && _a !== void 0 ? _a : { - version: version_1.VERSION, - }); - if (!allowOverride && api[type]) { - // already registered an API of this type - var err = new Error("@opentelemetry/api: Attempted duplicate registration of API: " + type); - diag.error(err.stack || err.message); - return false; + if (!m) + return str.split(','); + + var pre = m.pre; + var body = m.body; + var post = m.post; + var p = pre.split(','); + + p[p.length-1] += '{' + body + '}'; + var postParts = parseCommaParts(post); + if (post.length) { + p[p.length-1] += postParts.shift(); + p.push.apply(p, postParts); + } + + parts.push.apply(parts, p); + + return parts; +} + +function expandTop(str) { + if (!str) + return []; + + // I don't know why Bash 4.3 does this, but it does. 
+ // Anything starting with {} will have the first two bytes preserved + // but *only* at the top level, so {},a}b will not expand to anything, + // but a{},b}c will be expanded to [a}c,abc]. + // One could argue that this is a bug in Bash, but since the goal of + // this module is to match Bash's rules, we escape a leading {} + if (str.substr(0, 2) === '{}') { + str = '\\{\\}' + str.substr(2); + } + + return expand(escapeBraces(str), true).map(unescapeBraces); +} + +function identity(e) { + return e; +} + +function embrace(str) { + return '{' + str + '}'; +} +function isPadded(el) { + return /^-?0\d/.test(el); +} + +function lte(i, y) { + return i <= y; +} +function gte(i, y) { + return i >= y; +} + +function expand(str, isTop) { + var expansions = []; + + var m = balanced('{', '}', str); + if (!m || /\$$/.test(m.pre)) return [str]; + + var isNumericSequence = /^-?\d+\.\.-?\d+(?:\.\.-?\d+)?$/.test(m.body); + var isAlphaSequence = /^[a-zA-Z]\.\.[a-zA-Z](?:\.\.-?\d+)?$/.test(m.body); + var isSequence = isNumericSequence || isAlphaSequence; + var isOptions = m.body.indexOf(',') >= 0; + if (!isSequence && !isOptions) { + // {a},b} + if (m.post.match(/,.*\}/)) { + str = m.pre + '{' + m.body + escClose + m.post; + return expand(str); } - if (api.version !== version_1.VERSION) { - // All registered APIs must be of the same version exactly - var err = new Error('@opentelemetry/api: All API registration versions must match'); - diag.error(err.stack || err.message); - return false; + return [str]; + } + + var n; + if (isSequence) { + n = m.body.split(/\.\./); + } else { + n = parseCommaParts(m.body); + if (n.length === 1) { + // x{{a,b}}y ==> x{a}y x{b}y + n = expand(n[0], false).map(embrace); + if (n.length === 1) { + var post = m.post.length + ? expand(m.post, false) + : ['']; + return post.map(function(p) { + return m.pre + n[0] + p; + }); + } + } + } + + // at this point, n is the parts, and we know it's not a comma set + // with a single entry. + + // no need to expand pre, since it is guaranteed to be free of brace-sets + var pre = m.pre; + var post = m.post.length + ? expand(m.post, false) + : ['']; + + var N; + + if (isSequence) { + var x = numeric(n[0]); + var y = numeric(n[1]); + var width = Math.max(n[0].length, n[1].length) + var incr = n.length == 3 + ? Math.abs(numeric(n[2])) + : 1; + var test = lte; + var reverse = y < x; + if (reverse) { + incr *= -1; + test = gte; } - api[type] = instance; - diag.debug("@opentelemetry/api: Registered a global for " + type + " v" + version_1.VERSION + "."); - return true; -} -exports.registerGlobal = registerGlobal; -function getGlobal(type) { - var _a, _b; - var globalVersion = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _a === void 0 ? void 0 : _a.version; - if (!globalVersion || !semver_1.isCompatible(globalVersion)) { - return; + var pad = n.some(isPadded); + + N = []; + + for (var i = x; test(i, y); i += incr) { + var c; + if (isAlphaSequence) { + c = String.fromCharCode(i); + if (c === '\\') + c = ''; + } else { + c = String(i); + if (pad) { + var need = width - c.length; + if (need > 0) { + var z = new Array(need + 1).join('0'); + if (i < 0) + c = '-' + z + c.slice(1); + else + c = z + c; + } + } + } + N.push(c); } - return (_b = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _b === void 0 ? 
void 0 : _b[type]; -} -exports.getGlobal = getGlobal; -function unregisterGlobal(type, diag) { - diag.debug("@opentelemetry/api: Unregistering a global for " + type + " v" + version_1.VERSION + "."); - var api = _global[GLOBAL_OPENTELEMETRY_API_KEY]; - if (api) { - delete api[type]; + } else { + N = concatMap(n, function(el) { return expand(el, false) }); + } + + for (var j = 0; j < N.length; j++) { + for (var k = 0; k < post.length; k++) { + var expansion = pre + N[j] + post[k]; + if (!isTop || isSequence || expansion) + expansions.push(expansion); } + } + + return expansions; } -exports.unregisterGlobal = unregisterGlobal; -//# sourceMappingURL=global-utils.js.map + + /***/ }), -/***/ 1522: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { +/***/ 5443: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { -"use strict"; +var util = __nccwpck_require__(3837); +var Stream = (__nccwpck_require__(2781).Stream); +var DelayedStream = __nccwpck_require__(8611); -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.isCompatible = exports._makeCompatibilityCheck = void 0; -var version_1 = __nccwpck_require__(8996); -var re = /^(\d+)\.(\d+)\.(\d+)(-(.+))?$/; -/** - * Create a function to test an API version to see if it is compatible with the provided ownVersion. 
- * - * The returned function has the following semantics: - * - Exact match is always compatible - * - Major versions must match exactly - * - 1.x package cannot use global 2.x package - * - 2.x package cannot use global 1.x package - * - The minor version of the API module requesting access to the global API must be less than or equal to the minor version of this API - * - 1.3 package may use 1.4 global because the later global contains all functions 1.3 expects - * - 1.4 package may NOT use 1.3 global because it may try to call functions which don't exist on 1.3 - * - If the major version is 0, the minor version is treated as the major and the patch is treated as the minor - * - Patch and build tag differences are not considered at this time - * - * @param ownVersion version which should be checked against - */ -function _makeCompatibilityCheck(ownVersion) { - var acceptedVersions = new Set([ownVersion]); - var rejectedVersions = new Set(); - var myVersionMatch = ownVersion.match(re); - if (!myVersionMatch) { - // we cannot guarantee compatibility so we always return noop - return function () { return false; }; - } - var ownVersionParsed = { - major: +myVersionMatch[1], - minor: +myVersionMatch[2], - patch: +myVersionMatch[3], - prerelease: myVersionMatch[4], - }; - // if ownVersion has a prerelease tag, versions must match exactly - if (ownVersionParsed.prerelease != null) { - return function isExactmatch(globalVersion) { - return globalVersion === ownVersion; - }; - } - function _reject(v) { - rejectedVersions.add(v); - return false; - } - function _accept(v) { - acceptedVersions.add(v); - return true; - } - return function isCompatible(globalVersion) { - if (acceptedVersions.has(globalVersion)) { - return true; - } - if (rejectedVersions.has(globalVersion)) { - return false; - } - var globalVersionMatch = globalVersion.match(re); - if (!globalVersionMatch) { - // cannot parse other version - // we cannot guarantee compatibility so we always noop - return _reject(globalVersion); - } - var globalVersionParsed = { - major: +globalVersionMatch[1], - minor: +globalVersionMatch[2], - patch: +globalVersionMatch[3], - prerelease: globalVersionMatch[4], - }; - // if globalVersion has a prerelease tag, versions must match exactly - if (globalVersionParsed.prerelease != null) { - return _reject(globalVersion); - } - // major versions must match - if (ownVersionParsed.major !== globalVersionParsed.major) { - return _reject(globalVersion); - } - if (ownVersionParsed.major === 0) { - if (ownVersionParsed.minor === globalVersionParsed.minor && - ownVersionParsed.patch <= globalVersionParsed.patch) { - return _accept(globalVersion); - } - return _reject(globalVersion); - } - if (ownVersionParsed.minor <= globalVersionParsed.minor) { - return _accept(globalVersion); - } - return _reject(globalVersion); - }; +module.exports = CombinedStream; +function CombinedStream() { + this.writable = false; + this.readable = true; + this.dataSize = 0; + this.maxDataSize = 2 * 1024 * 1024; + this.pauseStreams = true; + + this._released = false; + this._streams = []; + this._currentStream = null; + this._insideLoop = false; + this._pendingNext = false; } -exports._makeCompatibilityCheck = _makeCompatibilityCheck; -/** - * Test an API version to see if it is compatible with this API. 
- * - * - Exact match is always compatible - * - Major versions must match exactly - * - 1.x package cannot use global 2.x package - * - 2.x package cannot use global 1.x package - * - The minor version of the API module requesting access to the global API must be less than or equal to the minor version of this API - * - 1.3 package may use 1.4 global because the later global contains all functions 1.3 expects - * - 1.4 package may NOT use 1.3 global because it may try to call functions which don't exist on 1.3 - * - If the major version is 0, the minor version is treated as the major and the patch is treated as the minor - * - Patch and build tag differences are not considered at this time - * - * @param version version of the API requesting an instance of the global API - */ -exports.isCompatible = _makeCompatibilityCheck(version_1.VERSION); -//# sourceMappingURL=semver.js.map +util.inherits(CombinedStream, Stream); -/***/ }), +CombinedStream.create = function(options) { + var combinedStream = new this(); -/***/ 9957: -/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + options = options || {}; + for (var option in options) { + combinedStream[option] = options[option]; + } -"use strict"; + return combinedStream; +}; -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); -}) : (function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - o[k2] = m[k]; -})); -var __exportStar = (this && this.__exportStar) || function(m, exports) { - for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p); +CombinedStream.isStreamLike = function(stream) { + return (typeof stream !== 'function') + && (typeof stream !== 'string') + && (typeof stream !== 'boolean') + && (typeof stream !== 'number') + && (!Buffer.isBuffer(stream)); }; -Object.defineProperty(exports, "__esModule", ({ value: true })); -__exportStar(__nccwpck_require__(7200), exports); -//# sourceMappingURL=index.js.map -/***/ }), +CombinedStream.prototype.append = function(stream) { + var isStreamLike = CombinedStream.isStreamLike(stream); -/***/ 9406: -/***/ ((__unused_webpack_module, exports) => { + if (isStreamLike) { + if (!(stream instanceof DelayedStream)) { + var newStream = DelayedStream.create(stream, { + maxDataSize: Infinity, + pauseStream: this.pauseStreams, + }); + stream.on('data', this._checkDataSize.bind(this)); + stream = newStream; + } -"use strict"; + this._handleErrors(stream); -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports._globalThis = void 0; -/** only globals that common to node and browsers are allowed */ -// eslint-disable-next-line node/no-unsupported-features/es-builtins -exports._globalThis = typeof globalThis === 'object' ? globalThis : global; -//# sourceMappingURL=globalThis.js.map + if (this.pauseStreams) { + stream.pause(); + } + } -/***/ }), + this._streams.push(stream); + return this; +}; -/***/ 7200: -/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { +CombinedStream.prototype.pipe = function(dest, options) { + Stream.prototype.pipe.call(this, dest, options); + this.resume(); + return dest; +}; -"use strict"; +CombinedStream.prototype._getNext = function() { + this._currentStream = null; -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); -}) : (function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - o[k2] = m[k]; -})); -var __exportStar = (this && this.__exportStar) || function(m, exports) { - for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p); + if (this._insideLoop) { + this._pendingNext = true; + return; // defer call + } + + this._insideLoop = true; + try { + do { + this._pendingNext = false; + this._realGetNext(); + } while (this._pendingNext); + } finally { + this._insideLoop = false; + } }; -Object.defineProperty(exports, "__esModule", ({ value: true })); -__exportStar(__nccwpck_require__(9406), exports); -//# sourceMappingURL=index.js.map -/***/ }), +CombinedStream.prototype._realGetNext = function() { + var stream = this._streams.shift(); -/***/ 2368: -/***/ ((__unused_webpack_module, exports) => { -"use strict"; + if (typeof stream == 'undefined') { + this.end(); + return; + } -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.NoopTextMapPropagator = void 0; -/** - * No-op implementations of {@link TextMapPropagator}. - */ -var NoopTextMapPropagator = /** @class */ (function () { - function NoopTextMapPropagator() { + if (typeof stream !== 'function') { + this._pipeNext(stream); + return; + } + + var getStream = stream; + getStream(function(stream) { + var isStreamLike = CombinedStream.isStreamLike(stream); + if (isStreamLike) { + stream.on('data', this._checkDataSize.bind(this)); + this._handleErrors(stream); } - /** Noop inject function does nothing */ - NoopTextMapPropagator.prototype.inject = function (_context, _carrier) { }; - /** Noop extract function does nothing and returns the input context */ - NoopTextMapPropagator.prototype.extract = function (context, _carrier) { - return context; - }; - NoopTextMapPropagator.prototype.fields = function () { - return []; - }; - return NoopTextMapPropagator; -}()); -exports.NoopTextMapPropagator = NoopTextMapPropagator; -//# sourceMappingURL=NoopTextMapPropagator.js.map -/***/ }), + this._pipeNext(stream); + }.bind(this)); +}; -/***/ 865: -/***/ ((__unused_webpack_module, exports) => { +CombinedStream.prototype._pipeNext = function(stream) { + this._currentStream = stream; -"use strict"; + var isStreamLike = CombinedStream.isStreamLike(stream); + if (isStreamLike) { + stream.on('end', this._getNext.bind(this)); + stream.pipe(this, {end: false}); + return; + } -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.defaultTextMapSetter = exports.defaultTextMapGetter = void 0; -exports.defaultTextMapGetter = { - get: function (carrier, key) { - if (carrier == null) { - return undefined; - } - return carrier[key]; - }, - keys: function (carrier) { - if (carrier == null) { - return []; - } - return Object.keys(carrier); - }, + var value = stream; + this.write(value); + this._getNext(); }; -exports.defaultTextMapSetter = { - set: function (carrier, key, value) { - if (carrier == null) { - return; - } - carrier[key] = value; - }, + +CombinedStream.prototype._handleErrors = function(stream) { + var self = this; + stream.on('error', function(err) { + self._emitError(err); + }); +}; + +CombinedStream.prototype.write = function(data) { + this.emit('data', data); +}; + +CombinedStream.prototype.pause = function() { + if (!this.pauseStreams) { + return; + } + + if(this.pauseStreams && this._currentStream && typeof(this._currentStream.pause) == 'function') this._currentStream.pause(); + this.emit('pause'); +}; + +CombinedStream.prototype.resume = function() { + if (!this._released) { + this._released = true; + this.writable = true; + this._getNext(); + } + + if(this.pauseStreams && this._currentStream && typeof(this._currentStream.resume) == 'function') this._currentStream.resume(); + this.emit('resume'); +}; + +CombinedStream.prototype.end = function() { + this._reset(); + this.emit('end'); +}; + +CombinedStream.prototype.destroy = function() { + this._reset(); + this.emit('close'); +}; + +CombinedStream.prototype._reset = function() { + this.writable = false; + this._streams = []; + this._currentStream = null; +}; + +CombinedStream.prototype._checkDataSize = function() { + this._updateDataSize(); + if (this.dataSize <= this.maxDataSize) { + return; + } + + var message = + 'DelayedStream#maxDataSize of ' + this.maxDataSize + ' bytes exceeded.'; + this._emitError(new Error(message)); }; -//# sourceMappingURL=TextMapPropagator.js.map -/***/ }), +CombinedStream.prototype._updateDataSize = function() { + this.dataSize = 0; -/***/ 1462: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + var self = this; + this._streams.forEach(function(stream) { + if (!stream.dataSize) { + return; + } -"use strict"; + self.dataSize += stream.dataSize; + }); -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.NonRecordingSpan = void 0; -var invalid_span_constants_1 = __nccwpck_require__(1760); -/** - * The NonRecordingSpan is the default {@link Span} that is used when no Span - * implementation is available. All operations are no-op including context - * propagation. 
- */ -var NonRecordingSpan = /** @class */ (function () { - function NonRecordingSpan(_spanContext) { - if (_spanContext === void 0) { _spanContext = invalid_span_constants_1.INVALID_SPAN_CONTEXT; } - this._spanContext = _spanContext; - } - // Returns a SpanContext. - NonRecordingSpan.prototype.spanContext = function () { - return this._spanContext; - }; - // By default does nothing - NonRecordingSpan.prototype.setAttribute = function (_key, _value) { - return this; - }; - // By default does nothing - NonRecordingSpan.prototype.setAttributes = function (_attributes) { - return this; - }; - // By default does nothing - NonRecordingSpan.prototype.addEvent = function (_name, _attributes) { - return this; - }; - // By default does nothing - NonRecordingSpan.prototype.setStatus = function (_status) { - return this; - }; - // By default does nothing - NonRecordingSpan.prototype.updateName = function (_name) { - return this; - }; - // By default does nothing - NonRecordingSpan.prototype.end = function (_endTime) { }; - // isRecording always returns false for NonRecordingSpan. - NonRecordingSpan.prototype.isRecording = function () { - return false; - }; - // By default does nothing - NonRecordingSpan.prototype.recordException = function (_exception, _time) { }; - return NonRecordingSpan; -}()); -exports.NonRecordingSpan = NonRecordingSpan; -//# sourceMappingURL=NonRecordingSpan.js.map + if (this._currentStream && this._currentStream.dataSize) { + this.dataSize += this._currentStream.dataSize; + } +}; -/***/ }), +CombinedStream.prototype._emitError = function(err) { + this._reset(); + this.emit('error', err); +}; -/***/ 7606: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { -"use strict"; +/***/ }), -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.NoopTracer = void 0; -var context_1 = __nccwpck_require__(7171); -var context_utils_1 = __nccwpck_require__(3326); -var NonRecordingSpan_1 = __nccwpck_require__(1462); -var spancontext_utils_1 = __nccwpck_require__(9745); -var context = context_1.ContextAPI.getInstance(); -/** - * No-op implementations of {@link Tracer}. - */ -var NoopTracer = /** @class */ (function () { - function NoopTracer() { +/***/ 6891: +/***/ ((module) => { + +module.exports = function (xs, fn) { + var res = []; + for (var i = 0; i < xs.length; i++) { + var x = fn(xs[i], i); + if (isArray(x)) res.push.apply(res, x); + else res.push(x); } - // startSpan starts a noop span. - NoopTracer.prototype.startSpan = function (name, options, context) { - var root = Boolean(options === null || options === void 0 ? 
void 0 : options.root); - if (root) { - return new NonRecordingSpan_1.NonRecordingSpan(); - } - var parentFromContext = context && context_utils_1.getSpanContext(context); - if (isSpanContext(parentFromContext) && - spancontext_utils_1.isSpanContextValid(parentFromContext)) { - return new NonRecordingSpan_1.NonRecordingSpan(parentFromContext); - } - else { - return new NonRecordingSpan_1.NonRecordingSpan(); - } - }; - NoopTracer.prototype.startActiveSpan = function (name, arg2, arg3, arg4) { - var opts; - var ctx; - var fn; - if (arguments.length < 2) { - return; - } - else if (arguments.length === 2) { - fn = arg2; - } - else if (arguments.length === 3) { - opts = arg2; - fn = arg3; - } - else { - opts = arg2; - ctx = arg3; - fn = arg4; - } - var parentContext = ctx !== null && ctx !== void 0 ? ctx : context.active(); - var span = this.startSpan(name, opts, parentContext); - var contextWithSpanSet = context_utils_1.setSpan(parentContext, span); - return context.with(contextWithSpanSet, fn, undefined, span); - }; - return NoopTracer; -}()); -exports.NoopTracer = NoopTracer; -function isSpanContext(spanContext) { - return (typeof spanContext === 'object' && - typeof spanContext['spanId'] === 'string' && - typeof spanContext['traceId'] === 'string' && - typeof spanContext['traceFlags'] === 'number'); -} -//# sourceMappingURL=NoopTracer.js.map + return res; +}; + +var isArray = Array.isArray || function (xs) { + return Object.prototype.toString.call(xs) === '[object Array]'; +}; + /***/ }), -/***/ 3259: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { +/***/ 8611: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { -"use strict"; +var Stream = (__nccwpck_require__(2781).Stream); +var util = __nccwpck_require__(3837); -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.NoopTracerProvider = void 0; -var NoopTracer_1 = __nccwpck_require__(7606); -/** - * An implementation of the {@link TracerProvider} which returns an impotent - * Tracer for all calls to `getTracer`. - * - * All operations are no-op. 
- */ -var NoopTracerProvider = /** @class */ (function () { - function NoopTracerProvider() { - } - NoopTracerProvider.prototype.getTracer = function (_name, _version, _options) { - return new NoopTracer_1.NoopTracer(); - }; - return NoopTracerProvider; -}()); -exports.NoopTracerProvider = NoopTracerProvider; -//# sourceMappingURL=NoopTracerProvider.js.map +module.exports = DelayedStream; +function DelayedStream() { + this.source = null; + this.dataSize = 0; + this.maxDataSize = 1024 * 1024; + this.pauseStream = true; -/***/ }), + this._maxDataSizeExceeded = false; + this._released = false; + this._bufferedEvents = []; +} +util.inherits(DelayedStream, Stream); -/***/ 3503: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { +DelayedStream.create = function(source, options) { + var delayedStream = new this(); -"use strict"; + options = options || {}; + for (var option in options) { + delayedStream[option] = options[option]; + } -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.ProxyTracer = void 0; -var NoopTracer_1 = __nccwpck_require__(7606); -var NOOP_TRACER = new NoopTracer_1.NoopTracer(); -/** - * Proxy tracer provided by the proxy tracer provider - */ -var ProxyTracer = /** @class */ (function () { - function ProxyTracer(_provider, name, version, options) { - this._provider = _provider; - this.name = name; - this.version = version; - this.options = options; - } - ProxyTracer.prototype.startSpan = function (name, options, context) { - return this._getTracer().startSpan(name, options, context); - }; - ProxyTracer.prototype.startActiveSpan = function (_name, _options, _context, _fn) { - var tracer = this._getTracer(); - return Reflect.apply(tracer.startActiveSpan, tracer, arguments); - }; - /** - * Try to get a tracer from the proxy tracer provider. - * If the proxy tracer provider has no delegate, return a noop tracer. - */ - ProxyTracer.prototype._getTracer = function () { - if (this._delegate) { - return this._delegate; - } - var tracer = this._provider.getDelegateTracer(this.name, this.version, this.options); - if (!tracer) { - return NOOP_TRACER; - } - this._delegate = tracer; - return this._delegate; - }; - return ProxyTracer; -}()); -exports.ProxyTracer = ProxyTracer; -//# sourceMappingURL=ProxyTracer.js.map + delayedStream.source = source; -/***/ }), + var realEmit = source.emit; + source.emit = function() { + delayedStream._handleEmit(arguments); + return realEmit.apply(source, arguments); + }; -/***/ 2285: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + source.on('error', function() {}); + if (delayedStream.pauseStream) { + source.pause(); + } -"use strict"; + return delayedStream; +}; -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.ProxyTracerProvider = void 0; -var ProxyTracer_1 = __nccwpck_require__(3503); -var NoopTracerProvider_1 = __nccwpck_require__(3259); -var NOOP_TRACER_PROVIDER = new NoopTracerProvider_1.NoopTracerProvider(); -/** - * Tracer provider which provides {@link ProxyTracer}s. - * - * Before a delegate is set, tracers provided are NoOp. - * When a delegate is set, traces are provided from the delegate. - * When a delegate is set after tracers have already been provided, - * all tracers already provided will use the provided delegate implementation. - */ -var ProxyTracerProvider = /** @class */ (function () { - function ProxyTracerProvider() { - } - /** - * Get a {@link ProxyTracer} - */ - ProxyTracerProvider.prototype.getTracer = function (name, version, options) { - var _a; - return ((_a = this.getDelegateTracer(name, version, options)) !== null && _a !== void 0 ? _a : new ProxyTracer_1.ProxyTracer(this, name, version, options)); - }; - ProxyTracerProvider.prototype.getDelegate = function () { - var _a; - return (_a = this._delegate) !== null && _a !== void 0 ? _a : NOOP_TRACER_PROVIDER; - }; - /** - * Set the delegate tracer provider - */ - ProxyTracerProvider.prototype.setDelegate = function (delegate) { - this._delegate = delegate; - }; - ProxyTracerProvider.prototype.getDelegateTracer = function (name, version, options) { - var _a; - return (_a = this._delegate) === null || _a === void 0 ? void 0 : _a.getTracer(name, version, options); - }; - return ProxyTracerProvider; -}()); -exports.ProxyTracerProvider = ProxyTracerProvider; -//# sourceMappingURL=ProxyTracerProvider.js.map +Object.defineProperty(DelayedStream.prototype, 'readable', { + configurable: true, + enumerable: true, + get: function() { + return this.source.readable; + } +}); -/***/ }), +DelayedStream.prototype.setEncoding = function() { + return this.source.setEncoding.apply(this.source, arguments); +}; -/***/ 9671: -/***/ ((__unused_webpack_module, exports) => { +DelayedStream.prototype.resume = function() { + if (!this._released) { + this.release(); + } -"use strict"; + this.source.resume(); +}; -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -//# sourceMappingURL=Sampler.js.map +DelayedStream.prototype.pause = function() { + this.source.pause(); +}; -/***/ }), +DelayedStream.prototype.release = function() { + this._released = true; -/***/ 3209: -/***/ ((__unused_webpack_module, exports) => { + this._bufferedEvents.forEach(function(args) { + this.emit.apply(this, args); + }.bind(this)); + this._bufferedEvents = []; +}; -"use strict"; +DelayedStream.prototype.pipe = function() { + var r = Stream.prototype.pipe.apply(this, arguments); + this.resume(); + return r; +}; -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.SamplingDecision = void 0; -/** - * @deprecated use the one declared in @opentelemetry/sdk-trace-base instead. - * A sampling decision that determines how a {@link Span} will be recorded - * and collected. - */ -var SamplingDecision; -(function (SamplingDecision) { - /** - * `Span.isRecording() === false`, span will not be recorded and all events - * and attributes will be dropped. - */ - SamplingDecision[SamplingDecision["NOT_RECORD"] = 0] = "NOT_RECORD"; - /** - * `Span.isRecording() === true`, but `Sampled` flag in {@link TraceFlags} - * MUST NOT be set. - */ - SamplingDecision[SamplingDecision["RECORD"] = 1] = "RECORD"; - /** - * `Span.isRecording() === true` AND `Sampled` flag in {@link TraceFlags} - * MUST be set. - */ - SamplingDecision[SamplingDecision["RECORD_AND_SAMPLED"] = 2] = "RECORD_AND_SAMPLED"; -})(SamplingDecision = exports.SamplingDecision || (exports.SamplingDecision = {})); -//# sourceMappingURL=SamplingResult.js.map +DelayedStream.prototype._handleEmit = function(args) { + if (this._released) { + this.emit.apply(this, args); + return; + } -/***/ }), + if (args[0] === 'data') { + this.dataSize += args[1].length; + this._checkIfMaxDataSizeExceeded(); + } -/***/ 955: -/***/ ((__unused_webpack_module, exports) => { + this._bufferedEvents.push(args); +}; -"use strict"; +DelayedStream.prototype._checkIfMaxDataSizeExceeded = function() { + if (this._maxDataSizeExceeded) { + return; + } + + if (this.dataSize <= this.maxDataSize) { + return; + } + + this._maxDataSizeExceeded = true; + var message = + 'DelayedStream#maxDataSize of ' + this.maxDataSize + ' bytes exceeded.' + this.emit('error', new Error(message)); +}; -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -//# sourceMappingURL=SpanOptions.js.map /***/ }), -/***/ 7492: -/***/ ((__unused_webpack_module, exports) => { +/***/ 7129: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { "use strict"; -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -//# sourceMappingURL=attributes.js.map -/***/ }), +// A linked list to keep track of recently-used-ness +const Yallist = __nccwpck_require__(665) -/***/ 3326: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { +const MAX = Symbol('max') +const LENGTH = Symbol('length') +const LENGTH_CALCULATOR = Symbol('lengthCalculator') +const ALLOW_STALE = Symbol('allowStale') +const MAX_AGE = Symbol('maxAge') +const DISPOSE = Symbol('dispose') +const NO_DISPOSE_ON_SET = Symbol('noDisposeOnSet') +const LRU_LIST = Symbol('lruList') +const CACHE = Symbol('cache') +const UPDATE_AGE_ON_GET = Symbol('updateAgeOnGet') -"use strict"; +const naiveLength = () => 1 -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.getSpanContext = exports.setSpanContext = exports.deleteSpan = exports.setSpan = exports.getActiveSpan = exports.getSpan = void 0; -var context_1 = __nccwpck_require__(8242); -var NonRecordingSpan_1 = __nccwpck_require__(1462); -var context_2 = __nccwpck_require__(7171); -/** - * span key - */ -var SPAN_KEY = context_1.createContextKey('OpenTelemetry Context Key SPAN'); -/** - * Return the span if one exists - * - * @param context context to get span from - */ -function getSpan(context) { - return context.getValue(SPAN_KEY) || undefined; -} -exports.getSpan = getSpan; -/** - * Gets the span from the current context, if one exists. 
- */ -function getActiveSpan() { - return getSpan(context_2.ContextAPI.getInstance().active()); -} -exports.getActiveSpan = getActiveSpan; -/** - * Set the span on a context - * - * @param context context to use as parent - * @param span span to set active - */ -function setSpan(context, span) { - return context.setValue(SPAN_KEY, span); -} -exports.setSpan = setSpan; -/** - * Remove current span stored in the context - * - * @param context context to delete span from - */ -function deleteSpan(context) { - return context.deleteValue(SPAN_KEY); -} -exports.deleteSpan = deleteSpan; -/** - * Wrap span context in a NoopSpan and set as span in a new - * context - * - * @param context context to set active span on - * @param spanContext span context to be wrapped - */ -function setSpanContext(context, spanContext) { - return setSpan(context, new NonRecordingSpan_1.NonRecordingSpan(spanContext)); -} -exports.setSpanContext = setSpanContext; -/** - * Get the span context of the span if it exists. - * - * @param context context to get values from - */ -function getSpanContext(context) { - var _a; - return (_a = getSpan(context)) === null || _a === void 0 ? void 0 : _a.spanContext(); -} -exports.getSpanContext = getSpanContext; -//# sourceMappingURL=context-utils.js.map +// lruList is a yallist where the head is the youngest +// item, and the tail is the oldest. the list contains the Hit +// objects as the entries. +// Each Hit object has a reference to its Yallist.Node. This +// never changes. +// +// cache is a Map (or PseudoMap) that matches the keys to +// the Yallist.Node object. +class LRUCache { + constructor (options) { + if (typeof options === 'number') + options = { max: options } -/***/ }), + if (!options) + options = {} -/***/ 2110: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + if (options.max && (typeof options.max !== 'number' || options.max < 0)) + throw new TypeError('max must be a non-negative number') + // Kind of weird to have a default max of Infinity, but oh well. + const max = this[MAX] = options.max || Infinity -"use strict"; + const lc = options.length || naiveLength + this[LENGTH_CALCULATOR] = (typeof lc !== 'function') ? naiveLength : lc + this[ALLOW_STALE] = options.stale || false + if (options.maxAge && typeof options.maxAge !== 'number') + throw new TypeError('maxAge must be a number') + this[MAX_AGE] = options.maxAge || 0 + this[DISPOSE] = options.dispose + this[NO_DISPOSE_ON_SET] = options.noDisposeOnSet || false + this[UPDATE_AGE_ON_GET] = options.updateAgeOnGet || false + this.reset() + } -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.TraceStateImpl = void 0; -var tracestate_validators_1 = __nccwpck_require__(4864); -var MAX_TRACE_STATE_ITEMS = 32; -var MAX_TRACE_STATE_LEN = 512; -var LIST_MEMBERS_SEPARATOR = ','; -var LIST_MEMBER_KEY_VALUE_SPLITTER = '='; -/** - * TraceState must be a class and not a simple object type because of the spec - * requirement (https://www.w3.org/TR/trace-context/#tracestate-field). - * - * Here is the list of allowed mutations: - * - New key-value pair should be added into the beginning of the list - * - The value of any key can be updated. Modified keys MUST be moved to the - * beginning of the list. - */ -var TraceStateImpl = /** @class */ (function () { - function TraceStateImpl(rawTraceState) { - this._internalState = new Map(); - if (rawTraceState) - this._parse(rawTraceState); + // resize the cache when the max changes. + set max (mL) { + if (typeof mL !== 'number' || mL < 0) + throw new TypeError('max must be a non-negative number') + + this[MAX] = mL || Infinity + trim(this) + } + get max () { + return this[MAX] + } + + set allowStale (allowStale) { + this[ALLOW_STALE] = !!allowStale + } + get allowStale () { + return this[ALLOW_STALE] + } + + set maxAge (mA) { + if (typeof mA !== 'number') + throw new TypeError('maxAge must be a non-negative number') + + this[MAX_AGE] = mA + trim(this) + } + get maxAge () { + return this[MAX_AGE] + } + + // resize the cache when the lengthCalculator changes. + set lengthCalculator (lC) { + if (typeof lC !== 'function') + lC = naiveLength + + if (lC !== this[LENGTH_CALCULATOR]) { + this[LENGTH_CALCULATOR] = lC + this[LENGTH] = 0 + this[LRU_LIST].forEach(hit => { + hit.length = this[LENGTH_CALCULATOR](hit.value, hit.key) + this[LENGTH] += hit.length + }) } - TraceStateImpl.prototype.set = function (key, value) { - // TODO: Benchmark the different approaches(map vs list) and - // use the faster one. 
- var traceState = this._clone(); - if (traceState._internalState.has(key)) { - traceState._internalState.delete(key); - } - traceState._internalState.set(key, value); - return traceState; - }; - TraceStateImpl.prototype.unset = function (key) { - var traceState = this._clone(); - traceState._internalState.delete(key); - return traceState; - }; - TraceStateImpl.prototype.get = function (key) { - return this._internalState.get(key); - }; - TraceStateImpl.prototype.serialize = function () { - var _this = this; - return this._keys() - .reduce(function (agg, key) { - agg.push(key + LIST_MEMBER_KEY_VALUE_SPLITTER + _this.get(key)); - return agg; - }, []) - .join(LIST_MEMBERS_SEPARATOR); - }; - TraceStateImpl.prototype._parse = function (rawTraceState) { - if (rawTraceState.length > MAX_TRACE_STATE_LEN) - return; - this._internalState = rawTraceState - .split(LIST_MEMBERS_SEPARATOR) - .reverse() // Store in reverse so new keys (.set(...)) will be placed at the beginning - .reduce(function (agg, part) { - var listMember = part.trim(); // Optional Whitespace (OWS) handling - var i = listMember.indexOf(LIST_MEMBER_KEY_VALUE_SPLITTER); - if (i !== -1) { - var key = listMember.slice(0, i); - var value = listMember.slice(i + 1, part.length); - if (tracestate_validators_1.validateKey(key) && tracestate_validators_1.validateValue(value)) { - agg.set(key, value); - } - else { - // TODO: Consider to add warning log - } - } - return agg; - }, new Map()); - // Because of the reverse() requirement, trunc must be done after map is created - if (this._internalState.size > MAX_TRACE_STATE_ITEMS) { - this._internalState = new Map(Array.from(this._internalState.entries()) - .reverse() // Use reverse same as original tracestate parse chain - .slice(0, MAX_TRACE_STATE_ITEMS)); - } - }; - TraceStateImpl.prototype._keys = function () { - return Array.from(this._internalState.keys()).reverse(); - }; - TraceStateImpl.prototype._clone = function () { - var traceState = new TraceStateImpl(); - traceState._internalState = new Map(this._internalState); - return traceState; - }; - return TraceStateImpl; -}()); -exports.TraceStateImpl = TraceStateImpl; -//# sourceMappingURL=tracestate-impl.js.map + trim(this) + } + get lengthCalculator () { return this[LENGTH_CALCULATOR] } -/***/ }), + get length () { return this[LENGTH] } + get itemCount () { return this[LRU_LIST].length } -/***/ 4864: -/***/ ((__unused_webpack_module, exports) => { + rforEach (fn, thisp) { + thisp = thisp || this + for (let walker = this[LRU_LIST].tail; walker !== null;) { + const prev = walker.prev + forEachStep(this, fn, walker, thisp) + walker = prev + } + } -"use strict"; + forEach (fn, thisp) { + thisp = thisp || this + for (let walker = this[LRU_LIST].head; walker !== null;) { + const next = walker.next + forEachStep(this, fn, walker, thisp) + walker = next + } + } -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.validateValue = exports.validateKey = void 0; -var VALID_KEY_CHAR_RANGE = '[_0-9a-z-*/]'; -var VALID_KEY = "[a-z]" + VALID_KEY_CHAR_RANGE + "{0,255}"; -var VALID_VENDOR_KEY = "[a-z0-9]" + VALID_KEY_CHAR_RANGE + "{0,240}@[a-z]" + VALID_KEY_CHAR_RANGE + "{0,13}"; -var VALID_KEY_REGEX = new RegExp("^(?:" + VALID_KEY + "|" + VALID_VENDOR_KEY + ")$"); -var VALID_VALUE_BASE_REGEX = /^[ -~]{0,255}[!-~]$/; -var INVALID_VALUE_COMMA_EQUAL_REGEX = /,|=/; -/** - * Key is opaque string up to 256 characters printable. It MUST begin with a - * lowercase letter, and can only contain lowercase letters a-z, digits 0-9, - * underscores _, dashes -, asterisks *, and forward slashes /. - * For multi-tenant vendor scenarios, an at sign (@) can be used to prefix the - * vendor name. Vendors SHOULD set the tenant ID at the beginning of the key. - * see https://www.w3.org/TR/trace-context/#key - */ -function validateKey(key) { - return VALID_KEY_REGEX.test(key); -} -exports.validateKey = validateKey; -/** - * Value is opaque string up to 256 characters printable ASCII RFC0020 - * characters (i.e., the range 0x20 to 0x7E) except comma , and =. - */ -function validateValue(value) { - return (VALID_VALUE_BASE_REGEX.test(value) && - !INVALID_VALUE_COMMA_EQUAL_REGEX.test(value)); -} -exports.validateValue = validateValue; -//# sourceMappingURL=tracestate-validators.js.map + keys () { + return this[LRU_LIST].toArray().map(k => k.key) + } -/***/ }), + values () { + return this[LRU_LIST].toArray().map(k => k.value) + } -/***/ 2615: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + reset () { + if (this[DISPOSE] && + this[LRU_LIST] && + this[LRU_LIST].length) { + this[LRU_LIST].forEach(hit => this[DISPOSE](hit.key, hit.value)) + } -"use strict"; + this[CACHE] = new Map() // hash of items by key + this[LRU_LIST] = new Yallist() // list of items in order of use recency + this[LENGTH] = 0 // length of items in the list + } -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.createTraceState = void 0; -var tracestate_impl_1 = __nccwpck_require__(2110); -function createTraceState(rawTraceState) { - return new tracestate_impl_1.TraceStateImpl(rawTraceState); -} -exports.createTraceState = createTraceState; -//# sourceMappingURL=utils.js.map + dump () { + return this[LRU_LIST].map(hit => + isStale(this, hit) ? 
false : { + k: hit.key, + v: hit.value, + e: hit.now + (hit.maxAge || 0) + }).toArray().filter(h => h) + } -/***/ }), + dumpLru () { + return this[LRU_LIST] + } -/***/ 1760: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + set (key, value, maxAge) { + maxAge = maxAge || this[MAX_AGE] -"use strict"; + if (maxAge && typeof maxAge !== 'number') + throw new TypeError('maxAge must be a number') -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.INVALID_SPAN_CONTEXT = exports.INVALID_TRACEID = exports.INVALID_SPANID = void 0; -var trace_flags_1 = __nccwpck_require__(6905); -exports.INVALID_SPANID = '0000000000000000'; -exports.INVALID_TRACEID = '00000000000000000000000000000000'; -exports.INVALID_SPAN_CONTEXT = { - traceId: exports.INVALID_TRACEID, - spanId: exports.INVALID_SPANID, - traceFlags: trace_flags_1.TraceFlags.NONE, -}; -//# sourceMappingURL=invalid-span-constants.js.map + const now = maxAge ? Date.now() : 0 + const len = this[LENGTH_CALCULATOR](value, key) -/***/ }), + if (this[CACHE].has(key)) { + if (len > this[MAX]) { + del(this, this[CACHE].get(key)) + return false + } -/***/ 4023: -/***/ ((__unused_webpack_module, exports) => { + const node = this[CACHE].get(key) + const item = node.value -"use strict"; + // dispose of the old one before overwriting + // split out into 2 ifs for better coverage tracking + if (this[DISPOSE]) { + if (!this[NO_DISPOSE_ON_SET]) + this[DISPOSE](key, item.value) + } -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -//# sourceMappingURL=link.js.map + item.now = now + item.maxAge = maxAge + item.value = value + this[LENGTH] += len - item.length + item.length = len + this.get(key) + trim(this) + return true + } -/***/ }), + const hit = new Entry(key, value, len, now, maxAge) -/***/ 4416: -/***/ ((__unused_webpack_module, exports) => { + // oversized objects fall out of cache automatically. 
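+    // (editorial sketch, not part of the bundled lru-cache source: with a
+    //  cache built as `new LRUCache({ max: 5, length: s => s.length })`,
+    //  `cache.set('k', 'way-too-long')` computes len = 12 > max, so the
+    //  following check rejects the entry and set() returns false.)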
+ if (hit.length > this[MAX]) { + if (this[DISPOSE]) + this[DISPOSE](key, value) + + return false + } -"use strict"; + this[LENGTH] += hit.length + this[LRU_LIST].unshift(hit) + this[CACHE].set(key, this[LRU_LIST].head) + trim(this) + return true + } -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -//# sourceMappingURL=span.js.map + has (key) { + if (!this[CACHE].has(key)) return false + const hit = this[CACHE].get(key).value + return !isStale(this, hit) + } -/***/ }), + get (key) { + return get(this, key, true) + } -/***/ 5769: -/***/ ((__unused_webpack_module, exports) => { + peek (key) { + return get(this, key, false) + } -"use strict"; + pop () { + const node = this[LRU_LIST].tail + if (!node) + return null -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -//# sourceMappingURL=span_context.js.map + del(this, node) + return node.value + } -/***/ }), + del (key) { + del(this, this[CACHE].get(key)) + } -/***/ 1424: -/***/ ((__unused_webpack_module, exports) => { + load (arr) { + // reset the cache + this.reset() -"use strict"; + const now = Date.now() + // A previous serialized cache has the most recent items first + for (let l = arr.length - 1; l >= 0; l--) { + const hit = arr[l] + const expiresAt = hit.e || 0 + if (expiresAt === 0) + // the item was created without expiration in a non aged cache + this.set(hit.k, hit.v) + else { + const maxAge = expiresAt - now + // dont add already expired items + if (maxAge > 0) { + this.set(hit.k, hit.v, maxAge) + } + } + } + } -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.SpanKind = void 0; -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -var SpanKind; -(function (SpanKind) { - /** Default value. 
Indicates that the span is used internally. */ - SpanKind[SpanKind["INTERNAL"] = 0] = "INTERNAL"; - /** - * Indicates that the span covers server-side handling of an RPC or other - * remote request. - */ - SpanKind[SpanKind["SERVER"] = 1] = "SERVER"; - /** - * Indicates that the span covers the client-side wrapper around an RPC or - * other remote request. - */ - SpanKind[SpanKind["CLIENT"] = 2] = "CLIENT"; - /** - * Indicates that the span describes producer sending a message to a - * broker. Unlike client and server, there is no direct critical path latency - * relationship between producer and consumer spans. - */ - SpanKind[SpanKind["PRODUCER"] = 3] = "PRODUCER"; - /** - * Indicates that the span describes consumer receiving a message from a - * broker. Unlike client and server, there is no direct critical path latency - * relationship between producer and consumer spans. - */ - SpanKind[SpanKind["CONSUMER"] = 4] = "CONSUMER"; -})(SpanKind = exports.SpanKind || (exports.SpanKind = {})); -//# sourceMappingURL=span_kind.js.map + prune () { + this[CACHE].forEach((value, key) => get(this, key, false)) + } +} -/***/ }), +const get = (self, key, doUse) => { + const node = self[CACHE].get(key) + if (node) { + const hit = node.value + if (isStale(self, hit)) { + del(self, node) + if (!self[ALLOW_STALE]) + return undefined + } else { + if (doUse) { + if (self[UPDATE_AGE_ON_GET]) + node.value.now = Date.now() + self[LRU_LIST].unshiftNode(node) + } + } + return hit.value + } +} -/***/ 9745: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { +const isStale = (self, hit) => { + if (!hit || (!hit.maxAge && !self[MAX_AGE])) + return false -"use strict"; + const diff = Date.now() - hit.now + return hit.maxAge ? diff > hit.maxAge + : self[MAX_AGE] && (diff > self[MAX_AGE]) +} -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.wrapSpanContext = exports.isSpanContextValid = exports.isValidSpanId = exports.isValidTraceId = void 0; -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -var invalid_span_constants_1 = __nccwpck_require__(1760); -var NonRecordingSpan_1 = __nccwpck_require__(1462); -var VALID_TRACEID_REGEX = /^([0-9a-f]{32})$/i; -var VALID_SPANID_REGEX = /^[0-9a-f]{16}$/i; -function isValidTraceId(traceId) { - return VALID_TRACEID_REGEX.test(traceId) && traceId !== invalid_span_constants_1.INVALID_TRACEID; +const trim = self => { + if (self[LENGTH] > self[MAX]) { + for (let walker = self[LRU_LIST].tail; + self[LENGTH] > self[MAX] && walker !== null;) { + // We know that we're about to delete this one, and also + // what the next least recently used key will be, so just + // go ahead and set it now. 
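+      // (editorial note, not part of the bundled lru-cache source: this walk
+      //  starts at the tail, the least recently used end, and del()s entries
+      //  until the total length fits under max again; lowering `cache.max`
+      //  triggers this same walk through the max setter.)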
+ const prev = walker.prev + del(self, walker) + walker = prev + } + } } -exports.isValidTraceId = isValidTraceId; -function isValidSpanId(spanId) { - return VALID_SPANID_REGEX.test(spanId) && spanId !== invalid_span_constants_1.INVALID_SPANID; + +const del = (self, node) => { + if (node) { + const hit = node.value + if (self[DISPOSE]) + self[DISPOSE](hit.key, hit.value) + + self[LENGTH] -= hit.length + self[CACHE].delete(hit.key) + self[LRU_LIST].removeNode(node) + } } -exports.isValidSpanId = isValidSpanId; -/** - * Returns true if this {@link SpanContext} is valid. - * @return true if this {@link SpanContext} is valid. - */ -function isSpanContextValid(spanContext) { - return (isValidTraceId(spanContext.traceId) && isValidSpanId(spanContext.spanId)); + +class Entry { + constructor (key, value, length, now, maxAge) { + this.key = key + this.value = value + this.length = length + this.now = now + this.maxAge = maxAge || 0 + } } -exports.isSpanContextValid = isSpanContextValid; -/** - * Wrap the given {@link SpanContext} in a new non-recording {@link Span} - * - * @param spanContext span context to be wrapped - * @returns a new non-recording {@link Span} with the provided context - */ -function wrapSpanContext(spanContext) { - return new NonRecordingSpan_1.NonRecordingSpan(spanContext); + +const forEachStep = (self, fn, node, thisp) => { + let hit = node.value + if (isStale(self, hit)) { + del(self, node) + if (!self[ALLOW_STALE]) + hit = undefined + } + if (hit) + fn.call(thisp, hit.value, hit.key, self) } -exports.wrapSpanContext = wrapSpanContext; -//# sourceMappingURL=spancontext-utils.js.map + +module.exports = LRUCache + /***/ }), -/***/ 8845: -/***/ ((__unused_webpack_module, exports) => { +/***/ 7426: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { -"use strict"; +/*! + * mime-db + * Copyright(c) 2014 Jonathan Ong + * Copyright(c) 2015-2022 Douglas Christopher Wilson + * MIT Licensed + */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.SpanStatusCode = void 0; /** - * An enumeration of status codes. + * Module exports. */ -var SpanStatusCode; -(function (SpanStatusCode) { - /** - * The default status. - */ - SpanStatusCode[SpanStatusCode["UNSET"] = 0] = "UNSET"; - /** - * The operation has been validated by an Application developer or - * Operator to have completed successfully. - */ - SpanStatusCode[SpanStatusCode["OK"] = 1] = "OK"; - /** - * The operation contains an error. - */ - SpanStatusCode[SpanStatusCode["ERROR"] = 2] = "ERROR"; -})(SpanStatusCode = exports.SpanStatusCode || (exports.SpanStatusCode = {})); -//# sourceMappingURL=status.js.map - -/***/ }), -/***/ 6905: -/***/ ((__unused_webpack_module, exports) => { - -"use strict"; +module.exports = __nccwpck_require__(3765) -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.TraceFlags = void 0; -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -var TraceFlags; -(function (TraceFlags) { - /** Represents no flag set. */ - TraceFlags[TraceFlags["NONE"] = 0] = "NONE"; - /** Bit to represent whether trace is sampled in trace flags. */ - TraceFlags[TraceFlags["SAMPLED"] = 1] = "SAMPLED"; -})(TraceFlags = exports.TraceFlags || (exports.TraceFlags = {})); -//# sourceMappingURL=trace_flags.js.map /***/ }), -/***/ 8384: -/***/ ((__unused_webpack_module, exports) => { +/***/ 3583: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; - -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. +/*! + * mime-types + * Copyright(c) 2014 Jonathan Ong + * Copyright(c) 2015 Douglas Christopher Wilson + * MIT Licensed */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -//# sourceMappingURL=trace_state.js.map -/***/ }), - -/***/ 3168: -/***/ ((__unused_webpack_module, exports) => { -"use strict"; -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. +/** + * Module dependencies. + * @private */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -//# sourceMappingURL=tracer.js.map -/***/ }), +var db = __nccwpck_require__(7426) +var extname = (__nccwpck_require__(1017).extname) -/***/ 1823: -/***/ ((__unused_webpack_module, exports) => { +/** + * Module variables. + * @private + */ -"use strict"; +var EXTRACT_TYPE_REGEXP = /^\s*([^;\s]*)(?:;|\s|$)/ +var TEXT_TYPE_REGEXP = /^text\//i -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. +/** + * Module exports. 
+ * @public */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -//# sourceMappingURL=tracer_options.js.map - -/***/ }), -/***/ 891: -/***/ ((__unused_webpack_module, exports) => { +exports.charset = charset +exports.charsets = { lookup: charset } +exports.contentType = contentType +exports.extension = extension +exports.extensions = Object.create(null) +exports.lookup = lookup +exports.types = Object.create(null) -"use strict"; +// Populate the extensions/types maps +populateMaps(exports.extensions, exports.types) -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 +/** + * Get the default charset for a MIME type. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * @param {string} type + * @return {boolean|string} */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -//# sourceMappingURL=tracer_provider.js.map -/***/ }), +function charset (type) { + if (!type || typeof type !== 'string') { + return false + } -/***/ 8996: -/***/ ((__unused_webpack_module, exports) => { + // TODO: use media-typer + var match = EXTRACT_TYPE_REGEXP.exec(type) + var mime = match && db[match[1].toLowerCase()] -"use strict"; + if (mime && mime.charset) { + return mime.charset + } -/* - * Copyright The OpenTelemetry Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.VERSION = void 0; -// this is autogenerated file, see scripts/version-update.js -exports.VERSION = '1.2.0'; -//# sourceMappingURL=version.js.map + // default text/* to utf-8 + if (match && TEXT_TYPE_REGEXP.test(match[1])) { + return 'UTF-8' + } -/***/ }), + return false +} -/***/ 4812: -/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { +/** + * Create a full Content-Type header given a MIME type or extension. + * + * @param {string} str + * @return {boolean|string} + */ -module.exports = -{ - parallel : __nccwpck_require__(8210), - serial : __nccwpck_require__(445), - serialOrdered : __nccwpck_require__(3578) -}; +function contentType (str) { + // TODO: should this even be in this module? + if (!str || typeof str !== 'string') { + return false + } + var mime = str.indexOf('/') === -1 + ? 
exports.lookup(str) + : str -/***/ }), + if (!mime) { + return false + } -/***/ 1700: -/***/ ((module) => { + // TODO: use content-type or other module + if (mime.indexOf('charset') === -1) { + var charset = exports.charset(mime) + if (charset) mime += '; charset=' + charset.toLowerCase() + } -// API -module.exports = abort; + return mime +} /** - * Aborts leftover active jobs + * Get the default extension for a MIME type. * - * @param {object} state - current state object + * @param {string} type + * @return {boolean|string} */ -function abort(state) -{ - Object.keys(state.jobs).forEach(clean.bind(state)); - // reset leftover jobs - state.jobs = {}; +function extension (type) { + if (!type || typeof type !== 'string') { + return false + } + + // TODO: use media-typer + var match = EXTRACT_TYPE_REGEXP.exec(type) + + // get extensions + var exts = match && exports.extensions[match[1].toLowerCase()] + + if (!exts || !exts.length) { + return false + } + + return exts[0] } /** - * Cleans up leftover job by invoking abort function for the provided job id + * Lookup the MIME type for a file path/extension. * - * @this state - * @param {string|number} key - job id to abort + * @param {string} path + * @return {boolean|string} */ -function clean(key) -{ - if (typeof this.jobs[key] == 'function') - { - this.jobs[key](); - } -} - -/***/ }), +function lookup (path) { + if (!path || typeof path !== 'string') { + return false + } -/***/ 2794: -/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + // get the extension ("ext" or ".ext" or full path) + var extension = extname('x.' + path) + .toLowerCase() + .substr(1) -var defer = __nccwpck_require__(5295); + if (!extension) { + return false + } -// API -module.exports = async; + return exports.types[extension] || false +} /** - * Runs provided callback asynchronously - * even if callback itself is not - * - * @param {function} callback - callback to invoke - * @returns {function} - augmented callback + * Populate the extensions and types maps. + * @private */ -function async(callback) -{ - var isAsync = false; - // check if async happened - defer(function() { isAsync = true; }); +function populateMaps (extensions, types) { + // source preference (least -> most) + var preference = ['nginx', 'apache', undefined, 'iana'] - return function async_callback(err, result) - { - if (isAsync) - { - callback(err, result); - } - else - { - defer(function nextTick_callback() - { - callback(err, result); - }); - } - }; -} + Object.keys(db).forEach(function forEachMimeType (type) { + var mime = db[type] + var exts = mime.extensions + if (!exts || !exts.length) { + return + } -/***/ }), + // mime -> extensions + extensions[type] = exts -/***/ 5295: -/***/ ((module) => { + // extension -> mime + for (var i = 0; i < exts.length; i++) { + var extension = exts[i] -module.exports = defer; + if (types[extension]) { + var from = preference.indexOf(db[types[extension]].source) + var to = preference.indexOf(mime.source) -/** - * Runs provided function on next iteration of the event loop - * - * @param {function} fn - function to run - */ -function defer(fn) -{ - var nextTick = typeof setImmediate == 'function' - ? setImmediate - : ( - typeof process == 'object' && typeof process.nextTick == 'function' - ? 
process.nextTick - : null - ); + if (types[extension] !== 'application/octet-stream' && + (from > to || (from === to && types[extension].substr(0, 12) === 'application/'))) { + // skip the remapping + continue + } + } - if (nextTick) - { - nextTick(fn); - } - else - { - setTimeout(fn, 0); - } + // set the extension -> mime + types[extension] = type + } + }) } /***/ }), -/***/ 9023: +/***/ 3973: /***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { -var async = __nccwpck_require__(2794) - , abort = __nccwpck_require__(1700) - ; +module.exports = minimatch +minimatch.Minimatch = Minimatch -// API -module.exports = iterate; +var path = (function () { try { return __nccwpck_require__(1017) } catch (e) {}}()) || { + sep: '/' +} +minimatch.sep = path.sep -/** - * Iterates over each job object - * - * @param {array|object} list - array or object (named list) to iterate over - * @param {function} iterator - iterator to run - * @param {object} state - current job status - * @param {function} callback - invoked when all elements processed - */ -function iterate(list, iterator, state, callback) -{ - // store current index - var key = state['keyedList'] ? state['keyedList'][state.index] : state.index; +var GLOBSTAR = minimatch.GLOBSTAR = Minimatch.GLOBSTAR = {} +var expand = __nccwpck_require__(3717) - state.jobs[key] = runJob(iterator, key, list[key], function(error, output) - { - // don't repeat yourself - // skip secondary callbacks - if (!(key in state.jobs)) - { - return; - } +var plTypes = { + '!': { open: '(?:(?!(?:', close: '))[^/]*?)'}, + '?': { open: '(?:', close: ')?' }, + '+': { open: '(?:', close: ')+' }, + '*': { open: '(?:', close: ')*' }, + '@': { open: '(?:', close: ')' } +} - // clean up jobs - delete state.jobs[key]; +// any single thing other than / +// don't need to escape / when using new RegExp() +var qmark = '[^/]' - if (error) - { - // don't process rest of the results - // stop still active jobs - // and reset the list - abort(state); - } - else - { - state.results[key] = output; - } +// * => any number of characters +var star = qmark + '*?' - // return salvaged results - callback(error, state.results); - }); -} +// ** when dots are allowed. Anything goes, except .. and . +// not (^ or / followed by one or two dots followed by $ or /), +// followed by anything, any number of times. +var twoStarDot = '(?:(?!(?:\\\/|^)(?:\\.{1,2})($|\\\/)).)*?' -/** - * Runs iterator over provided job element - * - * @param {function} iterator - iterator to invoke - * @param {string|number} key - key/index of the element in the list of jobs - * @param {mixed} item - job description - * @param {function} callback - invoked after iterator is done with the job - * @returns {function|mixed} - job abort function or something else - */ -function runJob(iterator, key, item, callback) -{ - var aborter; +// not a ^ or / followed by a dot, +// followed by anything, any number of times. +var twoStarNoDot = '(?:(?!(?:\\\/|^)\\.).)*?' - // allow shortcut if iterator expects only two arguments - if (iterator.length == 2) - { - aborter = iterator(item, async(callback)); - } - // otherwise go with full three arguments - else - { - aborter = iterator(item, key, async(callback)); - } +// characters that need to be escaped in RegExp. +var reSpecials = charSet('().*{}+?[]^$\\!') - return aborter; +// "abc" -> { a:true, b:true, c:true } +function charSet (s) { + return s.split('').reduce(function (set, c) { + set[c] = true + return set + }, {}) } +// normalizes slashes. 
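+// (editorial sketch, not in the upstream minimatch source: splitting on this
+//  regexp collapses runs of slashes, e.g.
+//  'a//b///c'.split(slashSplit)  // => [ 'a', 'b', 'c' ])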
+var slashSplit = /\/+/ -/***/ }), - -/***/ 2474: -/***/ ((module) => { - -// API -module.exports = state; +minimatch.filter = filter +function filter (pattern, options) { + options = options || {} + return function (p, i, list) { + return minimatch(p, pattern, options) + } +} -/** - * Creates initial state object - * for iteration over list - * - * @param {array|object} list - list to iterate over - * @param {function|null} sortMethod - function to use for keys sort, - * or `null` to keep them as is - * @returns {object} - initial state object - */ -function state(list, sortMethod) -{ - var isNamedList = !Array.isArray(list) - , initState = - { - index : 0, - keyedList: isNamedList || sortMethod ? Object.keys(list) : null, - jobs : {}, - results : isNamedList ? {} : [], - size : isNamedList ? Object.keys(list).length : list.length - } - ; +function ext (a, b) { + b = b || {} + var t = {} + Object.keys(a).forEach(function (k) { + t[k] = a[k] + }) + Object.keys(b).forEach(function (k) { + t[k] = b[k] + }) + return t +} - if (sortMethod) - { - // sort array keys based on it's values - // sort object's keys just on own merit - initState.keyedList.sort(isNamedList ? sortMethod : function(a, b) - { - return sortMethod(list[a], list[b]); - }); +minimatch.defaults = function (def) { + if (!def || typeof def !== 'object' || !Object.keys(def).length) { + return minimatch } - return initState; -} + var orig = minimatch + var m = function minimatch (p, pattern, options) { + return orig(p, pattern, ext(def, options)) + } -/***/ }), + m.Minimatch = function Minimatch (pattern, options) { + return new orig.Minimatch(pattern, ext(def, options)) + } + m.Minimatch.defaults = function defaults (options) { + return orig.defaults(ext(def, options)).Minimatch + } -/***/ 7942: -/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + m.filter = function filter (pattern, options) { + return orig.filter(pattern, ext(def, options)) + } -var abort = __nccwpck_require__(1700) - , async = __nccwpck_require__(2794) - ; + m.defaults = function defaults (options) { + return orig.defaults(ext(def, options)) + } -// API -module.exports = terminator; + m.makeRe = function makeRe (pattern, options) { + return orig.makeRe(pattern, ext(def, options)) + } -/** - * Terminates jobs in the attached state context - * - * @this AsyncKitState# - * @param {function} callback - final callback to invoke after termination - */ -function terminator(callback) -{ - if (!Object.keys(this.jobs).length) - { - return; + m.braceExpand = function braceExpand (pattern, options) { + return orig.braceExpand(pattern, ext(def, options)) } - // fast forward iteration index - this.index = this.size; + m.match = function (list, pattern, options) { + return orig.match(list, pattern, ext(def, options)) + } - // abort jobs - abort(this); + return m +} - // send back results we have so far - async(callback)(null, this.results); +Minimatch.defaults = function (def) { + return minimatch.defaults(def).Minimatch } +function minimatch (p, pattern, options) { + assertValidPattern(pattern) -/***/ }), + if (!options) options = {} -/***/ 8210: -/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + // shortcut: comments match nothing. 
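+  // (editorial sketch, not in the upstream minimatch source:
+  //  minimatch('anything', '#foo')  // => false, since a leading '#' marks a
+  //  comment; pass { nocomment: true } to treat the pattern literally instead.)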
+ if (!options.nocomment && pattern.charAt(0) === '#') { + return false + } -var iterate = __nccwpck_require__(9023) - , initState = __nccwpck_require__(2474) - , terminator = __nccwpck_require__(7942) - ; + return new Minimatch(pattern, options).match(p) +} -// Public API -module.exports = parallel; +function Minimatch (pattern, options) { + if (!(this instanceof Minimatch)) { + return new Minimatch(pattern, options) + } -/** - * Runs iterator over provided array elements in parallel - * - * @param {array|object} list - array or object (named list) to iterate over - * @param {function} iterator - iterator to run - * @param {function} callback - invoked when all elements processed - * @returns {function} - jobs terminator - */ -function parallel(list, iterator, callback) -{ - var state = initState(list); + assertValidPattern(pattern) - while (state.index < (state['keyedList'] || list).length) - { - iterate(list, iterator, state, function(error, result) - { - if (error) - { - callback(error, result); - return; - } + if (!options) options = {} - // looks like it's the last one - if (Object.keys(state.jobs).length === 0) - { - callback(null, state.results); - return; - } - }); + pattern = pattern.trim() - state.index++; + // windows support: need to use /, not \ + if (!options.allowWindowsEscape && path.sep !== '/') { + pattern = pattern.split(path.sep).join('/') } - return terminator.bind(state, callback); -} + this.options = options + this.set = [] + this.pattern = pattern + this.regexp = null + this.negate = false + this.comment = false + this.empty = false + this.partial = !!options.partial + // make the set of regexps etc. + this.make() +} -/***/ }), +Minimatch.prototype.debug = function () {} -/***/ 445: -/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { +Minimatch.prototype.make = make +function make () { + var pattern = this.pattern + var options = this.options -var serialOrdered = __nccwpck_require__(3578); + // empty patterns and comments match nothing. + if (!options.nocomment && pattern.charAt(0) === '#') { + this.comment = true + return + } + if (!pattern) { + this.empty = true + return + } -// Public API -module.exports = serial; + // step 1: figure out negation, etc. + this.parseNegate() -/** - * Runs iterator over provided array elements in series - * - * @param {array|object} list - array or object (named list) to iterate over - * @param {function} iterator - iterator to run - * @param {function} callback - invoked when all elements processed - * @returns {function} - jobs terminator - */ -function serial(list, iterator, callback) -{ - return serialOrdered(list, iterator, null, callback); -} + // step 2: expand braces + var set = this.globSet = this.braceExpand() + if (options.debug) this.debug = function debug() { console.error.apply(console, arguments) } -/***/ }), + this.debug(this.pattern, set) -/***/ 3578: -/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + // step 3: now we have a set, so turn each one into a series of path-portion + // matching patterns. 
+ // These will be regexps, except in the case of "**", which is + // set to the GLOBSTAR object for globstar behavior, + // and will not contain any / characters + set = this.globParts = set.map(function (s) { + return s.split(slashSplit) + }) -var iterate = __nccwpck_require__(9023) - , initState = __nccwpck_require__(2474) - , terminator = __nccwpck_require__(7942) - ; + this.debug(this.pattern, set) -// Public API -module.exports = serialOrdered; -// sorting helpers -module.exports.ascending = ascending; -module.exports.descending = descending; + // glob --> regexps + set = set.map(function (s, si, set) { + return s.map(this.parse, this) + }, this) -/** - * Runs iterator over provided sorted array elements in series - * - * @param {array|object} list - array or object (named list) to iterate over - * @param {function} iterator - iterator to run - * @param {function} sortMethod - custom sort function - * @param {function} callback - invoked when all elements processed - * @returns {function} - jobs terminator - */ -function serialOrdered(list, iterator, sortMethod, callback) -{ - var state = initState(list, sortMethod); + this.debug(this.pattern, set) - iterate(list, iterator, state, function iteratorHandler(error, result) - { - if (error) - { - callback(error, result); - return; - } + // filter out everything that didn't compile properly. + set = set.filter(function (s) { + return s.indexOf(false) === -1 + }) - state.index++; + this.debug(this.pattern, set) - // are we there yet? - if (state.index < (state['keyedList'] || list).length) - { - iterate(list, iterator, state, iteratorHandler); - return; - } + this.set = set +} - // done here - callback(null, state.results); - }); +Minimatch.prototype.parseNegate = parseNegate +function parseNegate () { + var pattern = this.pattern + var negate = false + var options = this.options + var negateOffset = 0 - return terminator.bind(state, callback); -} + if (options.nonegate) return -/* - * -- Sort methods - */ + for (var i = 0, l = pattern.length + ; i < l && pattern.charAt(i) === '!' + ; i++) { + negate = !negate + negateOffset++ + } -/** - * sort helper to sort array elements in ascending order - * - * @param {mixed} a - an item to compare - * @param {mixed} b - an item to compare - * @returns {number} - comparison result - */ -function ascending(a, b) -{ - return a < b ? -1 : a > b ? 1 : 0; + if (negateOffset) this.pattern = pattern.substr(negateOffset) + this.negate = negate } -/** - * sort helper to sort array elements in descending order - * - * @param {mixed} a - an item to compare - * @param {mixed} b - an item to compare - * @returns {number} - comparison result - */ -function descending(a, b) -{ - return -1 * ascending(a, b); +// Brace expansion: +// a{b,c}d -> abd acd +// a{b,}c -> abc ac +// a{0..3}d -> a0d a1d a2d a3d +// a{b,c{d,e}f}g -> abg acdfg acefg +// a{b,c}d{e,f}g -> abdeg acdeg abdeg abdfg +// +// Invalid sets are not expanded. +// a{2..}b -> a{2..}b +// a{b}c -> a{b}c +minimatch.braceExpand = function (pattern, options) { + return braceExpand(pattern, options) } +Minimatch.prototype.braceExpand = braceExpand -/***/ }), - -/***/ 9417: -/***/ ((module) => { +function braceExpand (pattern, options) { + if (!options) { + if (this instanceof Minimatch) { + options = this.options + } else { + options = {} + } + } -"use strict"; + pattern = typeof pattern === 'undefined' + ? 
this.pattern : pattern -module.exports = balanced; -function balanced(a, b, str) { - if (a instanceof RegExp) a = maybeMatch(a, str); - if (b instanceof RegExp) b = maybeMatch(b, str); + assertValidPattern(pattern) - var r = range(a, b, str); + // Thanks to Yeting Li for + // improving this regexp to avoid a ReDOS vulnerability. + if (options.nobrace || !/\{(?:(?!\{).)*\}/.test(pattern)) { + // shortcut. no need to expand. + return [pattern] + } - return r && { - start: r[0], - end: r[1], - pre: str.slice(0, r[0]), - body: str.slice(r[0] + a.length, r[1]), - post: str.slice(r[1] + b.length) - }; + return expand(pattern) } -function maybeMatch(reg, str) { - var m = str.match(reg); - return m ? m[0] : null; -} +var MAX_PATTERN_LENGTH = 1024 * 64 +var assertValidPattern = function (pattern) { + if (typeof pattern !== 'string') { + throw new TypeError('invalid pattern') + } -balanced.range = range; -function range(a, b, str) { - var begs, beg, left, right, result; - var ai = str.indexOf(a); - var bi = str.indexOf(b, ai + 1); - var i = ai; + if (pattern.length > MAX_PATTERN_LENGTH) { + throw new TypeError('pattern is too long') + } +} - if (ai >= 0 && bi > 0) { - if(a===b) { - return [ai, bi]; - } - begs = []; - left = str.length; +// parse a component of the expanded set. +// At this point, no pattern may contain "/" in it +// so we're going to return a 2d array, where each entry is the full +// pattern, split on '/', and then turned into a regular expression. +// A regexp is made at the end which joins each array with an +// escaped /, and another full one which joins each regexp with |. +// +// Following the lead of Bash 4.1, note that "**" only has special meaning +// when it is the *only* thing in a path portion. Otherwise, any series +// of * is equivalent to a single *. Globstar behavior is enabled by +// default, and can be disabled by setting options.noglobstar. +Minimatch.prototype.parse = parse +var SUBPARSE = {} +function parse (pattern, isSub) { + assertValidPattern(pattern) - while (i >= 0 && !result) { - if (i == ai) { - begs.push(i); - ai = str.indexOf(a, i + 1); - } else if (begs.length == 1) { - result = [ begs.pop(), bi ]; - } else { - beg = begs.pop(); - if (beg < left) { - left = beg; - right = bi; - } + var options = this.options - bi = str.indexOf(b, i + 1); - } + // shortcuts + if (pattern === '**') { + if (!options.noglobstar) + return GLOBSTAR + else + pattern = '*' + } + if (pattern === '') return '' - i = ai < bi && ai >= 0 ? ai : bi; - } + var re = '' + var hasMagic = !!options.nocase + var escaping = false + // ? => one single character + var patternListStack = [] + var negativeLists = [] + var stateChar + var inClass = false + var reClassStart = -1 + var classStart = -1 + // . and .. never match anything that doesn't start with ., + // even when options.dot is set. + var patternStart = pattern.charAt(0) === '.' ? '' // anything + // not (start or / followed by . or .. followed by / or end) + : options.dot ? '(?!(?:^|\\\/)\\.{1,2}(?:$|\\\/))' + : '(?!\\.)' + var self = this - if (begs.length) { - result = [ left, right ]; + function clearStateChar () { + if (stateChar) { + // we had some state-tracking character + // that wasn't consumed by this pass. 
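// Editorial sketch, not part of the patch: a flushed '*' lands in the regexp
// source as star ("[^/]*?"), '?' as qmark ("[^/]"), and any other pending
// character (e.g. '+' when options.noext disables extglobs) is emitted
// escaped. Hypothetical helper, never invoked:
function demoStateCharFlush () {
  return [minimatch.makeRe('a*'), minimatch.makeRe('a?')]
}
// The switch below performs that flush: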
+ switch (stateChar) { + case '*': + re += star + hasMagic = true + break + case '?': + re += qmark + hasMagic = true + break + default: + re += '\\' + stateChar + break + } + self.debug('clearStateChar %j %j', stateChar, re) + stateChar = false } } - return result; -} - - -/***/ }), + for (var i = 0, len = pattern.length, c + ; (i < len) && (c = pattern.charAt(i)) + ; i++) { + this.debug('%s\t%s %s %j', pattern, i, re, c) -/***/ 3717: -/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + // skip over any that are escaped. + if (escaping && reSpecials[c]) { + re += '\\' + c + escaping = false + continue + } -var concatMap = __nccwpck_require__(6891); -var balanced = __nccwpck_require__(9417); + switch (c) { + /* istanbul ignore next */ + case '/': { + // completely not allowed, even escaped. + // Should already be path-split by now. + return false + } -module.exports = expandTop; + case '\\': + clearStateChar() + escaping = true + continue -var escSlash = '\0SLASH'+Math.random()+'\0'; -var escOpen = '\0OPEN'+Math.random()+'\0'; -var escClose = '\0CLOSE'+Math.random()+'\0'; -var escComma = '\0COMMA'+Math.random()+'\0'; -var escPeriod = '\0PERIOD'+Math.random()+'\0'; + // the various stateChar values + // for the "extglob" stuff. + case '?': + case '*': + case '+': + case '@': + case '!': + this.debug('%s\t%s %s %j <-- stateChar', pattern, i, re, c) -function numeric(str) { - return parseInt(str, 10) == str - ? parseInt(str, 10) - : str.charCodeAt(0); -} + // all of those are literals inside a class, except that + // the glob [!a] means [^a] in regexp + if (inClass) { + this.debug(' in class') + if (c === '!' && i === classStart + 1) c = '^' + re += c + continue + } -function escapeBraces(str) { - return str.split('\\\\').join(escSlash) - .split('\\{').join(escOpen) - .split('\\}').join(escClose) - .split('\\,').join(escComma) - .split('\\.').join(escPeriod); -} + // if we already have a stateChar, then it means + // that there was something like ** or +? in there. + // Handle the stateChar, then proceed with this one. + self.debug('call clearStateChar %j', stateChar) + clearStateChar() + stateChar = c + // if extglob is disabled, then +(asdf|foo) isn't a thing. + // just clear the statechar *now*, rather than even diving into + // the patternList stuff. + if (options.noext) clearStateChar() + continue -function unescapeBraces(str) { - return str.split(escSlash).join('\\') - .split(escOpen).join('{') - .split(escClose).join('}') - .split(escComma).join(',') - .split(escPeriod).join('.'); -} + case '(': + if (inClass) { + re += '(' + continue + } + if (!stateChar) { + re += '\\(' + continue + } -// Basically just str.split(","), but handling cases -// where we have nested braced sections, which should be -// treated as individual members, like {a,{b,c},d} -function parseCommaParts(str) { - if (!str) - return ['']; + patternListStack.push({ + type: stateChar, + start: i - 1, + reStart: re.length, + open: plTypes[stateChar].open, + close: plTypes[stateChar].close + }) + // negation is (?:(?!js)[^/]*) + re += stateChar === '!' ? 
'(?:(?!(?:' : '(?:' + this.debug('plType %j %j', stateChar, re) + stateChar = false + continue - var parts = []; - var m = balanced('{', '}', str); + case ')': + if (inClass || !patternListStack.length) { + re += '\\)' + continue + } - if (!m) - return str.split(','); + clearStateChar() + hasMagic = true + var pl = patternListStack.pop() + // negation is (?:(?!js)[^/]*) + // The others are (?:) + re += pl.close + if (pl.type === '!') { + negativeLists.push(pl) + } + pl.reEnd = re.length + continue - var pre = m.pre; - var body = m.body; - var post = m.post; - var p = pre.split(','); + case '|': + if (inClass || !patternListStack.length || escaping) { + re += '\\|' + escaping = false + continue + } - p[p.length-1] += '{' + body + '}'; - var postParts = parseCommaParts(post); - if (post.length) { - p[p.length-1] += postParts.shift(); - p.push.apply(p, postParts); - } + clearStateChar() + re += '|' + continue - parts.push.apply(parts, p); + // these are mostly the same in regexp and glob + case '[': + // swallow any state-tracking char before the [ + clearStateChar() - return parts; -} + if (inClass) { + re += '\\' + c + continue + } -function expandTop(str) { - if (!str) - return []; + inClass = true + classStart = i + reClassStart = re.length + re += c + continue - // I don't know why Bash 4.3 does this, but it does. - // Anything starting with {} will have the first two bytes preserved - // but *only* at the top level, so {},a}b will not expand to anything, - // but a{},b}c will be expanded to [a}c,abc]. - // One could argue that this is a bug in Bash, but since the goal of - // this module is to match Bash's rules, we escape a leading {} - if (str.substr(0, 2) === '{}') { - str = '\\{\\}' + str.substr(2); - } + case ']': + // a right bracket shall lose its special + // meaning and represent itself in + // a bracket expression if it occurs + // first in the list. -- POSIX.2 2.8.3.2 + if (i === classStart + 1 || !inClass) { + re += '\\' + c + escaping = false + continue + } - return expand(escapeBraces(str), true).map(unescapeBraces); -} + // handle the case where we left a class open. + // "[z-a]" is valid, equivalent to "\[z-a\]" + // split where the last [ was, make sure we don't have + // an invalid re. if so, re-walk the contents of the + // would-be class to re-translate any characters that + // were passed through as-is + // TODO: It would probably be faster to determine this + // without a try/catch and a new RegExp, but it's tricky + // to do safely. For now, this is safe and works. + var cs = pattern.substring(classStart + 1, i) + try { + RegExp('[' + cs + ']') + } catch (er) { + // not a valid class! + var sp = this.parse(cs, SUBPARSE) + re = re.substr(0, reClassStart) + '\\[' + sp[0] + '\\]' + hasMagic = hasMagic || sp[1] + inClass = false + continue + } -function identity(e) { - return e; -} + // finish up the class. 
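// Editorial sketch, not in the upstream diff: a class that is not a valid
// regexp range, e.g. "[z-a]", was escaped above and now only matches the
// literal text "[z-a]". Hypothetical helper, never invoked:
function demoInvalidClass () {
  return [
    minimatch('[z-a]', '[z-a]'), // true: escaped, matched literally
    minimatch('b', '[a-c]')      // true: a valid class matches normally
  ]
}
// A valid class falls through to be closed here: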
+ hasMagic = true + inClass = false + re += c + continue -function embrace(str) { - return '{' + str + '}'; -} -function isPadded(el) { - return /^-?0\d/.test(el); -} + default: + // swallow any state char that wasn't consumed + clearStateChar() -function lte(i, y) { - return i <= y; -} -function gte(i, y) { - return i >= y; -} + if (escaping) { + // no need + escaping = false + } else if (reSpecials[c] + && !(c === '^' && inClass)) { + re += '\\' + } -function expand(str, isTop) { - var expansions = []; + re += c - var m = balanced('{', '}', str); - if (!m || /\$$/.test(m.pre)) return [str]; + } // switch + } // for - var isNumericSequence = /^-?\d+\.\.-?\d+(?:\.\.-?\d+)?$/.test(m.body); - var isAlphaSequence = /^[a-zA-Z]\.\.[a-zA-Z](?:\.\.-?\d+)?$/.test(m.body); - var isSequence = isNumericSequence || isAlphaSequence; - var isOptions = m.body.indexOf(',') >= 0; - if (!isSequence && !isOptions) { - // {a},b} - if (m.post.match(/,.*\}/)) { - str = m.pre + '{' + m.body + escClose + m.post; - return expand(str); - } - return [str]; + // handle the case where we left a class open. + // "[abc" is valid, equivalent to "\[abc" + if (inClass) { + // split where the last [ was, and escape it + // this is a huge pita. We now have to re-walk + // the contents of the would-be class to re-translate + // any characters that were passed through as-is + cs = pattern.substr(classStart + 1) + sp = this.parse(cs, SUBPARSE) + re = re.substr(0, reClassStart) + '\\[' + sp[0] + hasMagic = hasMagic || sp[1] } - var n; - if (isSequence) { - n = m.body.split(/\.\./); - } else { - n = parseCommaParts(m.body); - if (n.length === 1) { - // x{{a,b}}y ==> x{a}y x{b}y - n = expand(n[0], false).map(embrace); - if (n.length === 1) { - var post = m.post.length - ? expand(m.post, false) - : ['']; - return post.map(function(p) { - return m.pre + n[0] + p; - }); + // handle the case where we had a +( thing at the *end* + // of the pattern. + // each pattern list stack adds 3 chars, and we need to go through + // and escape any | chars that were passed through as-is for the regexp. + // Go through and escape them, taking care not to double-escape any + // | chars that were already escaped. + for (pl = patternListStack.pop(); pl; pl = patternListStack.pop()) { + var tail = re.slice(pl.reStart + pl.open.length) + this.debug('setting tail', re, pl) + // maybe some even number of \, then maybe 1 \, followed by a | + tail = tail.replace(/((?:\\{2}){0,64})(\\?)\|/g, function (_, $1, $2) { + if (!$2) { + // the | isn't already escaped, so escape it. + $2 = '\\' } - } + + // need to escape all those slashes *again*, without escaping the + // one that we need for escaping the | character. As it works out, + // escaping an even number of slashes can be done by simply repeating + // it exactly after itself. That's why this trick works. + // + // I am sorry that you have to see this. + return $1 + $1 + $2 + '|' + }) + + this.debug('tail=%j\n %s', tail, tail, pl, re) + var t = pl.type === '*' ? star + : pl.type === '?' ? qmark + : '\\' + pl.type + + hasMagic = true + re = re.slice(0, pl.reStart) + t + '\\(' + tail } - // at this point, n is the parts, and we know it's not a comma set - // with a single entry. + // handle trailing things that only matter at the very end. + clearStateChar() + if (escaping) { + // trailing \\ + re += '\\\\' + } - // no need to expand pre, since it is guaranteed to be free of brace-sets - var pre = m.pre; - var post = m.post.length - ? 
expand(m.post, false) - : ['']; + // only need to apply the nodot start if the re starts with + // something that could conceivably capture a dot + var addPatternStart = false + switch (re.charAt(0)) { + case '[': case '.': case '(': addPatternStart = true + } - var N; + // Hack to work around lack of negative lookbehind in JS + // A pattern like: *.!(x).!(y|z) needs to ensure that a name + // like 'a.xyz.yz' doesn't match. So, the first negative + // lookahead, has to look ALL the way ahead, to the end of + // the pattern. + for (var n = negativeLists.length - 1; n > -1; n--) { + var nl = negativeLists[n] - if (isSequence) { - var x = numeric(n[0]); - var y = numeric(n[1]); - var width = Math.max(n[0].length, n[1].length) - var incr = n.length == 3 - ? Math.abs(numeric(n[2])) - : 1; - var test = lte; - var reverse = y < x; - if (reverse) { - incr *= -1; - test = gte; - } - var pad = n.some(isPadded); + var nlBefore = re.slice(0, nl.reStart) + var nlFirst = re.slice(nl.reStart, nl.reEnd - 8) + var nlLast = re.slice(nl.reEnd - 8, nl.reEnd) + var nlAfter = re.slice(nl.reEnd) - N = []; + nlLast += nlAfter - for (var i = x; test(i, y); i += incr) { - var c; - if (isAlphaSequence) { - c = String.fromCharCode(i); - if (c === '\\') - c = ''; - } else { - c = String(i); - if (pad) { - var need = width - c.length; - if (need > 0) { - var z = new Array(need + 1).join('0'); - if (i < 0) - c = '-' + z + c.slice(1); - else - c = z + c; - } - } - } - N.push(c); + // Handle nested stuff like *(*.js|!(*.json)), where open parens + // mean that we should *not* include the ) in the bit that is considered + // "after" the negated section. + var openParensBefore = nlBefore.split('(').length - 1 + var cleanAfter = nlAfter + for (i = 0; i < openParensBefore; i++) { + cleanAfter = cleanAfter.replace(/\)[+*?]?/, '') } - } else { - N = concatMap(n, function(el) { return expand(el, false) }); - } + nlAfter = cleanAfter - for (var j = 0; j < N.length; j++) { - for (var k = 0; k < post.length; k++) { - var expansion = pre + N[j] + post[k]; - if (!isTop || isSequence || expansion) - expansions.push(expansion); + var dollar = '' + if (nlAfter === '' && isSub !== SUBPARSE) { + dollar = '$' } + var newRe = nlBefore + nlFirst + nlAfter + dollar + nlLast + re = newRe } - return expansions; -} + // if the re is not "" at this point, then we need to make sure + // it doesn't match against an empty path part. + // Otherwise a/* will match a/, which it should not. + if (re !== '' && hasMagic) { + re = '(?=.)' + re + } + if (addPatternStart) { + re = patternStart + re + } + // parsing just a piece of a larger pattern. + if (isSub === SUBPARSE) { + return [re, hasMagic] + } -/***/ }), + // skip the regexp for non-magical patterns + // unescape anything in it, though, so that it'll be + // an exact match against a file etc. + if (!hasMagic) { + return globUnescape(pattern) + } -/***/ 5443: -/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + var flags = options.nocase ? 'i' : '' + try { + var regExp = new RegExp('^' + re + '$', flags) + } catch (er) /* istanbul ignore next - should be impossible */ { + // If it was an invalid regular expression, then it can't match + // anything. This trick looks for a character after the end of + // the string, which is of course impossible, except in multi-line + // mode, but it's not a /m regex. 
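// Editorial aside, not part of the patch: '$' can only be followed by a
// character in multiline mode, so without the /m flag '$.' matches nothing.
// Hypothetical helper, never invoked:
function demoNeverMatching () {
  var never = new RegExp('$.')
  return [never.test(''), never.test('anything')] // both false
}
// Hence the never-matching fallback below: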
+ return new RegExp('$.') + } -var util = __nccwpck_require__(3837); -var Stream = (__nccwpck_require__(2781).Stream); -var DelayedStream = __nccwpck_require__(8611); + regExp._glob = pattern + regExp._src = re -module.exports = CombinedStream; -function CombinedStream() { - this.writable = false; - this.readable = true; - this.dataSize = 0; - this.maxDataSize = 2 * 1024 * 1024; - this.pauseStreams = true; + return regExp +} - this._released = false; - this._streams = []; - this._currentStream = null; - this._insideLoop = false; - this._pendingNext = false; +minimatch.makeRe = function (pattern, options) { + return new Minimatch(pattern, options || {}).makeRe() } -util.inherits(CombinedStream, Stream); -CombinedStream.create = function(options) { - var combinedStream = new this(); +Minimatch.prototype.makeRe = makeRe +function makeRe () { + if (this.regexp || this.regexp === false) return this.regexp - options = options || {}; - for (var option in options) { - combinedStream[option] = options[option]; - } + // at this point, this.set is a 2d array of partial + // pattern strings, or "**". + // + // It's better to use .match(). This function shouldn't + // be used, really, but it's pretty convenient sometimes, + // when you just want to work with a regex. + var set = this.set - return combinedStream; -}; + if (!set.length) { + this.regexp = false + return this.regexp + } + var options = this.options -CombinedStream.isStreamLike = function(stream) { - return (typeof stream !== 'function') - && (typeof stream !== 'string') - && (typeof stream !== 'boolean') - && (typeof stream !== 'number') - && (!Buffer.isBuffer(stream)); -}; + var twoStar = options.noglobstar ? star + : options.dot ? twoStarDot + : twoStarNoDot + var flags = options.nocase ? 'i' : '' -CombinedStream.prototype.append = function(stream) { - var isStreamLike = CombinedStream.isStreamLike(stream); + var re = set.map(function (pattern) { + return pattern.map(function (p) { + return (p === GLOBSTAR) ? twoStar + : (typeof p === 'string') ? regExpEscape(p) + : p._src + }).join('\\\/') + }).join('|') - if (isStreamLike) { - if (!(stream instanceof DelayedStream)) { - var newStream = DelayedStream.create(stream, { - maxDataSize: Infinity, - pauseStream: this.pauseStreams, - }); - stream.on('data', this._checkDataSize.bind(this)); - stream = newStream; - } + // must match entire pattern + // ending in a * or ** will make it less strict. + re = '^(?:' + re + ')$' - this._handleErrors(stream); + // can match anything, as long as it's not this. + if (this.negate) re = '^(?!' + re + ').*$' - if (this.pauseStreams) { - stream.pause(); - } + try { + this.regexp = new RegExp(re, flags) + } catch (ex) /* istanbul ignore next - should be impossible */ { + this.regexp = false } + return this.regexp +} - this._streams.push(stream); - return this; -}; +minimatch.match = function (list, pattern, options) { + options = options || {} + var mm = new Minimatch(pattern, options) + list = list.filter(function (f) { + return mm.match(f) + }) + if (mm.options.nonull && !list.length) { + list.push(pattern) + } + return list +} -CombinedStream.prototype.pipe = function(dest, options) { - Stream.prototype.pipe.call(this, dest, options); - this.resume(); - return dest; -}; +Minimatch.prototype.match = function match (f, partial) { + if (typeof partial === 'undefined') partial = this.partial + this.debug('match', f, this.pattern) + // short-circuit in the case of busted things. + // comments, etc. 
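// Editorial usage sketch, not from the upstream diff: matching is done per
// path portion, and a leading '!' flips the result. Hypothetical helper,
// never invoked:
function demoMatch () {
  return [
    new Minimatch('a/*/c').match('a/b/c'),     // true
    new Minimatch('!a/*/c').match('a/b/c'),    // false: negated
    minimatch.match(['a.js', 'b.ts'], '*.js')  // ['a.js']
  ]
}
// First the short-circuits noted above: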
+ if (this.comment) return false + if (this.empty) return f === '' -CombinedStream.prototype._getNext = function() { - this._currentStream = null; + if (f === '/' && partial) return true - if (this._insideLoop) { - this._pendingNext = true; - return; // defer call - } + var options = this.options - this._insideLoop = true; - try { - do { - this._pendingNext = false; - this._realGetNext(); - } while (this._pendingNext); - } finally { - this._insideLoop = false; + // windows: need to use /, not \ + if (path.sep !== '/') { + f = f.split(path.sep).join('/') } -}; -CombinedStream.prototype._realGetNext = function() { - var stream = this._streams.shift(); + // treat the test path as a set of pathparts. + f = f.split(slashSplit) + this.debug(this.pattern, 'split', f) + // just ONE of the pattern sets in this.set needs to match + // in order for it to be valid. If negating, then just one + // match means that we have failed. + // Either way, return on the first hit. - if (typeof stream == 'undefined') { - this.end(); - return; - } + var set = this.set + this.debug(this.pattern, 'set', set) - if (typeof stream !== 'function') { - this._pipeNext(stream); - return; + // Find the basename of the path by looking for the last non-empty segment + var filename + var i + for (i = f.length - 1; i >= 0; i--) { + filename = f[i] + if (filename) break } - var getStream = stream; - getStream(function(stream) { - var isStreamLike = CombinedStream.isStreamLike(stream); - if (isStreamLike) { - stream.on('data', this._checkDataSize.bind(this)); - this._handleErrors(stream); + for (i = 0; i < set.length; i++) { + var pattern = set[i] + var file = f + if (options.matchBase && pattern.length === 1) { + file = [filename] + } + var hit = this.matchOne(file, pattern, partial) + if (hit) { + if (options.flipNegate) return true + return !this.negate } - - this._pipeNext(stream); - }.bind(this)); -}; - -CombinedStream.prototype._pipeNext = function(stream) { - this._currentStream = stream; - - var isStreamLike = CombinedStream.isStreamLike(stream); - if (isStreamLike) { - stream.on('end', this._getNext.bind(this)); - stream.pipe(this, {end: false}); - return; } - var value = stream; - this.write(value); - this._getNext(); -}; + // didn't get any hits. this is success if it's a negative + // pattern, failure otherwise. + if (options.flipNegate) return false + return this.negate +} -CombinedStream.prototype._handleErrors = function(stream) { - var self = this; - stream.on('error', function(err) { - self._emitError(err); - }); -}; +// set partial to true to test if, for example, +// "/a/b" matches the start of "/*/b/*/d" +// Partial means, if you run out of file before you run +// out of pattern, then that's fine, as long as all +// the parts match. 
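// Editorial sketch of the partial-match semantics described above, not part
// of the upstream patch. Hypothetical helper, never invoked:
function demoPartial () {
  var mm = new Minimatch('/*/b/*/d')
  return [
    mm.match('/a/b', true), // true: file ran out, but all consumed parts matched
    mm.match('/a/b'),       // false: a full match is required
    mm.match('/a/x', true)  // false: 'x' does not match 'b'
  ]
}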
+Minimatch.prototype.matchOne = function (file, pattern, partial) { + var options = this.options -CombinedStream.prototype.write = function(data) { - this.emit('data', data); -}; + this.debug('matchOne', + { 'this': this, file: file, pattern: pattern }) -CombinedStream.prototype.pause = function() { - if (!this.pauseStreams) { - return; - } + this.debug('matchOne', file.length, pattern.length) - if(this.pauseStreams && this._currentStream && typeof(this._currentStream.pause) == 'function') this._currentStream.pause(); - this.emit('pause'); -}; + for (var fi = 0, + pi = 0, + fl = file.length, + pl = pattern.length + ; (fi < fl) && (pi < pl) + ; fi++, pi++) { + this.debug('matchOne loop') + var p = pattern[pi] + var f = file[fi] -CombinedStream.prototype.resume = function() { - if (!this._released) { - this._released = true; - this.writable = true; - this._getNext(); - } + this.debug(pattern, p, f) - if(this.pauseStreams && this._currentStream && typeof(this._currentStream.resume) == 'function') this._currentStream.resume(); - this.emit('resume'); -}; + // should be impossible. + // some invalid regexp stuff in the set. + /* istanbul ignore if */ + if (p === false) return false -CombinedStream.prototype.end = function() { - this._reset(); - this.emit('end'); -}; + if (p === GLOBSTAR) { + this.debug('GLOBSTAR', [pattern, p, f]) -CombinedStream.prototype.destroy = function() { - this._reset(); - this.emit('close'); -}; + // "**" + // a/**/b/**/c would match the following: + // a/b/x/y/z/c + // a/x/y/z/b/c + // a/b/x/b/x/c + // a/b/c + // To do this, take the rest of the pattern after + // the **, and see if it would match the file remainder. + // If so, return success. + // If not, the ** "swallows" a segment, and try again. + // This is recursively awful. + // + // a/**/b/**/c matching a/b/x/y/z/c + // - a matches a + // - doublestar + // - matchOne(b/x/y/z/c, b/**/c) + // - b matches b + // - doublestar + // - matchOne(x/y/z/c, c) -> no + // - matchOne(y/z/c, c) -> no + // - matchOne(z/c, c) -> no + // - matchOne(c, c) yes, hit + var fr = fi + var pr = pi + 1 + if (pr === pl) { + this.debug('** at the end') + // a ** at the end will just swallow the rest. + // We have found a match. + // however, it will not swallow /.x, unless + // options.dot is set. + // . and .. are *never* matched by **, for explosively + // exponential reasons. + for (; fi < fl; fi++) { + if (file[fi] === '.' || file[fi] === '..' || + (!options.dot && file[fi].charAt(0) === '.')) return false + } + return true + } -CombinedStream.prototype._reset = function() { - this.writable = false; - this._streams = []; - this._currentStream = null; -}; + // ok, let's see if we can swallow whatever we can. + while (fr < fl) { + var swallowee = file[fr] -CombinedStream.prototype._checkDataSize = function() { - this._updateDataSize(); - if (this.dataSize <= this.maxDataSize) { - return; - } + this.debug('\nglobstar while', file, fr, pattern, pr, swallowee) - var message = - 'DelayedStream#maxDataSize of ' + this.maxDataSize + ' bytes exceeded.'; - this._emitError(new Error(message)); -}; + // XXX remove this slice. Just pass the start index. + if (this.matchOne(file.slice(fr), pattern.slice(pr), partial)) { + this.debug('globstar found match!', fr, fl, swallowee) + // found a match. + return true + } else { + // can't swallow "." or ".." ever. + // can only swallow ".foo" when explicitly asked. + if (swallowee === '.' || swallowee === '..' 
|| + (!options.dot && swallowee.charAt(0) === '.')) { + this.debug('dot detected!', file, fr, pattern, pr) + break + } -CombinedStream.prototype._updateDataSize = function() { - this.dataSize = 0; + // ** swallows a segment, and continue. + this.debug('globstar swallow a segment, and continue') + fr++ + } + } - var self = this; - this._streams.forEach(function(stream) { - if (!stream.dataSize) { - return; + // no match was found. + // However, in partial mode, we can't say this is necessarily over. + // If there's more *pattern* left, then + /* istanbul ignore if */ + if (partial) { + // ran out of file + this.debug('\n>>> no match, partial?', file, fr, pattern, pr) + if (fr === fl) return true + } + return false } - self.dataSize += stream.dataSize; - }); + // something other than ** + // non-magic patterns just have to match exactly + // patterns with magic have been turned into regexps. + var hit + if (typeof p === 'string') { + hit = f === p + this.debug('string match', p, f, hit) + } else { + hit = f.match(p) + this.debug('pattern match', p, f, hit) + } - if (this._currentStream && this._currentStream.dataSize) { - this.dataSize += this._currentStream.dataSize; + if (!hit) return false + } + + // Note: ending in / means that we'll get a final "" + // at the end of the pattern. This can only match a + // corresponding "" at the end of the file. + // If the file ends in /, then it can only match a + // a pattern that ends in /, unless the pattern just + // doesn't have any more for it. But, a/b/ should *not* + // match "a/b/*", even though "" matches against the + // [^/]*? pattern, except in partial mode, where it might + // simply not be reached yet. + // However, a/b/ should still satisfy a/* + + // now either we fell off the end of the pattern, or we're done. + if (fi === fl && pi === pl) { + // ran out of pattern and filename at the same time. + // an exact hit! + return true + } else if (fi === fl) { + // ran out of file, but still had pattern left. + // this is ok if we're doing the match as part of + // a glob fs traversal. + return partial + } else /* istanbul ignore else */ if (pi === pl) { + // ran out of pattern, still have file left. + // this is only acceptable if we're on the very last + // empty segment of a file with a trailing slash. + // a/* should match a/b/ + return (fi === fl - 1) && (file[fi] === '') } -}; -CombinedStream.prototype._emitError = function(err) { - this._reset(); - this.emit('error', err); -}; + // should be unreachable. + /* istanbul ignore next */ + throw new Error('wtf?') +} +// replace stuff like \* with * +function globUnescape (s) { + return s.replace(/\\(.)/g, '$1') +} -/***/ }), +function regExpEscape (s) { + return s.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, '\\$&') +} -/***/ 6891: -/***/ ((module) => { -module.exports = function (xs, fn) { - var res = []; - for (var i = 0; i < xs.length; i++) { - var x = fn(xs[i], i); - if (isArray(x)) res.push.apply(res, x); - else res.push(x); - } - return res; -}; +/***/ }), -var isArray = Array.isArray || function (xs) { - return Object.prototype.toString.call(xs) === '[object Array]'; -}; +/***/ 467: +/***/ ((module, exports, __nccwpck_require__) => { +"use strict"; -/***/ }), -/***/ 8611: -/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { +Object.defineProperty(exports, "__esModule", ({ value: true })); -var Stream = (__nccwpck_require__(2781).Stream); -var util = __nccwpck_require__(3837); +function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? 
ex['default'] : ex; } -module.exports = DelayedStream; -function DelayedStream() { - this.source = null; - this.dataSize = 0; - this.maxDataSize = 1024 * 1024; - this.pauseStream = true; +var Stream = _interopDefault(__nccwpck_require__(2781)); +var http = _interopDefault(__nccwpck_require__(3685)); +var Url = _interopDefault(__nccwpck_require__(7310)); +var whatwgUrl = _interopDefault(__nccwpck_require__(8665)); +var https = _interopDefault(__nccwpck_require__(5687)); +var zlib = _interopDefault(__nccwpck_require__(9796)); - this._maxDataSizeExceeded = false; - this._released = false; - this._bufferedEvents = []; -} -util.inherits(DelayedStream, Stream); +// Based on https://github.com/tmpvar/jsdom/blob/aa85b2abf07766ff7bf5c1f6daafb3726f2f2db5/lib/jsdom/living/blob.js -DelayedStream.create = function(source, options) { - var delayedStream = new this(); +// fix for "Readable" isn't a named export issue +const Readable = Stream.Readable; - options = options || {}; - for (var option in options) { - delayedStream[option] = options[option]; - } +const BUFFER = Symbol('buffer'); +const TYPE = Symbol('type'); - delayedStream.source = source; +class Blob { + constructor() { + this[TYPE] = ''; - var realEmit = source.emit; - source.emit = function() { - delayedStream._handleEmit(arguments); - return realEmit.apply(source, arguments); - }; + const blobParts = arguments[0]; + const options = arguments[1]; - source.on('error', function() {}); - if (delayedStream.pauseStream) { - source.pause(); - } + const buffers = []; + let size = 0; - return delayedStream; -}; + if (blobParts) { + const a = blobParts; + const length = Number(a.length); + for (let i = 0; i < length; i++) { + const element = a[i]; + let buffer; + if (element instanceof Buffer) { + buffer = element; + } else if (ArrayBuffer.isView(element)) { + buffer = Buffer.from(element.buffer, element.byteOffset, element.byteLength); + } else if (element instanceof ArrayBuffer) { + buffer = Buffer.from(element); + } else if (element instanceof Blob) { + buffer = element[BUFFER]; + } else { + buffer = Buffer.from(typeof element === 'string' ? 
element : String(element)); + } + size += buffer.length; + buffers.push(buffer); + } + } -Object.defineProperty(DelayedStream.prototype, 'readable', { - configurable: true, - enumerable: true, - get: function() { - return this.source.readable; - } -}); + this[BUFFER] = Buffer.concat(buffers); -DelayedStream.prototype.setEncoding = function() { - return this.source.setEncoding.apply(this.source, arguments); -}; + let type = options && options.type !== undefined && String(options.type).toLowerCase(); + if (type && !/[^\u0020-\u007E]/.test(type)) { + this[TYPE] = type; + } + } + get size() { + return this[BUFFER].length; + } + get type() { + return this[TYPE]; + } + text() { + return Promise.resolve(this[BUFFER].toString()); + } + arrayBuffer() { + const buf = this[BUFFER]; + const ab = buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + return Promise.resolve(ab); + } + stream() { + const readable = new Readable(); + readable._read = function () {}; + readable.push(this[BUFFER]); + readable.push(null); + return readable; + } + toString() { + return '[object Blob]'; + } + slice() { + const size = this.size; -DelayedStream.prototype.resume = function() { - if (!this._released) { - this.release(); - } + const start = arguments[0]; + const end = arguments[1]; + let relativeStart, relativeEnd; + if (start === undefined) { + relativeStart = 0; + } else if (start < 0) { + relativeStart = Math.max(size + start, 0); + } else { + relativeStart = Math.min(start, size); + } + if (end === undefined) { + relativeEnd = size; + } else if (end < 0) { + relativeEnd = Math.max(size + end, 0); + } else { + relativeEnd = Math.min(end, size); + } + const span = Math.max(relativeEnd - relativeStart, 0); - this.source.resume(); -}; + const buffer = this[BUFFER]; + const slicedBuffer = buffer.slice(relativeStart, relativeStart + span); + const blob = new Blob([], { type: arguments[2] }); + blob[BUFFER] = slicedBuffer; + return blob; + } +} -DelayedStream.prototype.pause = function() { - this.source.pause(); -}; +Object.defineProperties(Blob.prototype, { + size: { enumerable: true }, + type: { enumerable: true }, + slice: { enumerable: true } +}); -DelayedStream.prototype.release = function() { - this._released = true; +Object.defineProperty(Blob.prototype, Symbol.toStringTag, { + value: 'Blob', + writable: false, + enumerable: false, + configurable: true +}); - this._bufferedEvents.forEach(function(args) { - this.emit.apply(this, args); - }.bind(this)); - this._bufferedEvents = []; -}; +/** + * fetch-error.js + * + * FetchError interface for operational errors + */ -DelayedStream.prototype.pipe = function() { - var r = Stream.prototype.pipe.apply(this, arguments); - this.resume(); - return r; -}; +/** + * Create FetchError instance + * + * @param String message Error message for human + * @param String type Error type for machine + * @param String systemError For Node.js system error + * @return FetchError + */ +function FetchError(message, type, systemError) { + Error.call(this, message); -DelayedStream.prototype._handleEmit = function(args) { - if (this._released) { - this.emit.apply(this, args); - return; - } + this.message = message; + this.type = type; - if (args[0] === 'data') { - this.dataSize += args[1].length; - this._checkIfMaxDataSizeExceeded(); + // when err.type is `system`, err.code contains system error code + if (systemError) { + this.code = this.errno = systemError.code; } - this._bufferedEvents.push(args); -}; + // hide custom error implementation details from end-users + 
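// Editorial sketch, not part of the patch: how callers typically branch on
// the fields set above. Hypothetical helper, never invoked:
function demoFetchErrorFields (err) {
  if (err instanceof FetchError && err.type === 'system') {
    // for 'system' errors, code/errno carry the Node.js error code
    return err.code // e.g. 'ECONNRESET'
  }
  return null
}
// Then the stack capture itself: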
Error.captureStackTrace(this, this.constructor); +} -DelayedStream.prototype._checkIfMaxDataSizeExceeded = function() { - if (this._maxDataSizeExceeded) { - return; - } +FetchError.prototype = Object.create(Error.prototype); +FetchError.prototype.constructor = FetchError; +FetchError.prototype.name = 'FetchError'; - if (this.dataSize <= this.maxDataSize) { - return; - } +let convert; +try { + convert = (__nccwpck_require__(2877).convert); +} catch (e) {} - this._maxDataSizeExceeded = true; - var message = - 'DelayedStream#maxDataSize of ' + this.maxDataSize + ' bytes exceeded.' - this.emit('error', new Error(message)); -}; +const INTERNALS = Symbol('Body internals'); +// fix an issue where "PassThrough" isn't a named export for node <10 +const PassThrough = Stream.PassThrough; -/***/ }), +/** + * Body mixin + * + * Ref: https://fetch.spec.whatwg.org/#body + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +function Body(body) { + var _this = this; -/***/ 7129: -/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + var _ref = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}, + _ref$size = _ref.size; -"use strict"; + let size = _ref$size === undefined ? 0 : _ref$size; + var _ref$timeout = _ref.timeout; + let timeout = _ref$timeout === undefined ? 0 : _ref$timeout; + if (body == null) { + // body is undefined or null + body = null; + } else if (isURLSearchParams(body)) { + // body is a URLSearchParams + body = Buffer.from(body.toString()); + } else if (isBlob(body)) ; else if (Buffer.isBuffer(body)) ; else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') { + // body is ArrayBuffer + body = Buffer.from(body); + } else if (ArrayBuffer.isView(body)) { + // body is ArrayBufferView + body = Buffer.from(body.buffer, body.byteOffset, body.byteLength); + } else if (body instanceof Stream) ; else { + // none of the above + // coerce to string then buffer + body = Buffer.from(String(body)); + } + this[INTERNALS] = { + body, + disturbed: false, + error: null + }; + this.size = size; + this.timeout = timeout; -// A linked list to keep track of recently-used-ness -const Yallist = __nccwpck_require__(665) + if (body instanceof Stream) { + body.on('error', function (err) { + const error = err.name === 'AbortError' ? err : new FetchError(`Invalid response body while trying to fetch ${_this.url}: ${err.message}`, 'system', err); + _this[INTERNALS].error = error; + }); + } +} -const MAX = Symbol('max') -const LENGTH = Symbol('length') -const LENGTH_CALCULATOR = Symbol('lengthCalculator') -const ALLOW_STALE = Symbol('allowStale') -const MAX_AGE = Symbol('maxAge') -const DISPOSE = Symbol('dispose') -const NO_DISPOSE_ON_SET = Symbol('noDisposeOnSet') -const LRU_LIST = Symbol('lruList') -const CACHE = Symbol('cache') -const UPDATE_AGE_ON_GET = Symbol('updateAgeOnGet') +Body.prototype = { + get body() { + return this[INTERNALS].body; + }, -const naiveLength = () => 1 + get bodyUsed() { + return this[INTERNALS].disturbed; + }, -// lruList is a yallist where the head is the youngest -// item, and the tail is the oldest. the list contains the Hit -// objects as the entries. -// Each Hit object has a reference to its Yallist.Node. This -// never changes. -// -// cache is a Map (or PseudoMap) that matches the keys to -// the Yallist.Node object. 
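// Editorial sketch, not part of the upstream diff: the recency bookkeeping
// described above can be imitated with a plain Map, whose insertion order
// stands in for the yallist. A minimal, hypothetical illustration of the
// get/set semantics (never instantiated here):
class DemoLRU {
  constructor (max) { this.max = max; this.map = new Map() }
  get (key) {
    if (!this.map.has(key)) return undefined
    const value = this.map.get(key)
    this.map.delete(key); this.map.set(key, value) // move to most-recent
    return value
  }
  set (key, value) {
    if (this.map.has(key)) this.map.delete(key)
    this.map.set(key, value)
    if (this.map.size > this.max) {
      // evict the least-recently-used entry: first key in insertion order
      this.map.delete(this.map.keys().next().value)
    }
  }
}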
-class LRUCache { - constructor (options) { - if (typeof options === 'number') - options = { max: options } + /** + * Decode response as ArrayBuffer + * + * @return Promise + */ + arrayBuffer() { + return consumeBody.call(this).then(function (buf) { + return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + }); + }, - if (!options) - options = {} + /** + * Return raw response as Blob + * + * @return Promise + */ + blob() { + let ct = this.headers && this.headers.get('content-type') || ''; + return consumeBody.call(this).then(function (buf) { + return Object.assign( + // Prevent copying + new Blob([], { + type: ct.toLowerCase() + }), { + [BUFFER]: buf + }); + }); + }, - if (options.max && (typeof options.max !== 'number' || options.max < 0)) - throw new TypeError('max must be a non-negative number') - // Kind of weird to have a default max of Infinity, but oh well. - const max = this[MAX] = options.max || Infinity + /** + * Decode response as json + * + * @return Promise + */ + json() { + var _this2 = this; - const lc = options.length || naiveLength - this[LENGTH_CALCULATOR] = (typeof lc !== 'function') ? naiveLength : lc - this[ALLOW_STALE] = options.stale || false - if (options.maxAge && typeof options.maxAge !== 'number') - throw new TypeError('maxAge must be a number') - this[MAX_AGE] = options.maxAge || 0 - this[DISPOSE] = options.dispose - this[NO_DISPOSE_ON_SET] = options.noDisposeOnSet || false - this[UPDATE_AGE_ON_GET] = options.updateAgeOnGet || false - this.reset() - } + return consumeBody.call(this).then(function (buffer) { + try { + return JSON.parse(buffer.toString()); + } catch (err) { + return Body.Promise.reject(new FetchError(`invalid json response body at ${_this2.url} reason: ${err.message}`, 'invalid-json')); + } + }); + }, - // resize the cache when the max changes. - set max (mL) { - if (typeof mL !== 'number' || mL < 0) - throw new TypeError('max must be a non-negative number') + /** + * Decode response as text + * + * @return Promise + */ + text() { + return consumeBody.call(this).then(function (buffer) { + return buffer.toString(); + }); + }, - this[MAX] = mL || Infinity - trim(this) - } - get max () { - return this[MAX] - } + /** + * Decode response as buffer (non-spec api) + * + * @return Promise + */ + buffer() { + return consumeBody.call(this); + }, - set allowStale (allowStale) { - this[ALLOW_STALE] = !!allowStale - } - get allowStale () { - return this[ALLOW_STALE] - } + /** + * Decode response as text, while automatically detecting the encoding and + * trying to decode to UTF-8 (non-spec api) + * + * @return Promise + */ + textConverted() { + var _this3 = this; - set maxAge (mA) { - if (typeof mA !== 'number') - throw new TypeError('maxAge must be a non-negative number') + return consumeBody.call(this).then(function (buffer) { + return convertBody(buffer, _this3.headers); + }); + } +}; - this[MAX_AGE] = mA - trim(this) - } - get maxAge () { - return this[MAX_AGE] - } +// In browsers, all properties are enumerable. +Object.defineProperties(Body.prototype, { + body: { enumerable: true }, + bodyUsed: { enumerable: true }, + arrayBuffer: { enumerable: true }, + blob: { enumerable: true }, + json: { enumerable: true }, + text: { enumerable: true } +}); - // resize the cache when the lengthCalculator changes. 
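// Editorial usage sketch, not part of the patch (comment-only, as this sits
// inside the class body): a custom length counts entries by weight instead
// of one apiece, e.g. byte-sized strings, assuming the documented
// options.length hook:
//   const cache = new LRUCache({ max: 1024, length: (value) => value.length })
//   cache.set('key', 'x'.repeat(100)) // consumes 100 of the 1024 budget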
- set lengthCalculator (lC) { - if (typeof lC !== 'function') - lC = naiveLength +Body.mixIn = function (proto) { + for (const name of Object.getOwnPropertyNames(Body.prototype)) { + // istanbul ignore else: future proof + if (!(name in proto)) { + const desc = Object.getOwnPropertyDescriptor(Body.prototype, name); + Object.defineProperty(proto, name, desc); + } + } +}; - if (lC !== this[LENGTH_CALCULATOR]) { - this[LENGTH_CALCULATOR] = lC - this[LENGTH] = 0 - this[LRU_LIST].forEach(hit => { - hit.length = this[LENGTH_CALCULATOR](hit.value, hit.key) - this[LENGTH] += hit.length - }) - } - trim(this) - } - get lengthCalculator () { return this[LENGTH_CALCULATOR] } +/** + * Consume and convert an entire Body to a Buffer. + * + * Ref: https://fetch.spec.whatwg.org/#concept-body-consume-body + * + * @return Promise + */ +function consumeBody() { + var _this4 = this; - get length () { return this[LENGTH] } - get itemCount () { return this[LRU_LIST].length } + if (this[INTERNALS].disturbed) { + return Body.Promise.reject(new TypeError(`body used already for: ${this.url}`)); + } - rforEach (fn, thisp) { - thisp = thisp || this - for (let walker = this[LRU_LIST].tail; walker !== null;) { - const prev = walker.prev - forEachStep(this, fn, walker, thisp) - walker = prev - } - } + this[INTERNALS].disturbed = true; - forEach (fn, thisp) { - thisp = thisp || this - for (let walker = this[LRU_LIST].head; walker !== null;) { - const next = walker.next - forEachStep(this, fn, walker, thisp) - walker = next - } - } + if (this[INTERNALS].error) { + return Body.Promise.reject(this[INTERNALS].error); + } - keys () { - return this[LRU_LIST].toArray().map(k => k.key) - } + let body = this.body; - values () { - return this[LRU_LIST].toArray().map(k => k.value) - } + // body is null + if (body === null) { + return Body.Promise.resolve(Buffer.alloc(0)); + } - reset () { - if (this[DISPOSE] && - this[LRU_LIST] && - this[LRU_LIST].length) { - this[LRU_LIST].forEach(hit => this[DISPOSE](hit.key, hit.value)) - } + // body is blob + if (isBlob(body)) { + body = body.stream(); + } - this[CACHE] = new Map() // hash of items by key - this[LRU_LIST] = new Yallist() // list of items in order of use recency - this[LENGTH] = 0 // length of items in the list - } + // body is buffer + if (Buffer.isBuffer(body)) { + return Body.Promise.resolve(body); + } - dump () { - return this[LRU_LIST].map(hit => - isStale(this, hit) ? false : { - k: hit.key, - v: hit.value, - e: hit.now + (hit.maxAge || 0) - }).toArray().filter(h => h) - } + // istanbul ignore if: should never happen + if (!(body instanceof Stream)) { + return Body.Promise.resolve(Buffer.alloc(0)); + } - dumpLru () { - return this[LRU_LIST] - } + // body is stream + // get ready to actually consume the body + let accum = []; + let accumBytes = 0; + let abort = false; - set (key, value, maxAge) { - maxAge = maxAge || this[MAX_AGE] + return new Body.Promise(function (resolve, reject) { + let resTimeout; - if (maxAge && typeof maxAge !== 'number') - throw new TypeError('maxAge must be a number') + // allow timeout on slow response body + if (_this4.timeout) { + resTimeout = setTimeout(function () { + abort = true; + reject(new FetchError(`Response timeout while trying to fetch ${_this4.url} (over ${_this4.timeout}ms)`, 'body-timeout')); + }, _this4.timeout); + } - const now = maxAge ? 
Date.now() : 0 - const len = this[LENGTH_CALCULATOR](value, key) + // handle stream errors + body.on('error', function (err) { + if (err.name === 'AbortError') { + // if the request was aborted, reject with this Error + abort = true; + reject(err); + } else { + // other errors, such as incorrect content-encoding + reject(new FetchError(`Invalid response body while trying to fetch ${_this4.url}: ${err.message}`, 'system', err)); + } + }); - if (this[CACHE].has(key)) { - if (len > this[MAX]) { - del(this, this[CACHE].get(key)) - return false - } + body.on('data', function (chunk) { + if (abort || chunk === null) { + return; + } - const node = this[CACHE].get(key) - const item = node.value + if (_this4.size && accumBytes + chunk.length > _this4.size) { + abort = true; + reject(new FetchError(`content size at ${_this4.url} over limit: ${_this4.size}`, 'max-size')); + return; + } - // dispose of the old one before overwriting - // split out into 2 ifs for better coverage tracking - if (this[DISPOSE]) { - if (!this[NO_DISPOSE_ON_SET]) - this[DISPOSE](key, item.value) - } + accumBytes += chunk.length; + accum.push(chunk); + }); - item.now = now - item.maxAge = maxAge - item.value = value - this[LENGTH] += len - item.length - item.length = len - this.get(key) - trim(this) - return true - } + body.on('end', function () { + if (abort) { + return; + } - const hit = new Entry(key, value, len, now, maxAge) + clearTimeout(resTimeout); - // oversized objects fall out of cache automatically. - if (hit.length > this[MAX]) { - if (this[DISPOSE]) - this[DISPOSE](key, value) + try { + resolve(Buffer.concat(accum, accumBytes)); + } catch (err) { + // handle streams that have accumulated too much data (issue #414) + reject(new FetchError(`Could not create Buffer from response body for ${_this4.url}: ${err.message}`, 'system', err)); + } + }); + }); +} - return false - } +/** + * Detect buffer encoding and convert to target encoding + * ref: http://www.w3.org/TR/2011/WD-html5-20110113/parsing.html#determining-the-character-encoding + * + * @param Buffer buffer Incoming buffer + * @param String encoding Target encoding + * @return String + */ +function convertBody(buffer, headers) { + if (typeof convert !== 'function') { + throw new Error('The package `encoding` must be installed to use the textConverted() function'); + } - this[LENGTH] += hit.length - this[LRU_LIST].unshift(hit) - this[CACHE].set(key, this[LRU_LIST].head) - trim(this) - return true - } + const ct = headers.get('content-type'); + let charset = 'utf-8'; + let res, str; - has (key) { - if (!this[CACHE].has(key)) return false - const hit = this[CACHE].get(key).value - return !isStale(this, hit) - } + // header + if (ct) { + res = /charset=([^;]*)/i.exec(ct); + } - get (key) { - return get(this, key, true) - } + // no charset in content type, peek at response body for at most 1024 bytes + str = buffer.slice(0, 1024).toString(); - peek (key) { - return get(this, key, false) - } + // html5 + if (!res && str) { + res = /= 0; l--) { - const hit = arr[l] - const expiresAt = hit.e || 0 - if (expiresAt === 0) - // the item was created without expiration in a non aged cache - this.set(hit.k, hit.v) - else { - const maxAge = expiresAt - now - // dont add already expired items - if (maxAge > 0) { - this.set(hit.k, hit.v, maxAge) - } - } - } - } + // prevent decode issues when sites use incorrect encoding + // ref: https://hsivonen.fi/encoding-menu/ + if (charset === 'gb2312' || charset === 'gbk') { + charset = 'gb18030'; + } + } - prune () { - 
this[CACHE].forEach((value, key) => get(this, key, false)) - } + // turn raw buffers into a single utf-8 buffer + return convert(buffer, 'UTF-8', charset).toString(); } -const get = (self, key, doUse) => { - const node = self[CACHE].get(key) - if (node) { - const hit = node.value - if (isStale(self, hit)) { - del(self, node) - if (!self[ALLOW_STALE]) - return undefined - } else { - if (doUse) { - if (self[UPDATE_AGE_ON_GET]) - node.value.now = Date.now() - self[LRU_LIST].unshiftNode(node) - } - } - return hit.value - } +/** + * Detect a URLSearchParams object + * ref: https://github.com/bitinn/node-fetch/issues/296#issuecomment-307598143 + * + * @param Object obj Object to detect by type or brand + * @return String + */ +function isURLSearchParams(obj) { + // Duck-typing as a necessary condition. + if (typeof obj !== 'object' || typeof obj.append !== 'function' || typeof obj.delete !== 'function' || typeof obj.get !== 'function' || typeof obj.getAll !== 'function' || typeof obj.has !== 'function' || typeof obj.set !== 'function') { + return false; + } + + // Brand-checking and more duck-typing as optional condition. + return obj.constructor.name === 'URLSearchParams' || Object.prototype.toString.call(obj) === '[object URLSearchParams]' || typeof obj.sort === 'function'; } -const isStale = (self, hit) => { - if (!hit || (!hit.maxAge && !self[MAX_AGE])) - return false +/** + * Check if `obj` is a W3C `Blob` object (which `File` inherits from) + * @param {*} obj + * @return {boolean} + */ +function isBlob(obj) { + return typeof obj === 'object' && typeof obj.arrayBuffer === 'function' && typeof obj.type === 'string' && typeof obj.stream === 'function' && typeof obj.constructor === 'function' && typeof obj.constructor.name === 'string' && /^(Blob|File)$/.test(obj.constructor.name) && /^(Blob|File)$/.test(obj[Symbol.toStringTag]); +} - const diff = Date.now() - hit.now - return hit.maxAge ? diff > hit.maxAge - : self[MAX_AGE] && (diff > self[MAX_AGE]) +/** + * Clone body given Res/Req instance + * + * @param Mixed instance Response or Request instance + * @return Mixed + */ +function clone(instance) { + let p1, p2; + let body = instance.body; + + // don't allow cloning a used body + if (instance.bodyUsed) { + throw new Error('cannot clone body after it is used'); + } + + // check that body is a stream and not form-data object + // note: we can't clone the form-data object without having it as a dependency + if (body instanceof Stream && typeof body.getBoundary !== 'function') { + // tee instance body + p1 = new PassThrough(); + p2 = new PassThrough(); + body.pipe(p1); + body.pipe(p2); + // set instance body to teed body and return the other teed body + instance[INTERNALS].body = p1; + body = p2; + } + + return body; } -const trim = self => { - if (self[LENGTH] > self[MAX]) { - for (let walker = self[LRU_LIST].tail; - self[LENGTH] > self[MAX] && walker !== null;) { - // We know that we're about to delete this one, and also - // what the next least recently used key will be, so just - // go ahead and set it now. - const prev = walker.prev - del(self, walker) - walker = prev - } - } +/** + * Performs the operation "extract a `Content-Type` value from |object|" as + * specified in the specification: + * https://fetch.spec.whatwg.org/#concept-bodyinit-extract + * + * This function assumes that instance.body is present. 
+ * + * @param Mixed instance Any options.body input + */ +function extractContentType(body) { + if (body === null) { + // body is null + return null; + } else if (typeof body === 'string') { + // body is string + return 'text/plain;charset=UTF-8'; + } else if (isURLSearchParams(body)) { + // body is a URLSearchParams + return 'application/x-www-form-urlencoded;charset=UTF-8'; + } else if (isBlob(body)) { + // body is blob + return body.type || null; + } else if (Buffer.isBuffer(body)) { + // body is buffer + return null; + } else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') { + // body is ArrayBuffer + return null; + } else if (ArrayBuffer.isView(body)) { + // body is ArrayBufferView + return null; + } else if (typeof body.getBoundary === 'function') { + // detect form data input from form-data module + return `multipart/form-data;boundary=${body.getBoundary()}`; + } else if (body instanceof Stream) { + // body is stream + // can't really do much about this + return null; + } else { + // Body constructor defaults other things to string + return 'text/plain;charset=UTF-8'; + } } -const del = (self, node) => { - if (node) { - const hit = node.value - if (self[DISPOSE]) - self[DISPOSE](hit.key, hit.value) +/** + * The Fetch Standard treats this as if "total bytes" is a property on the body. + * For us, we have to explicitly get it with a function. + * + * ref: https://fetch.spec.whatwg.org/#concept-body-total-bytes + * + * @param Body instance Instance of Body + * @return Number? Number of bytes, or null if not possible + */ +function getTotalBytes(instance) { + const body = instance.body; - self[LENGTH] -= hit.length - self[CACHE].delete(hit.key) - self[LRU_LIST].removeNode(node) - } -} -class Entry { - constructor (key, value, length, now, maxAge) { - this.key = key - this.value = value - this.length = length - this.now = now - this.maxAge = maxAge || 0 - } + if (body === null) { + // body is null + return 0; + } else if (isBlob(body)) { + return body.size; + } else if (Buffer.isBuffer(body)) { + // body is buffer + return body.length; + } else if (body && typeof body.getLengthSync === 'function') { + // detect form data input from form-data module + if (body._lengthRetrievers && body._lengthRetrievers.length == 0 || // 1.x + body.hasKnownLength && body.hasKnownLength()) { + // 2.x + return body.getLengthSync(); + } + return null; + } else { + // body is stream + return null; + } } -const forEachStep = (self, fn, node, thisp) => { - let hit = node.value - if (isStale(self, hit)) { - del(self, node) - if (!self[ALLOW_STALE]) - hit = undefined - } - if (hit) - fn.call(thisp, hit.value, hit.key, self) +/** + * Write a Body to a Node.js WritableStream (e.g. http.Request) object. 
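Editor's note: the branch ladder in extractContentType() is easiest to read as a table of sample inputs. Expected results under the rules above (illustrative only; the helper is internal to the bundle and not exported):

    // expected results under the branch order above (illustrative)
    extractContentType(null);                       // null
    extractContentType('hi');                       // 'text/plain;charset=UTF-8'
    extractContentType(new URLSearchParams('a=1')); // 'application/x-www-form-urlencoded;charset=UTF-8'
    extractContentType(Buffer.from('hi'));          // null (Body leaves the type to the caller)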
+ * + * @param Body instance Instance of Body + * @return Void + */ +function writeToStream(dest, instance) { + const body = instance.body; + + + if (body === null) { + // body is null + dest.end(); + } else if (isBlob(body)) { + body.stream().pipe(dest); + } else if (Buffer.isBuffer(body)) { + // body is buffer + dest.write(body); + dest.end(); + } else { + // body is stream + body.pipe(dest); + } } -module.exports = LRUCache +// expose Promise +Body.Promise = global.Promise; +/** + * headers.js + * + * Headers class offers convenient helpers + */ -/***/ }), +const invalidTokenRegex = /[^\^_`a-zA-Z\-0-9!#$%&'*+.|~]/; +const invalidHeaderCharRegex = /[^\t\x20-\x7e\x80-\xff]/; -/***/ 7426: -/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { +function validateName(name) { + name = `${name}`; + if (invalidTokenRegex.test(name) || name === '') { + throw new TypeError(`${name} is not a legal HTTP header name`); + } +} -/*! - * mime-db - * Copyright(c) 2014 Jonathan Ong - * Copyright(c) 2015-2022 Douglas Christopher Wilson - * MIT Licensed +function validateValue(value) { + value = `${value}`; + if (invalidHeaderCharRegex.test(value)) { + throw new TypeError(`${value} is not a legal HTTP header value`); + } +} + +/** + * Find the key in the map object given a header name. + * + * Returns undefined if not found. + * + * @param String name Header name + * @return String|Undefined */ +function find(map, name) { + name = name.toLowerCase(); + for (const key in map) { + if (key.toLowerCase() === name) { + return key; + } + } + return undefined; +} -/** - * Module exports. - */ +const MAP = Symbol('map'); +class Headers { + /** + * Headers class + * + * @param Object headers Response headers + * @return Void + */ + constructor() { + let init = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : undefined; -module.exports = __nccwpck_require__(3765) + this[MAP] = Object.create(null); + if (init instanceof Headers) { + const rawHeaders = init.raw(); + const headerNames = Object.keys(rawHeaders); -/***/ }), + for (const headerName of headerNames) { + for (const value of rawHeaders[headerName]) { + this.append(headerName, value); + } + } -/***/ 3583: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + return; + } -"use strict"; -/*! - * mime-types - * Copyright(c) 2014 Jonathan Ong - * Copyright(c) 2015 Douglas Christopher Wilson - * MIT Licensed - */ + // We don't worry about converting prop to ByteString here as append() + // will handle it. + if (init == null) ; else if (typeof init === 'object') { + const method = init[Symbol.iterator]; + if (method != null) { + if (typeof method !== 'function') { + throw new TypeError('Header pairs must be iterable'); + } + // sequence> + // Note: per spec we have to first exhaust the lists then process them + const pairs = []; + for (const pair of init) { + if (typeof pair !== 'object' || typeof pair[Symbol.iterator] !== 'function') { + throw new TypeError('Each header pair must be iterable'); + } + pairs.push(Array.from(pair)); + } + for (const pair of pairs) { + if (pair.length !== 2) { + throw new TypeError('Each header pair must be a name/value tuple'); + } + this.append(pair[0], pair[1]); + } + } else { + // record + for (const key of Object.keys(init)) { + const value = init[key]; + this.append(key, value); + } + } + } else { + throw new TypeError('Provided initializer must be an object'); + } + } -/** - * Module dependencies. 
- * @private - */ + /** + * Return combined header value given name + * + * @param String name Header name + * @return Mixed + */ + get(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key === undefined) { + return null; + } -var db = __nccwpck_require__(7426) -var extname = (__nccwpck_require__(1017).extname) + return this[MAP][key].join(', '); + } -/** - * Module variables. - * @private - */ + /** + * Iterate over all headers + * + * @param Function callback Executed for each item with parameters (value, name, thisArg) + * @param Boolean thisArg `this` context for callback function + * @return Void + */ + forEach(callback) { + let thisArg = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : undefined; -var EXTRACT_TYPE_REGEXP = /^\s*([^;\s]*)(?:;|\s|$)/ -var TEXT_TYPE_REGEXP = /^text\//i + let pairs = getHeaders(this); + let i = 0; + while (i < pairs.length) { + var _pairs$i = pairs[i]; + const name = _pairs$i[0], + value = _pairs$i[1]; -/** - * Module exports. - * @public - */ + callback.call(thisArg, value, name, this); + pairs = getHeaders(this); + i++; + } + } -exports.charset = charset -exports.charsets = { lookup: charset } -exports.contentType = contentType -exports.extension = extension -exports.extensions = Object.create(null) -exports.lookup = lookup -exports.types = Object.create(null) + /** + * Overwrite header values given name + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + set(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + this[MAP][key !== undefined ? key : name] = [value]; + } -// Populate the extensions/types maps -populateMaps(exports.extensions, exports.types) + /** + * Append a value onto existing header + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + append(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + if (key !== undefined) { + this[MAP][key].push(value); + } else { + this[MAP][name] = [value]; + } + } -/** - * Get the default charset for a MIME type. - * - * @param {string} type - * @return {boolean|string} - */ + /** + * Check for header name existence + * + * @param String name Header name + * @return Boolean + */ + has(name) { + name = `${name}`; + validateName(name); + return find(this[MAP], name) !== undefined; + } -function charset (type) { - if (!type || typeof type !== 'string') { - return false - } + /** + * Delete all header values given name + * + * @param String name Header name + * @return Void + */ + delete(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key !== undefined) { + delete this[MAP][key]; + } + } - // TODO: use media-typer - var match = EXTRACT_TYPE_REGEXP.exec(type) - var mime = match && db[match[1].toLowerCase()] + /** + * Return raw headers (non-spec api) + * + * @return Object + */ + raw() { + return this[MAP]; + } - if (mime && mime.charset) { - return mime.charset - } + /** + * Get an iterator on keys. + * + * @return Iterator + */ + keys() { + return createHeadersIterator(this, 'key'); + } - // default text/* to utf-8 - if (match && TEXT_TYPE_REGEXP.test(match[1])) { - return 'UTF-8' - } + /** + * Get an iterator on values. 
+ * + * @return Iterator + */ + values() { + return createHeadersIterator(this, 'value'); + } - return false + /** + * Get an iterator on entries. + * + * This is the default iterator of the Headers object. + * + * @return Iterator + */ + [Symbol.iterator]() { + return createHeadersIterator(this, 'key+value'); + } } +Headers.prototype.entries = Headers.prototype[Symbol.iterator]; -/** - * Create a full Content-Type header given a MIME type or extension. - * - * @param {string} str - * @return {boolean|string} - */ +Object.defineProperty(Headers.prototype, Symbol.toStringTag, { + value: 'Headers', + writable: false, + enumerable: false, + configurable: true +}); -function contentType (str) { - // TODO: should this even be in this module? - if (!str || typeof str !== 'string') { - return false - } +Object.defineProperties(Headers.prototype, { + get: { enumerable: true }, + forEach: { enumerable: true }, + set: { enumerable: true }, + append: { enumerable: true }, + has: { enumerable: true }, + delete: { enumerable: true }, + keys: { enumerable: true }, + values: { enumerable: true }, + entries: { enumerable: true } +}); - var mime = str.indexOf('/') === -1 - ? exports.lookup(str) - : str +function getHeaders(headers) { + let kind = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'key+value'; - if (!mime) { - return false - } + const keys = Object.keys(headers[MAP]).sort(); + return keys.map(kind === 'key' ? function (k) { + return k.toLowerCase(); + } : kind === 'value' ? function (k) { + return headers[MAP][k].join(', '); + } : function (k) { + return [k.toLowerCase(), headers[MAP][k].join(', ')]; + }); +} - // TODO: use content-type or other module - if (mime.indexOf('charset') === -1) { - var charset = exports.charset(mime) - if (charset) mime += '; charset=' + charset.toLowerCase() - } +const INTERNAL = Symbol('internal'); - return mime +function createHeadersIterator(target, kind) { + const iterator = Object.create(HeadersIteratorPrototype); + iterator[INTERNAL] = { + target, + kind, + index: 0 + }; + return iterator; } -/** - * Get the default extension for a MIME type. - * - * @param {string} type - * @return {boolean|string} - */ +const HeadersIteratorPrototype = Object.setPrototypeOf({ + next() { + // istanbul ignore if + if (!this || Object.getPrototypeOf(this) !== HeadersIteratorPrototype) { + throw new TypeError('Value of `this` is not a HeadersIterator'); + } -function extension (type) { - if (!type || typeof type !== 'string') { - return false - } + var _INTERNAL = this[INTERNAL]; + const target = _INTERNAL.target, + kind = _INTERNAL.kind, + index = _INTERNAL.index; - // TODO: use media-typer - var match = EXTRACT_TYPE_REGEXP.exec(type) + const values = getHeaders(target, kind); + const len = values.length; + if (index >= len) { + return { + value: undefined, + done: true + }; + } - // get extensions - var exts = match && exports.extensions[match[1].toLowerCase()] + this[INTERNAL].index = index + 1; - if (!exts || !exts.length) { - return false - } + return { + value: values[index], + done: false + }; + } +}, Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]()))); - return exts[0] -} +Object.defineProperty(HeadersIteratorPrototype, Symbol.toStringTag, { + value: 'HeadersIterator', + writable: false, + enumerable: false, + configurable: true +}); /** - * Lookup the MIME type for a file path/extension. + * Export the Headers object in a form that Node.js can consume. 
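Editor's note: the Headers class above stores values under their original spelling but matches names case-insensitively through find(). A stripped-down sketch of that storage scheme (names here are illustrative):

    // keys keep their original spelling; matching compares lowercase
    const map = Object.create(null);

    function findKey(name) {
      const lower = name.toLowerCase();
      for (const key in map) {
        if (key.toLowerCase() === lower) return key;
      }
      return undefined;
    }

    function append(name, value) {
      const key = findKey(name);
      if (key !== undefined) map[key].push(value);
      else map[name] = [value];
    }

    append('Content-Type', 'text/plain');
    append('content-type', 'text/html');
    console.log(map); // { 'Content-Type': [ 'text/plain', 'text/html' ] }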
* - * @param {string} path - * @return {boolean|string} + * @param Headers headers + * @return Object */ +function exportNodeCompatibleHeaders(headers) { + const obj = Object.assign({ __proto__: null }, headers[MAP]); -function lookup (path) { - if (!path || typeof path !== 'string') { - return false - } - - // get the extension ("ext" or ".ext" or full path) - var extension = extname('x.' + path) - .toLowerCase() - .substr(1) - - if (!extension) { - return false - } + // http.request() only supports string as Host header. This hack makes + // specifying custom Host header possible. + const hostHeaderKey = find(headers[MAP], 'Host'); + if (hostHeaderKey !== undefined) { + obj[hostHeaderKey] = obj[hostHeaderKey][0]; + } - return exports.types[extension] || false + return obj; } /** - * Populate the extensions and types maps. - * @private + * Create a Headers object from an object of headers, ignoring those that do + * not conform to HTTP grammar productions. + * + * @param Object obj Object of headers + * @return Headers */ - -function populateMaps (extensions, types) { - // source preference (least -> most) - var preference = ['nginx', 'apache', undefined, 'iana'] - - Object.keys(db).forEach(function forEachMimeType (type) { - var mime = db[type] - var exts = mime.extensions - - if (!exts || !exts.length) { - return - } - - // mime -> extensions - extensions[type] = exts - - // extension -> mime - for (var i = 0; i < exts.length; i++) { - var extension = exts[i] - - if (types[extension]) { - var from = preference.indexOf(db[types[extension]].source) - var to = preference.indexOf(mime.source) - - if (types[extension] !== 'application/octet-stream' && - (from > to || (from === to && types[extension].substr(0, 12) === 'application/'))) { - // skip the remapping - continue - } - } - - // set the extension -> mime - types[extension] = type - } - }) +function createHeadersLenient(obj) { + const headers = new Headers(); + for (const name of Object.keys(obj)) { + if (invalidTokenRegex.test(name)) { + continue; + } + if (Array.isArray(obj[name])) { + for (const val of obj[name]) { + if (invalidHeaderCharRegex.test(val)) { + continue; + } + if (headers[MAP][name] === undefined) { + headers[MAP][name] = [val]; + } else { + headers[MAP][name].push(val); + } + } + } else if (!invalidHeaderCharRegex.test(obj[name])) { + headers[MAP][name] = [obj[name]]; + } + } + return headers; } +const INTERNALS$1 = Symbol('Response internals'); -/***/ }), - -/***/ 3973: -/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { - -module.exports = minimatch -minimatch.Minimatch = Minimatch +// fix an issue where "STATUS_CODES" aren't a named export for node <10 +const STATUS_CODES = http.STATUS_CODES; -var path = (function () { try { return __nccwpck_require__(1017) } catch (e) {}}()) || { - sep: '/' -} -minimatch.sep = path.sep +/** + * Response class + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +class Response { + constructor() { + let body = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null; + let opts = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; -var GLOBSTAR = minimatch.GLOBSTAR = Minimatch.GLOBSTAR = {} -var expand = __nccwpck_require__(3717) + Body.call(this, body, opts); -var plTypes = { - '!': { open: '(?:(?!(?:', close: '))[^/]*?)'}, - '?': { open: '(?:', close: ')?' 
}, - '+': { open: '(?:', close: ')+' }, - '*': { open: '(?:', close: ')*' }, - '@': { open: '(?:', close: ')' } -} + const status = opts.status || 200; + const headers = new Headers(opts.headers); -// any single thing other than / -// don't need to escape / when using new RegExp() -var qmark = '[^/]' + if (body != null && !headers.has('Content-Type')) { + const contentType = extractContentType(body); + if (contentType) { + headers.append('Content-Type', contentType); + } + } -// * => any number of characters -var star = qmark + '*?' + this[INTERNALS$1] = { + url: opts.url, + status, + statusText: opts.statusText || STATUS_CODES[status], + headers, + counter: opts.counter + }; + } -// ** when dots are allowed. Anything goes, except .. and . -// not (^ or / followed by one or two dots followed by $ or /), -// followed by anything, any number of times. -var twoStarDot = '(?:(?!(?:\\\/|^)(?:\\.{1,2})($|\\\/)).)*?' + get url() { + return this[INTERNALS$1].url || ''; + } -// not a ^ or / followed by a dot, -// followed by anything, any number of times. -var twoStarNoDot = '(?:(?!(?:\\\/|^)\\.).)*?' + get status() { + return this[INTERNALS$1].status; + } -// characters that need to be escaped in RegExp. -var reSpecials = charSet('().*{}+?[]^$\\!') + /** + * Convenience property representing if the request ended normally + */ + get ok() { + return this[INTERNALS$1].status >= 200 && this[INTERNALS$1].status < 300; + } -// "abc" -> { a:true, b:true, c:true } -function charSet (s) { - return s.split('').reduce(function (set, c) { - set[c] = true - return set - }, {}) -} + get redirected() { + return this[INTERNALS$1].counter > 0; + } -// normalizes slashes. -var slashSplit = /\/+/ + get statusText() { + return this[INTERNALS$1].statusText; + } -minimatch.filter = filter -function filter (pattern, options) { - options = options || {} - return function (p, i, list) { - return minimatch(p, pattern, options) - } -} + get headers() { + return this[INTERNALS$1].headers; + } -function ext (a, b) { - b = b || {} - var t = {} - Object.keys(a).forEach(function (k) { - t[k] = a[k] - }) - Object.keys(b).forEach(function (k) { - t[k] = b[k] - }) - return t + /** + * Clone this response + * + * @return Response + */ + clone() { + return new Response(clone(this), { + url: this.url, + status: this.status, + statusText: this.statusText, + headers: this.headers, + ok: this.ok, + redirected: this.redirected + }); + } } -minimatch.defaults = function (def) { - if (!def || typeof def !== 'object' || !Object.keys(def).length) { - return minimatch - } - - var orig = minimatch +Body.mixIn(Response.prototype); - var m = function minimatch (p, pattern, options) { - return orig(p, pattern, ext(def, options)) - } +Object.defineProperties(Response.prototype, { + url: { enumerable: true }, + status: { enumerable: true }, + ok: { enumerable: true }, + redirected: { enumerable: true }, + statusText: { enumerable: true }, + headers: { enumerable: true }, + clone: { enumerable: true } +}); - m.Minimatch = function Minimatch (pattern, options) { - return new orig.Minimatch(pattern, ext(def, options)) - } - m.Minimatch.defaults = function defaults (options) { - return orig.defaults(ext(def, options)).Minimatch - } +Object.defineProperty(Response.prototype, Symbol.toStringTag, { + value: 'Response', + writable: false, + enumerable: false, + configurable: true +}); - m.filter = function filter (pattern, options) { - return orig.filter(pattern, ext(def, options)) - } +const INTERNALS$2 = Symbol('Request internals'); +const URL = Url.URL 
|| whatwgUrl.URL; - m.defaults = function defaults (options) { - return orig.defaults(ext(def, options)) - } +// fix an issue where "format", "parse" aren't a named export for node <10 +const parse_url = Url.parse; +const format_url = Url.format; - m.makeRe = function makeRe (pattern, options) { - return orig.makeRe(pattern, ext(def, options)) - } +/** + * Wrapper around `new URL` to handle arbitrary URLs + * + * @param {string} urlStr + * @return {void} + */ +function parseURL(urlStr) { + /* + Check whether the URL is absolute or not + Scheme: https://tools.ietf.org/html/rfc3986#section-3.1 + Absolute URL: https://tools.ietf.org/html/rfc3986#section-4.3 + */ + if (/^[a-zA-Z][a-zA-Z\d+\-.]*:/.exec(urlStr)) { + urlStr = new URL(urlStr).toString(); + } - m.braceExpand = function braceExpand (pattern, options) { - return orig.braceExpand(pattern, ext(def, options)) - } + // Fallback to old implementation for arbitrary URLs + return parse_url(urlStr); +} - m.match = function (list, pattern, options) { - return orig.match(list, pattern, ext(def, options)) - } +const streamDestructionSupported = 'destroy' in Stream.Readable.prototype; - return m +/** + * Check if a value is an instance of Request. + * + * @param Mixed input + * @return Boolean + */ +function isRequest(input) { + return typeof input === 'object' && typeof input[INTERNALS$2] === 'object'; } -Minimatch.defaults = function (def) { - return minimatch.defaults(def).Minimatch +function isAbortSignal(signal) { + const proto = signal && typeof signal === 'object' && Object.getPrototypeOf(signal); + return !!(proto && proto.constructor.name === 'AbortSignal'); } -function minimatch (p, pattern, options) { - assertValidPattern(pattern) +/** + * Request class + * + * @param Mixed input Url or Request instance + * @param Object init Custom options + * @return Void + */ +class Request { + constructor(input) { + let init = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; - if (!options) options = {} + let parsedURL; - // shortcut: comments match nothing. - if (!options.nocomment && pattern.charAt(0) === '#') { - return false - } + // normalize input + if (!isRequest(input)) { + if (input && input.href) { + // in order to support Node.js' Url objects; though WHATWG's URL objects + // will fall into this branch also (since their `toString()` will return + // `href` property anyway) + parsedURL = parseURL(input.href); + } else { + // coerce input to a string before attempting to parse + parsedURL = parseURL(`${input}`); + } + input = {}; + } else { + parsedURL = parseURL(input.url); + } - return new Minimatch(pattern, options).match(p) -} + let method = init.method || input.method || 'GET'; + method = method.toUpperCase(); -function Minimatch (pattern, options) { - if (!(this instanceof Minimatch)) { - return new Minimatch(pattern, options) - } + if ((init.body != null || isRequest(input) && input.body !== null) && (method === 'GET' || method === 'HEAD')) { + throw new TypeError('Request with GET/HEAD method cannot have body'); + } - assertValidPattern(pattern) + let inputBody = init.body != null ? init.body : isRequest(input) && input.body !== null ? 
clone(input) : null; - if (!options) options = {} + Body.call(this, inputBody, { + timeout: init.timeout || input.timeout || 0, + size: init.size || input.size || 0 + }); - pattern = pattern.trim() + const headers = new Headers(init.headers || input.headers || {}); - // windows support: need to use /, not \ - if (!options.allowWindowsEscape && path.sep !== '/') { - pattern = pattern.split(path.sep).join('/') - } + if (inputBody != null && !headers.has('Content-Type')) { + const contentType = extractContentType(inputBody); + if (contentType) { + headers.append('Content-Type', contentType); + } + } - this.options = options - this.set = [] - this.pattern = pattern - this.regexp = null - this.negate = false - this.comment = false - this.empty = false - this.partial = !!options.partial + let signal = isRequest(input) ? input.signal : null; + if ('signal' in init) signal = init.signal; - // make the set of regexps etc. - this.make() -} + if (signal != null && !isAbortSignal(signal)) { + throw new TypeError('Expected signal to be an instanceof AbortSignal'); + } -Minimatch.prototype.debug = function () {} + this[INTERNALS$2] = { + method, + redirect: init.redirect || input.redirect || 'follow', + headers, + parsedURL, + signal + }; -Minimatch.prototype.make = make -function make () { - var pattern = this.pattern - var options = this.options + // node-fetch-only options + this.follow = init.follow !== undefined ? init.follow : input.follow !== undefined ? input.follow : 20; + this.compress = init.compress !== undefined ? init.compress : input.compress !== undefined ? input.compress : true; + this.counter = init.counter || input.counter || 0; + this.agent = init.agent || input.agent; + } - // empty patterns and comments match nothing. - if (!options.nocomment && pattern.charAt(0) === '#') { - this.comment = true - return - } - if (!pattern) { - this.empty = true - return - } + get method() { + return this[INTERNALS$2].method; + } - // step 1: figure out negation, etc. - this.parseNegate() + get url() { + return format_url(this[INTERNALS$2].parsedURL); + } - // step 2: expand braces - var set = this.globSet = this.braceExpand() + get headers() { + return this[INTERNALS$2].headers; + } - if (options.debug) this.debug = function debug() { console.error.apply(console, arguments) } + get redirect() { + return this[INTERNALS$2].redirect; + } - this.debug(this.pattern, set) + get signal() { + return this[INTERNALS$2].signal; + } - // step 3: now we have a set, so turn each one into a series of path-portion - // matching patterns. 
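Editor's note: a short usage sketch of the Request class assembled above, assuming the bundle's public exports (node-fetch v2 exposes Request); values are illustrative:

    const { Request } = require('node-fetch');

    const req = new Request('https://example.com/api', {
      method: 'POST',
      body: 'a=1',   // string body, so Content-Type is inferred as shown above
      timeout: 5000, // node-fetch extension, not part of the WHATWG spec
    });
    console.log(req.method);                      // 'POST'
    console.log(req.url);                         // 'https://example.com/api'
    console.log(req.headers.get('content-type')); // 'text/plain;charset=UTF-8'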
- // These will be regexps, except in the case of "**", which is - // set to the GLOBSTAR object for globstar behavior, - // and will not contain any / characters - set = this.globParts = set.map(function (s) { - return s.split(slashSplit) - }) + /** + * Clone this request + * + * @return Request + */ + clone() { + return new Request(this); + } +} - this.debug(this.pattern, set) +Body.mixIn(Request.prototype); - // glob --> regexps - set = set.map(function (s, si, set) { - return s.map(this.parse, this) - }, this) +Object.defineProperty(Request.prototype, Symbol.toStringTag, { + value: 'Request', + writable: false, + enumerable: false, + configurable: true +}); - this.debug(this.pattern, set) +Object.defineProperties(Request.prototype, { + method: { enumerable: true }, + url: { enumerable: true }, + headers: { enumerable: true }, + redirect: { enumerable: true }, + clone: { enumerable: true }, + signal: { enumerable: true } +}); - // filter out everything that didn't compile properly. - set = set.filter(function (s) { - return s.indexOf(false) === -1 - }) +/** + * Convert a Request to Node.js http request options. + * + * @param Request A Request instance + * @return Object The options object to be passed to http.request + */ +function getNodeRequestOptions(request) { + const parsedURL = request[INTERNALS$2].parsedURL; + const headers = new Headers(request[INTERNALS$2].headers); - this.debug(this.pattern, set) + // fetch step 1.3 + if (!headers.has('Accept')) { + headers.set('Accept', '*/*'); + } - this.set = set -} + // Basic fetch + if (!parsedURL.protocol || !parsedURL.hostname) { + throw new TypeError('Only absolute URLs are supported'); + } -Minimatch.prototype.parseNegate = parseNegate -function parseNegate () { - var pattern = this.pattern - var negate = false - var options = this.options - var negateOffset = 0 + if (!/^https?:$/.test(parsedURL.protocol)) { + throw new TypeError('Only HTTP(S) protocols are supported'); + } - if (options.nonegate) return + if (request.signal && request.body instanceof Stream.Readable && !streamDestructionSupported) { + throw new Error('Cancellation of streamed requests with AbortSignal is not supported in node < 8'); + } - for (var i = 0, l = pattern.length - ; i < l && pattern.charAt(i) === '!' - ; i++) { - negate = !negate - negateOffset++ - } + // HTTP-network-or-cache fetch steps 2.4-2.7 + let contentLengthValue = null; + if (request.body == null && /^(POST|PUT)$/i.test(request.method)) { + contentLengthValue = '0'; + } + if (request.body != null) { + const totalBytes = getTotalBytes(request); + if (typeof totalBytes === 'number') { + contentLengthValue = String(totalBytes); + } + } + if (contentLengthValue) { + headers.set('Content-Length', contentLengthValue); + } - if (negateOffset) this.pattern = pattern.substr(negateOffset) - this.negate = negate -} + // HTTP-network-or-cache fetch step 2.11 + if (!headers.has('User-Agent')) { + headers.set('User-Agent', 'node-fetch/1.0 (+https://github.com/bitinn/node-fetch)'); + } -// Brace expansion: -// a{b,c}d -> abd acd -// a{b,}c -> abc ac -// a{0..3}d -> a0d a1d a2d a3d -// a{b,c{d,e}f}g -> abg acdfg acefg -// a{b,c}d{e,f}g -> abdeg acdeg abdeg abdfg -// -// Invalid sets are not expanded. 
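Editor's note: the Content-Length logic above (HTTP-network-or-cache fetch steps 2.4-2.7) reduces to three cases; a condensed restatement with an illustrative helper name:

    // body absent + POST/PUT     -> send 'Content-Length: 0'
    // body with a knowable size  -> send the byte count
    // stream of unknown length   -> omit the header (chunked encoding)
    function contentLengthFor(body, method, totalBytes) {
      if (body == null) return /^(POST|PUT)$/i.test(method) ? '0' : null;
      return typeof totalBytes === 'number' ? String(totalBytes) : null;
    }

    // contentLengthFor(null, 'POST')         -> '0'
    // contentLengthFor('a=1', 'POST', 3)     -> '3'
    // contentLengthFor(stream, 'POST', null) -> null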
-// a{2..}b -> a{2..}b -// a{b}c -> a{b}c -minimatch.braceExpand = function (pattern, options) { - return braceExpand(pattern, options) -} + // HTTP-network-or-cache fetch step 2.15 + if (request.compress && !headers.has('Accept-Encoding')) { + headers.set('Accept-Encoding', 'gzip,deflate'); + } -Minimatch.prototype.braceExpand = braceExpand + let agent = request.agent; + if (typeof agent === 'function') { + agent = agent(parsedURL); + } -function braceExpand (pattern, options) { - if (!options) { - if (this instanceof Minimatch) { - options = this.options - } else { - options = {} - } - } + if (!headers.has('Connection') && !agent) { + headers.set('Connection', 'close'); + } - pattern = typeof pattern === 'undefined' - ? this.pattern : pattern + // HTTP-network fetch step 4.2 + // chunked encoding is handled by Node.js - assertValidPattern(pattern) + return Object.assign({}, parsedURL, { + method: request.method, + headers: exportNodeCompatibleHeaders(headers), + agent + }); +} - // Thanks to Yeting Li for - // improving this regexp to avoid a ReDOS vulnerability. - if (options.nobrace || !/\{(?:(?!\{).)*\}/.test(pattern)) { - // shortcut. no need to expand. - return [pattern] - } +/** + * abort-error.js + * + * AbortError interface for cancelled requests + */ - return expand(pattern) -} +/** + * Create AbortError instance + * + * @param String message Error message for human + * @return AbortError + */ +function AbortError(message) { + Error.call(this, message); -var MAX_PATTERN_LENGTH = 1024 * 64 -var assertValidPattern = function (pattern) { - if (typeof pattern !== 'string') { - throw new TypeError('invalid pattern') - } + this.type = 'aborted'; + this.message = message; - if (pattern.length > MAX_PATTERN_LENGTH) { - throw new TypeError('pattern is too long') - } + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); } -// parse a component of the expanded set. -// At this point, no pattern may contain "/" in it -// so we're going to return a 2d array, where each entry is the full -// pattern, split on '/', and then turned into a regular expression. -// A regexp is made at the end which joins each array with an -// escaped /, and another full one which joins each regexp with |. -// -// Following the lead of Bash 4.1, note that "**" only has special meaning -// when it is the *only* thing in a path portion. Otherwise, any series -// of * is equivalent to a single *. Globstar behavior is enabled by -// default, and can be disabled by setting options.noglobstar. -Minimatch.prototype.parse = parse -var SUBPARSE = {} -function parse (pattern, isSub) { - assertValidPattern(pattern) +AbortError.prototype = Object.create(Error.prototype); +AbortError.prototype.constructor = AbortError; +AbortError.prototype.name = 'AbortError'; - var options = this.options +const URL$1 = Url.URL || whatwgUrl.URL; - // shortcuts - if (pattern === '**') { - if (!options.noglobstar) - return GLOBSTAR - else - pattern = '*' - } - if (pattern === '') return '' +// fix an issue where "PassThrough", "resolve" aren't a named export for node <10 +const PassThrough$1 = Stream.PassThrough; - var re = '' - var hasMagic = !!options.nocase - var escaping = false - // ? => one single character - var patternListStack = [] - var negativeLists = [] - var stateChar - var inClass = false - var reClassStart = -1 - var classStart = -1 - // . and .. never match anything that doesn't start with ., - // even when options.dot is set. 
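Editor's note: AbortError above is the classic pre-class custom-error recipe: call Error for its side effects, set name and message, fix the prototype chain, and trim the constructor out of the V8 stack trace. The same shape with a generic name:

    function MyError(message) {
      Error.call(this, message);
      this.message = message;
      this.name = 'MyError';
      // drop this constructor frame from the captured stack (V8 only)
      if (Error.captureStackTrace) Error.captureStackTrace(this, MyError);
    }
    MyError.prototype = Object.create(Error.prototype);
    MyError.prototype.constructor = MyError;

    console.log(new MyError('boom') instanceof Error); // true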
- var patternStart = pattern.charAt(0) === '.' ? '' // anything - // not (start or / followed by . or .. followed by / or end) - : options.dot ? '(?!(?:^|\\\/)\\.{1,2}(?:$|\\\/))' - : '(?!\\.)' - var self = this +const isDomainOrSubdomain = function isDomainOrSubdomain(destination, original) { + const orig = new URL$1(original).hostname; + const dest = new URL$1(destination).hostname; + + return orig === dest || orig[orig.length - dest.length - 1] === '.' && orig.endsWith(dest); +}; + +/** + * Fetch function + * + * @param Mixed url Absolute url or Request instance + * @param Object opts Fetch options + * @return Promise + */ +function fetch(url, opts) { - function clearStateChar () { - if (stateChar) { - // we had some state-tracking character - // that wasn't consumed by this pass. - switch (stateChar) { - case '*': - re += star - hasMagic = true - break - case '?': - re += qmark - hasMagic = true - break - default: - re += '\\' + stateChar - break - } - self.debug('clearStateChar %j %j', stateChar, re) - stateChar = false - } - } + // allow custom promise + if (!fetch.Promise) { + throw new Error('native promise missing, set fetch.Promise to your favorite alternative'); + } - for (var i = 0, len = pattern.length, c - ; (i < len) && (c = pattern.charAt(i)) - ; i++) { - this.debug('%s\t%s %s %j', pattern, i, re, c) + Body.Promise = fetch.Promise; - // skip over any that are escaped. - if (escaping && reSpecials[c]) { - re += '\\' + c - escaping = false - continue - } + // wrap http.request into fetch + return new fetch.Promise(function (resolve, reject) { + // build request object + const request = new Request(url, opts); + const options = getNodeRequestOptions(request); - switch (c) { - /* istanbul ignore next */ - case '/': { - // completely not allowed, even escaped. - // Should already be path-split by now. - return false - } + const send = (options.protocol === 'https:' ? https : http).request; + const signal = request.signal; - case '\\': - clearStateChar() - escaping = true - continue + let response = null; - // the various stateChar values - // for the "extglob" stuff. - case '?': - case '*': - case '+': - case '@': - case '!': - this.debug('%s\t%s %s %j <-- stateChar', pattern, i, re, c) + const abort = function abort() { + let error = new AbortError('The user aborted a request.'); + reject(error); + if (request.body && request.body instanceof Stream.Readable) { + request.body.destroy(error); + } + if (!response || !response.body) return; + response.body.emit('error', error); + }; - // all of those are literals inside a class, except that - // the glob [!a] means [^a] in regexp - if (inClass) { - this.debug(' in class') - if (c === '!' && i === classStart + 1) c = '^' - re += c - continue - } + if (signal && signal.aborted) { + abort(); + return; + } - // if we already have a stateChar, then it means - // that there was something like ** or +? in there. - // Handle the stateChar, then proceed with this one. - self.debug('call clearStateChar %j', stateChar) - clearStateChar() - stateChar = c - // if extglob is disabled, then +(asdf|foo) isn't a thing. - // just clear the statechar *now*, rather than even diving into - // the patternList stuff. 
- if (options.noext) clearStateChar() - continue + const abortAndFinalize = function abortAndFinalize() { + abort(); + finalize(); + }; - case '(': - if (inClass) { - re += '(' - continue - } + // send request + const req = send(options); + let reqTimeout; - if (!stateChar) { - re += '\\(' - continue - } + if (signal) { + signal.addEventListener('abort', abortAndFinalize); + } - patternListStack.push({ - type: stateChar, - start: i - 1, - reStart: re.length, - open: plTypes[stateChar].open, - close: plTypes[stateChar].close - }) - // negation is (?:(?!js)[^/]*) - re += stateChar === '!' ? '(?:(?!(?:' : '(?:' - this.debug('plType %j %j', stateChar, re) - stateChar = false - continue + function finalize() { + req.abort(); + if (signal) signal.removeEventListener('abort', abortAndFinalize); + clearTimeout(reqTimeout); + } - case ')': - if (inClass || !patternListStack.length) { - re += '\\)' - continue - } + if (request.timeout) { + req.once('socket', function (socket) { + reqTimeout = setTimeout(function () { + reject(new FetchError(`network timeout at: ${request.url}`, 'request-timeout')); + finalize(); + }, request.timeout); + }); + } - clearStateChar() - hasMagic = true - var pl = patternListStack.pop() - // negation is (?:(?!js)[^/]*) - // The others are (?:) - re += pl.close - if (pl.type === '!') { - negativeLists.push(pl) - } - pl.reEnd = re.length - continue + req.on('error', function (err) { + reject(new FetchError(`request to ${request.url} failed, reason: ${err.message}`, 'system', err)); + finalize(); + }); - case '|': - if (inClass || !patternListStack.length || escaping) { - re += '\\|' - escaping = false - continue - } + req.on('response', function (res) { + clearTimeout(reqTimeout); - clearStateChar() - re += '|' - continue + const headers = createHeadersLenient(res.headers); - // these are mostly the same in regexp and glob - case '[': - // swallow any state-tracking char before the [ - clearStateChar() + // HTTP fetch step 5 + if (fetch.isRedirect(res.statusCode)) { + // HTTP fetch step 5.2 + const location = headers.get('Location'); - if (inClass) { - re += '\\' + c - continue - } + // HTTP fetch step 5.3 + let locationURL = null; + try { + locationURL = location === null ? null : new URL$1(location, request.url).toString(); + } catch (err) { + // error here can only be invalid URL in Location: header + // do not throw when options.redirect == manual + // let the user extract the errorneous redirect URL + if (request.redirect !== 'manual') { + reject(new FetchError(`uri requested responds with an invalid redirect URL: ${location}`, 'invalid-redirect')); + finalize(); + return; + } + } - inClass = true - classStart = i - reClassStart = re.length - re += c - continue + // HTTP fetch step 5.5 + switch (request.redirect) { + case 'error': + reject(new FetchError(`uri requested responds with a redirect, redirect mode is set to error: ${request.url}`, 'no-redirect')); + finalize(); + return; + case 'manual': + // node-fetch-specific step: make manual redirect a bit easier to use by setting the Location header value to the resolved URL. 
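Editor's note: step 5.3 above resolves the Location header against the request URL, so relative redirects work; an unparseable value throws, and the code converts that into a FetchError unless redirect mode is 'manual'. A two-line illustration with Node's WHATWG URL:

    const { URL } = require('url');

    // relative Location headers are resolved against the request URL
    const resolved = new URL('/next', 'https://example.com/start').toString();
    console.log(resolved); // 'https://example.com/next'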
+ if (locationURL !== null) { + // handle corrupted header + try { + headers.set('Location', locationURL); + } catch (err) { + // istanbul ignore next: nodejs server prevent invalid response headers, we can't test this through normal request + reject(err); + } + } + break; + case 'follow': + // HTTP-redirect fetch step 2 + if (locationURL === null) { + break; + } - case ']': - // a right bracket shall lose its special - // meaning and represent itself in - // a bracket expression if it occurs - // first in the list. -- POSIX.2 2.8.3.2 - if (i === classStart + 1 || !inClass) { - re += '\\' + c - escaping = false - continue - } + // HTTP-redirect fetch step 5 + if (request.counter >= request.follow) { + reject(new FetchError(`maximum redirect reached at: ${request.url}`, 'max-redirect')); + finalize(); + return; + } - // handle the case where we left a class open. - // "[z-a]" is valid, equivalent to "\[z-a\]" - // split where the last [ was, make sure we don't have - // an invalid re. if so, re-walk the contents of the - // would-be class to re-translate any characters that - // were passed through as-is - // TODO: It would probably be faster to determine this - // without a try/catch and a new RegExp, but it's tricky - // to do safely. For now, this is safe and works. - var cs = pattern.substring(classStart + 1, i) - try { - RegExp('[' + cs + ']') - } catch (er) { - // not a valid class! - var sp = this.parse(cs, SUBPARSE) - re = re.substr(0, reClassStart) + '\\[' + sp[0] + '\\]' - hasMagic = hasMagic || sp[1] - inClass = false - continue - } + // HTTP-redirect fetch step 6 (counter increment) + // Create a new Request object. + const requestOpts = { + headers: new Headers(request.headers), + follow: request.follow, + counter: request.counter + 1, + agent: request.agent, + compress: request.compress, + method: request.method, + body: request.body, + signal: request.signal, + timeout: request.timeout, + size: request.size + }; - // finish up the class. - hasMagic = true - inClass = false - re += c - continue + if (!isDomainOrSubdomain(request.url, locationURL)) { + for (const name of ['authorization', 'www-authenticate', 'cookie', 'cookie2']) { + requestOpts.headers.delete(name); + } + } - default: - // swallow any state char that wasn't consumed - clearStateChar() + // HTTP-redirect fetch step 9 + if (res.statusCode !== 303 && request.body && getTotalBytes(request) === null) { + reject(new FetchError('Cannot follow redirect with body being a readable stream', 'unsupported-redirect')); + finalize(); + return; + } - if (escaping) { - // no need - escaping = false - } else if (reSpecials[c] - && !(c === '^' && inClass)) { - re += '\\' - } + // HTTP-redirect fetch step 11 + if (res.statusCode === 303 || (res.statusCode === 301 || res.statusCode === 302) && request.method === 'POST') { + requestOpts.method = 'GET'; + requestOpts.body = undefined; + requestOpts.headers.delete('content-length'); + } - re += c + // HTTP-redirect fetch step 15 + resolve(fetch(new Request(locationURL, requestOpts))); + finalize(); + return; + } + } - } // switch - } // for + // prepare response + res.once('end', function () { + if (signal) signal.removeEventListener('abort', abortAndFinalize); + }); + let body = res.pipe(new PassThrough$1()); - // handle the case where we left a class open. - // "[abc" is valid, equivalent to "\[abc" - if (inClass) { - // split where the last [ was, and escape it - // this is a huge pita. 
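Editor's note: the header-stripping block above drops credentials (authorization, cookies) whenever the redirect leaves the original domain or its parent. A standalone restatement of the isDomainOrSubdomain() check defined earlier in this hunk:

    const { URL } = require('url');

    // true when the original host is the destination or a subdomain of it
    function isDomainOrSubdomain(destination, original) {
      const orig = new URL(original).hostname;
      const dest = new URL(destination).hostname;
      return orig === dest ||
        (orig.endsWith(dest) && orig[orig.length - dest.length - 1] === '.');
    }

    console.log(isDomainOrSubdomain('https://a.com', 'https://sub.a.com')); // true, keep headers
    console.log(isDomainOrSubdomain('https://evil.com', 'https://a.com'));  // false, strip them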
We now have to re-walk - // the contents of the would-be class to re-translate - // any characters that were passed through as-is - cs = pattern.substr(classStart + 1) - sp = this.parse(cs, SUBPARSE) - re = re.substr(0, reClassStart) + '\\[' + sp[0] - hasMagic = hasMagic || sp[1] - } + const response_options = { + url: request.url, + status: res.statusCode, + statusText: res.statusMessage, + headers: headers, + size: request.size, + timeout: request.timeout, + counter: request.counter + }; - // handle the case where we had a +( thing at the *end* - // of the pattern. - // each pattern list stack adds 3 chars, and we need to go through - // and escape any | chars that were passed through as-is for the regexp. - // Go through and escape them, taking care not to double-escape any - // | chars that were already escaped. - for (pl = patternListStack.pop(); pl; pl = patternListStack.pop()) { - var tail = re.slice(pl.reStart + pl.open.length) - this.debug('setting tail', re, pl) - // maybe some even number of \, then maybe 1 \, followed by a | - tail = tail.replace(/((?:\\{2}){0,64})(\\?)\|/g, function (_, $1, $2) { - if (!$2) { - // the | isn't already escaped, so escape it. - $2 = '\\' - } + // HTTP-network fetch step 12.1.1.3 + const codings = headers.get('Content-Encoding'); - // need to escape all those slashes *again*, without escaping the - // one that we need for escaping the | character. As it works out, - // escaping an even number of slashes can be done by simply repeating - // it exactly after itself. That's why this trick works. - // - // I am sorry that you have to see this. - return $1 + $1 + $2 + '|' - }) + // HTTP-network fetch step 12.1.1.4: handle content codings - this.debug('tail=%j\n %s', tail, tail, pl, re) - var t = pl.type === '*' ? star - : pl.type === '?' ? qmark - : '\\' + pl.type + // in following scenarios we ignore compression support + // 1. compression support is disabled + // 2. HEAD request + // 3. no Content-Encoding header + // 4. no content response (204) + // 5. content not modified response (304) + if (!request.compress || request.method === 'HEAD' || codings === null || res.statusCode === 204 || res.statusCode === 304) { + response = new Response(body, response_options); + resolve(response); + return; + } - hasMagic = true - re = re.slice(0, pl.reStart) + t + '\\(' + tail - } + // For Node v6+ + // Be less strict when decoding compressed responses, since sometimes + // servers send slightly invalid responses that are still accepted + // by common browsers. + // Always using Z_SYNC_FLUSH is what cURL does. + const zlibOptions = { + flush: zlib.Z_SYNC_FLUSH, + finishFlush: zlib.Z_SYNC_FLUSH + }; - // handle trailing things that only matter at the very end. 
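Editor's note: using Z_SYNC_FLUSH for both flush and finishFlush, as above, lets zlib accept slightly truncated or sloppy gzip bodies that curl and common browsers also tolerate. The equivalent setup against Node's zlib (the modern constants namespace is used here in place of the deprecated top-level alias):

    const zlib = require('zlib');

    const gunzip = zlib.createGunzip({
      flush: zlib.constants.Z_SYNC_FLUSH,
      finishFlush: zlib.constants.Z_SYNC_FLUSH,
    });
    // usage: compressedStream.pipe(gunzip).pipe(destination)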
- clearStateChar() - if (escaping) { - // trailing \\ - re += '\\\\' - } + // for gzip + if (codings == 'gzip' || codings == 'x-gzip') { + body = body.pipe(zlib.createGunzip(zlibOptions)); + response = new Response(body, response_options); + resolve(response); + return; + } - // only need to apply the nodot start if the re starts with - // something that could conceivably capture a dot - var addPatternStart = false - switch (re.charAt(0)) { - case '[': case '.': case '(': addPatternStart = true - } + // for deflate + if (codings == 'deflate' || codings == 'x-deflate') { + // handle the infamous raw deflate response from old servers + // a hack for old IIS and Apache servers + const raw = res.pipe(new PassThrough$1()); + raw.once('data', function (chunk) { + // see http://stackoverflow.com/questions/37519828 + if ((chunk[0] & 0x0F) === 0x08) { + body = body.pipe(zlib.createInflate()); + } else { + body = body.pipe(zlib.createInflateRaw()); + } + response = new Response(body, response_options); + resolve(response); + }); + return; + } - // Hack to work around lack of negative lookbehind in JS - // A pattern like: *.!(x).!(y|z) needs to ensure that a name - // like 'a.xyz.yz' doesn't match. So, the first negative - // lookahead, has to look ALL the way ahead, to the end of - // the pattern. - for (var n = negativeLists.length - 1; n > -1; n--) { - var nl = negativeLists[n] + // for br + if (codings == 'br' && typeof zlib.createBrotliDecompress === 'function') { + body = body.pipe(zlib.createBrotliDecompress()); + response = new Response(body, response_options); + resolve(response); + return; + } - var nlBefore = re.slice(0, nl.reStart) - var nlFirst = re.slice(nl.reStart, nl.reEnd - 8) - var nlLast = re.slice(nl.reEnd - 8, nl.reEnd) - var nlAfter = re.slice(nl.reEnd) + // otherwise, use response as-is + response = new Response(body, response_options); + resolve(response); + }); - nlLast += nlAfter + writeToStream(req, request); + }); +} +/** + * Redirect code matching + * + * @param Number code Status code + * @return Boolean + */ +fetch.isRedirect = function (code) { + return code === 301 || code === 302 || code === 303 || code === 307 || code === 308; +}; - // Handle nested stuff like *(*.js|!(*.json)), where open parens - // mean that we should *not* include the ) in the bit that is considered - // "after" the negated section. - var openParensBefore = nlBefore.split('(').length - 1 - var cleanAfter = nlAfter - for (i = 0; i < openParensBefore; i++) { - cleanAfter = cleanAfter.replace(/\)[+*?]?/, '') - } - nlAfter = cleanAfter +// expose Promise +fetch.Promise = global.Promise; - var dollar = '' - if (nlAfter === '' && isSub !== SUBPARSE) { - dollar = '$' - } - var newRe = nlBefore + nlFirst + nlAfter + dollar + nlLast - re = newRe - } +module.exports = exports = fetch; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports["default"] = exports; +exports.Headers = Headers; +exports.Request = Request; +exports.Response = Response; +exports.FetchError = FetchError; - // if the re is not "" at this point, then we need to make sure - // it doesn't match against an empty path part. - // Otherwise a/* will match a/, which it should not. - if (re !== '' && hasMagic) { - re = '(?=.)' + re - } - if (addPatternStart) { - re = patternStart + re - } +/***/ }), - // parsing just a piece of a larger pattern. 
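Editor's note: the deflate branch above sniffs the first byte to tell zlib-wrapped deflate from the raw deflate that old IIS and Apache servers emitted: in a zlib header the low nibble is 8, the deflate method id. In isolation:

    // zlib-wrapped deflate starts with e.g. 0x78; raw deflate has no header
    function isZlibWrapped(firstChunk) {
      return (firstChunk[0] & 0x0f) === 0x08;
    }

    console.log(isZlibWrapped(Buffer.from([0x78, 0x9c]))); // true  -> createInflate()
    console.log(isZlibWrapped(Buffer.from([0xed, 0x9c]))); // false -> createInflateRaw()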
- if (isSub === SUBPARSE) { - return [re, hasMagic] - } +/***/ 9975: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - // skip the regexp for non-magical patterns - // unescape anything in it, though, so that it'll be - // an exact match against a file etc. - if (!hasMagic) { - return globUnescape(pattern) - } +"use strict"; +/*eslint no-var:0, prefer-arrow-callback: 0, object-shorthand: 0 */ - var flags = options.nocase ? 'i' : '' - try { - var regExp = new RegExp('^' + re + '$', flags) - } catch (er) /* istanbul ignore next - should be impossible */ { - // If it was an invalid regular expression, then it can't match - // anything. This trick looks for a character after the end of - // the string, which is of course impossible, except in multi-line - // mode, but it's not a /m regex. - return new RegExp('$.') - } - regExp._glob = pattern - regExp._src = re - return regExp -} +var Punycode = __nccwpck_require__(5477); -minimatch.makeRe = function (pattern, options) { - return new Minimatch(pattern, options || {}).makeRe() -} -Minimatch.prototype.makeRe = makeRe -function makeRe () { - if (this.regexp || this.regexp === false) return this.regexp +var internals = {}; - // at this point, this.set is a 2d array of partial - // pattern strings, or "**". - // - // It's better to use .match(). This function shouldn't - // be used, really, but it's pretty convenient sometimes, - // when you just want to work with a regex. - var set = this.set - if (!set.length) { - this.regexp = false - return this.regexp - } - var options = this.options +// +// Read rules from file. +// +internals.rules = (__nccwpck_require__(3704).map)(function (rule) { - var twoStar = options.noglobstar ? star - : options.dot ? twoStarDot - : twoStarNoDot - var flags = options.nocase ? 'i' : '' + return { + rule: rule, + suffix: rule.replace(/^(\*\.|\!)/, ''), + punySuffix: -1, + wildcard: rule.charAt(0) === '*', + exception: rule.charAt(0) === '!' + }; +}); - var re = set.map(function (pattern) { - return pattern.map(function (p) { - return (p === GLOBSTAR) ? twoStar - : (typeof p === 'string') ? regExpEscape(p) - : p._src - }).join('\\\/') - }).join('|') - // must match entire pattern - // ending in a * or ** will make it less strict. - re = '^(?:' + re + ')$' +// +// Check is given string ends with `suffix`. +// +internals.endsWith = function (str, suffix) { - // can match anything, as long as it's not this. - if (this.negate) re = '^(?!' + re + ').*$' + return str.indexOf(suffix, str.length - suffix.length) !== -1; +}; - try { - this.regexp = new RegExp(re, flags) - } catch (ex) /* istanbul ignore next - should be impossible */ { - this.regexp = false - } - return this.regexp -} -minimatch.match = function (list, pattern, options) { - options = options || {} - var mm = new Minimatch(pattern, options) - list = list.filter(function (f) { - return mm.match(f) - }) - if (mm.options.nonull && !list.length) { - list.push(pattern) - } - return list -} +// +// Find rule for a given domain. +// +internals.findRule = function (domain) { -Minimatch.prototype.match = function match (f, partial) { - if (typeof partial === 'undefined') partial = this.partial - this.debug('match', f, this.pattern) - // short-circuit in the case of busted things. - // comments, etc. 
- if (this.comment) return false - if (this.empty) return f === '' + var punyDomain = Punycode.toASCII(domain); + return internals.rules.reduce(function (memo, rule) { - if (f === '/' && partial) return true + if (rule.punySuffix === -1){ + rule.punySuffix = Punycode.toASCII(rule.suffix); + } + if (!internals.endsWith(punyDomain, '.' + rule.punySuffix) && punyDomain !== rule.punySuffix) { + return memo; + } + // This has been commented out as it never seems to run. This is because + // sub tlds always appear after their parents and we never find a shorter + // match. + //if (memo) { + // var memoSuffix = Punycode.toASCII(memo.suffix); + // if (memoSuffix.length >= punySuffix.length) { + // return memo; + // } + //} + return rule; + }, null); +}; - var options = this.options - // windows: need to use /, not \ - if (path.sep !== '/') { - f = f.split(path.sep).join('/') - } +// +// Error codes and messages. +// +exports.errorCodes = { + DOMAIN_TOO_SHORT: 'Domain name too short.', + DOMAIN_TOO_LONG: 'Domain name too long. It should be no more than 255 chars.', + LABEL_STARTS_WITH_DASH: 'Domain name label can not start with a dash.', + LABEL_ENDS_WITH_DASH: 'Domain name label can not end with a dash.', + LABEL_TOO_LONG: 'Domain name label should be at most 63 chars long.', + LABEL_TOO_SHORT: 'Domain name label should be at least 1 character long.', + LABEL_INVALID_CHARS: 'Domain name label can only contain alphanumeric characters or dashes.' +}; - // treat the test path as a set of pathparts. - f = f.split(slashSplit) - this.debug(this.pattern, 'split', f) - // just ONE of the pattern sets in this.set needs to match - // in order for it to be valid. If negating, then just one - // match means that we have failed. - // Either way, return on the first hit. +// +// Validate domain name and throw if not valid. +// +// From wikipedia: +// +// Hostnames are composed of series of labels concatenated with dots, as are all +// domain names. Each label must be between 1 and 63 characters long, and the +// entire hostname (including the delimiting dots) has a maximum of 255 chars. +// +// Allowed chars: +// +// * `a-z` +// * `0-9` +// * `-` but not as a starting or ending character +// * `.` as a separator for the textual portions of a domain name +// +// * http://en.wikipedia.org/wiki/Domain_name +// * http://en.wikipedia.org/wiki/Hostname +// +internals.validate = function (input) { - var set = this.set - this.debug(this.pattern, 'set', set) + // Before we can validate we need to take care of IDNs with unicode chars. + var ascii = Punycode.toASCII(input); - // Find the basename of the path by looking for the last non-empty segment - var filename - var i - for (i = f.length - 1; i >= 0; i--) { - filename = f[i] - if (filename) break + if (ascii.length < 1) { + return 'DOMAIN_TOO_SHORT'; + } + if (ascii.length > 255) { + return 'DOMAIN_TOO_LONG'; } - for (i = 0; i < set.length; i++) { - var pattern = set[i] - var file = f - if (options.matchBase && pattern.length === 1) { - file = [filename] + // Check each part's length and allowed chars. 
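Editor's note: the label checks that follow implement the hostname grammar described in the comment block above. A condensed restatement (assumes lowercase input, as parse() lowercases before validating; the bundle reports a specific error code per rule instead of a boolean):

    // each dot-separated label: 1-63 chars of [a-z0-9-], no leading or
    // trailing dash; whole name at most 255 chars
    const validLabel = /^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$/;

    function isValidHostname(name) {
      return name.length >= 1 && name.length <= 255 &&
        name.split('.').every((label) => validLabel.test(label));
    }

    console.log(isValidHostname('my-host.example.com')); // true
    console.log(isValidHostname('-bad-.example.com'));   // false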
+ var labels = ascii.split('.'); + var label; + + for (var i = 0; i < labels.length; ++i) { + label = labels[i]; + if (!label.length) { + return 'LABEL_TOO_SHORT'; } - var hit = this.matchOne(file, pattern, partial) - if (hit) { - if (options.flipNegate) return true - return !this.negate + if (label.length > 63) { + return 'LABEL_TOO_LONG'; + } + if (label.charAt(0) === '-') { + return 'LABEL_STARTS_WITH_DASH'; + } + if (label.charAt(label.length - 1) === '-') { + return 'LABEL_ENDS_WITH_DASH'; + } + if (!/^[a-z0-9\-]+$/.test(label)) { + return 'LABEL_INVALID_CHARS'; } } +}; - // didn't get any hits. this is success if it's a negative - // pattern, failure otherwise. - if (options.flipNegate) return false - return this.negate -} - -// set partial to true to test if, for example, -// "/a/b" matches the start of "/*/b/*/d" -// Partial means, if you run out of file before you run -// out of pattern, then that's fine, as long as all -// the parts match. -Minimatch.prototype.matchOne = function (file, pattern, partial) { - var options = this.options - this.debug('matchOne', - { 'this': this, file: file, pattern: pattern }) +// +// Public API +// - this.debug('matchOne', file.length, pattern.length) - for (var fi = 0, - pi = 0, - fl = file.length, - pl = pattern.length - ; (fi < fl) && (pi < pl) - ; fi++, pi++) { - this.debug('matchOne loop') - var p = pattern[pi] - var f = file[fi] +// +// Parse domain. +// +exports.parse = function (input) { - this.debug(pattern, p, f) + if (typeof input !== 'string') { + throw new TypeError('Domain name must be a string.'); + } - // should be impossible. - // some invalid regexp stuff in the set. - /* istanbul ignore if */ - if (p === false) return false + // Force domain to lowercase. + var domain = input.slice(0).toLowerCase(); - if (p === GLOBSTAR) { - this.debug('GLOBSTAR', [pattern, p, f]) + // Handle FQDN. + // TODO: Simply remove trailing dot? + if (domain.charAt(domain.length - 1) === '.') { + domain = domain.slice(0, domain.length - 1); + } - // "**" - // a/**/b/**/c would match the following: - // a/b/x/y/z/c - // a/x/y/z/b/c - // a/b/x/b/x/c - // a/b/c - // To do this, take the rest of the pattern after - // the **, and see if it would match the file remainder. - // If so, return success. - // If not, the ** "swallows" a segment, and try again. - // This is recursively awful. - // - // a/**/b/**/c matching a/b/x/y/z/c - // - a matches a - // - doublestar - // - matchOne(b/x/y/z/c, b/**/c) - // - b matches b - // - doublestar - // - matchOne(x/y/z/c, c) -> no - // - matchOne(y/z/c, c) -> no - // - matchOne(z/c, c) -> no - // - matchOne(c, c) yes, hit - var fr = fi - var pr = pi + 1 - if (pr === pl) { - this.debug('** at the end') - // a ** at the end will just swallow the rest. - // We have found a match. - // however, it will not swallow /.x, unless - // options.dot is set. - // . and .. are *never* matched by **, for explosively - // exponential reasons. - for (; fi < fl; fi++) { - if (file[fi] === '.' || file[fi] === '..' || - (!options.dot && file[fi].charAt(0) === '.')) return false - } - return true + // Validate and sanitise input. + var error = internals.validate(domain); + if (error) { + return { + input: input, + error: { + message: exports.errorCodes[error], + code: error } + }; + } - // ok, let's see if we can swallow whatever we can. 
- while (fr < fl) { - var swallowee = file[fr] + var parsed = { + input: input, + tld: null, + sld: null, + domain: null, + subdomain: null, + listed: false + }; - this.debug('\nglobstar while', file, fr, pattern, pr, swallowee) + var domainParts = domain.split('.'); - // XXX remove this slice. Just pass the start index. - if (this.matchOne(file.slice(fr), pattern.slice(pr), partial)) { - this.debug('globstar found match!', fr, fl, swallowee) - // found a match. - return true - } else { - // can't swallow "." or ".." ever. - // can only swallow ".foo" when explicitly asked. - if (swallowee === '.' || swallowee === '..' || - (!options.dot && swallowee.charAt(0) === '.')) { - this.debug('dot detected!', file, fr, pattern, pr) - break - } + // Non-Internet TLD + if (domainParts[domainParts.length - 1] === 'local') { + return parsed; + } - // ** swallows a segment, and continue. - this.debug('globstar swallow a segment, and continue') - fr++ - } - } + var handlePunycode = function () { - // no match was found. - // However, in partial mode, we can't say this is necessarily over. - // If there's more *pattern* left, then - /* istanbul ignore if */ - if (partial) { - // ran out of file - this.debug('\n>>> no match, partial?', file, fr, pattern, pr) - if (fr === fl) return true - } - return false + if (!/xn--/.test(domain)) { + return parsed; + } + if (parsed.domain) { + parsed.domain = Punycode.toASCII(parsed.domain); + } + if (parsed.subdomain) { + parsed.subdomain = Punycode.toASCII(parsed.subdomain); } + return parsed; + }; - // something other than ** - // non-magic patterns just have to match exactly - // patterns with magic have been turned into regexps. - var hit - if (typeof p === 'string') { - hit = f === p - this.debug('string match', p, f, hit) - } else { - hit = f.match(p) - this.debug('pattern match', p, f, hit) + var rule = internals.findRule(domain); + + // Unlisted tld. + if (!rule) { + if (domainParts.length < 2) { + return parsed; + } + parsed.tld = domainParts.pop(); + parsed.sld = domainParts.pop(); + parsed.domain = [parsed.sld, parsed.tld].join('.'); + if (domainParts.length) { + parsed.subdomain = domainParts.pop(); } + return handlePunycode(); + } - if (!hit) return false + // At this point we know the public suffix is listed. + parsed.listed = true; + + var tldParts = rule.suffix.split('.'); + var privateParts = domainParts.slice(0, domainParts.length - tldParts.length); + + if (rule.exception) { + privateParts.push(tldParts.shift()); } - // Note: ending in / means that we'll get a final "" - // at the end of the pattern. This can only match a - // corresponding "" at the end of the file. - // If the file ends in /, then it can only match a - // a pattern that ends in /, unless the pattern just - // doesn't have any more for it. But, a/b/ should *not* - // match "a/b/*", even though "" matches against the - // [^/]*? pattern, except in partial mode, where it might - // simply not be reached yet. - // However, a/b/ should still satisfy a/* + parsed.tld = tldParts.join('.'); - // now either we fell off the end of the pattern, or we're done. - if (fi === fl && pi === pl) { - // ran out of pattern and filename at the same time. - // an exact hit! - return true - } else if (fi === fl) { - // ran out of file, but still had pattern left. - // this is ok if we're doing the match as part of - // a glob fs traversal. - return partial - } else /* istanbul ignore else */ if (pi === pl) { - // ran out of pattern, still have file left. 
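// --- Annotation (not part of the bundled diff): tracing `exports.parse`
// (its wildcard/sld handling continues just below) on a listed suffix,
// assuming the standalone `psl` package behaves like this bundled copy.
var psl = require('psl');
var parsed = psl.parse('www.example.com');
// The 'com' rule matches, so tldParts = ['com'] and
// privateParts = ['www', 'example'], giving roughly:
// { input: 'www.example.com', tld: 'com', sld: 'example',
//   domain: 'example.com', subdomain: 'www', listed: true }
// --- End annotation.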
- // this is only acceptable if we're on the very last - // empty segment of a file with a trailing slash. - // a/* should match a/b/ - return (fi === fl - 1) && (file[fi] === '') + if (!privateParts.length) { + return handlePunycode(); } - // should be unreachable. - /* istanbul ignore next */ - throw new Error('wtf?') -} + if (rule.wildcard) { + tldParts.unshift(privateParts.pop()); + parsed.tld = tldParts.join('.'); + } -// replace stuff like \* with * -function globUnescape (s) { - return s.replace(/\\(.)/g, '$1') -} + if (!privateParts.length) { + return handlePunycode(); + } -function regExpEscape (s) { - return s.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, '\\$&') -} + parsed.sld = privateParts.pop(); + parsed.domain = [parsed.sld, parsed.tld].join('.'); + if (privateParts.length) { + parsed.subdomain = privateParts.join('.'); + } -/***/ }), + return handlePunycode(); +}; -/***/ 467: -/***/ ((module, exports, __nccwpck_require__) => { -"use strict"; +// +// Get domain. +// +exports.get = function (domain) { + if (!domain) { + return null; + } + return exports.parse(domain).domain || null; +}; -Object.defineProperty(exports, "__esModule", ({ value: true })); -function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; } +// +// Check whether domain belongs to a known public suffix. +// +exports.isValid = function (domain) { -var Stream = _interopDefault(__nccwpck_require__(2781)); -var http = _interopDefault(__nccwpck_require__(3685)); -var Url = _interopDefault(__nccwpck_require__(7310)); -var whatwgUrl = _interopDefault(__nccwpck_require__(8665)); -var https = _interopDefault(__nccwpck_require__(5687)); -var zlib = _interopDefault(__nccwpck_require__(9796)); + var parsed = exports.parse(domain); + return Boolean(parsed.domain && parsed.listed); +}; -// Based on https://github.com/tmpvar/jsdom/blob/aa85b2abf07766ff7bf5c1f6daafb3726f2f2db5/lib/jsdom/living/blob.js -// fix for "Readable" isn't a named export issue -const Readable = Stream.Readable; +/***/ }), -const BUFFER = Symbol('buffer'); -const TYPE = Symbol('type'); +/***/ 9540: +/***/ ((module) => { -class Blob { - constructor() { - this[TYPE] = ''; +"use strict"; + + +/** Highest positive signed 32-bit float value */ +const maxInt = 2147483647; // aka. 
0x7FFFFFFF or 2^31-1 + +/** Bootstring parameters */ +const base = 36; +const tMin = 1; +const tMax = 26; +const skew = 38; +const damp = 700; +const initialBias = 72; +const initialN = 128; // 0x80 +const delimiter = '-'; // '\x2D' + +/** Regular expressions */ +const regexPunycode = /^xn--/; +const regexNonASCII = /[^\0-\x7E]/; // non-ASCII chars +const regexSeparators = /[\x2E\u3002\uFF0E\uFF61]/g; // RFC 3490 separators - const blobParts = arguments[0]; - const options = arguments[1]; +/** Error messages */ +const errors = { + 'overflow': 'Overflow: input needs wider integers to process', + 'not-basic': 'Illegal input >= 0x80 (not a basic code point)', + 'invalid-input': 'Invalid input' +}; - const buffers = []; - let size = 0; +/** Convenience shortcuts */ +const baseMinusTMin = base - tMin; +const floor = Math.floor; +const stringFromCharCode = String.fromCharCode; - if (blobParts) { - const a = blobParts; - const length = Number(a.length); - for (let i = 0; i < length; i++) { - const element = a[i]; - let buffer; - if (element instanceof Buffer) { - buffer = element; - } else if (ArrayBuffer.isView(element)) { - buffer = Buffer.from(element.buffer, element.byteOffset, element.byteLength); - } else if (element instanceof ArrayBuffer) { - buffer = Buffer.from(element); - } else if (element instanceof Blob) { - buffer = element[BUFFER]; - } else { - buffer = Buffer.from(typeof element === 'string' ? element : String(element)); - } - size += buffer.length; - buffers.push(buffer); - } - } +/*--------------------------------------------------------------------------*/ - this[BUFFER] = Buffer.concat(buffers); +/** + * A generic error utility function. + * @private + * @param {String} type The error type. + * @returns {Error} Throws a `RangeError` with the applicable error message. + */ +function error(type) { + throw new RangeError(errors[type]); +} - let type = options && options.type !== undefined && String(options.type).toLowerCase(); - if (type && !/[^\u0020-\u007E]/.test(type)) { - this[TYPE] = type; - } - } - get size() { - return this[BUFFER].length; - } - get type() { - return this[TYPE]; - } - text() { - return Promise.resolve(this[BUFFER].toString()); - } - arrayBuffer() { - const buf = this[BUFFER]; - const ab = buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); - return Promise.resolve(ab); - } - stream() { - const readable = new Readable(); - readable._read = function () {}; - readable.push(this[BUFFER]); - readable.push(null); - return readable; +/** + * A generic `Array#map` utility function. + * @private + * @param {Array} array The array to iterate over. + * @param {Function} callback The function that gets called for every array + * item. + * @returns {Array} A new array of values returned by the callback function. + */ +function map(array, fn) { + const result = []; + let length = array.length; + while (length--) { + result[length] = fn(array[length]); } - toString() { - return '[object Blob]'; + return result; +} + +/** + * A simple `Array#map`-like wrapper to work with domain name strings or email + * addresses. + * @private + * @param {String} domain The domain name or email address. + * @param {Function} callback The function that gets called for every + * character. + * @returns {Array} A new string of characters returned by the callback + * function. + */ +function mapDomain(string, fn) { + const parts = string.split('@'); + let result = ''; + if (parts.length > 1) { + // In email addresses, only the domain name should be punycoded. 
Leave + // the local part (i.e. everything up to `@`) intact. + result = parts[0] + '@'; + string = parts[1]; } - slice() { - const size = this.size; + // Avoid `split(regex)` for IE8 compatibility. See #17. + string = string.replace(regexSeparators, '\x2E'); + const labels = string.split('.'); + const encoded = map(labels, fn).join('.'); + return result + encoded; +} - const start = arguments[0]; - const end = arguments[1]; - let relativeStart, relativeEnd; - if (start === undefined) { - relativeStart = 0; - } else if (start < 0) { - relativeStart = Math.max(size + start, 0); - } else { - relativeStart = Math.min(start, size); - } - if (end === undefined) { - relativeEnd = size; - } else if (end < 0) { - relativeEnd = Math.max(size + end, 0); +/** + * Creates an array containing the numeric code points of each Unicode + * character in the string. While JavaScript uses UCS-2 internally, + * this function will convert a pair of surrogate halves (each of which + * UCS-2 exposes as separate characters) into a single code point, + * matching UTF-16. + * @see `punycode.ucs2.encode` + * @see + * @memberOf punycode.ucs2 + * @name decode + * @param {String} string The Unicode input string (UCS-2). + * @returns {Array} The new array of code points. + */ +function ucs2decode(string) { + const output = []; + let counter = 0; + const length = string.length; + while (counter < length) { + const value = string.charCodeAt(counter++); + if (value >= 0xD800 && value <= 0xDBFF && counter < length) { + // It's a high surrogate, and there is a next character. + const extra = string.charCodeAt(counter++); + if ((extra & 0xFC00) == 0xDC00) { // Low surrogate. + output.push(((value & 0x3FF) << 10) + (extra & 0x3FF) + 0x10000); + } else { + // It's an unmatched surrogate; only append this code unit, in case the + // next code unit is the high surrogate of a surrogate pair. + output.push(value); + counter--; + } } else { - relativeEnd = Math.min(end, size); + output.push(value); } - const span = Math.max(relativeEnd - relativeStart, 0); - - const buffer = this[BUFFER]; - const slicedBuffer = buffer.slice(relativeStart, relativeStart + span); - const blob = new Blob([], { type: arguments[2] }); - blob[BUFFER] = slicedBuffer; - return blob; } + return output; } -Object.defineProperties(Blob.prototype, { - size: { enumerable: true }, - type: { enumerable: true }, - slice: { enumerable: true } -}); - -Object.defineProperty(Blob.prototype, Symbol.toStringTag, { - value: 'Blob', - writable: false, - enumerable: false, - configurable: true -}); - /** - * fetch-error.js - * - * FetchError interface for operational errors + * Creates a string based on an array of numeric code points. + * @see `punycode.ucs2.decode` + * @memberOf punycode.ucs2 + * @name encode + * @param {Array} codePoints The array of numeric code points. + * @returns {String} The new Unicode string (UCS-2). */ +const ucs2encode = array => String.fromCodePoint(...array); /** - * Create FetchError instance - * - * @param String message Error message for human - * @param String type Error type for machine - * @param String systemError For Node.js system error - * @return FetchError + * Converts a basic code point into a digit/integer. + * @see `digitToBasic()` + * @private + * @param {Number} codePoint The basic numeric code point value. + * @returns {Number} The numeric value of a basic code point (for use in + * representing integers) in the range `0` to `base - 1`, or `base` if + * the code point does not represent a value. 
*/ -function FetchError(message, type, systemError) { - Error.call(this, message); - - this.message = message; - this.type = type; - - // when err.type is `system`, err.code contains system error code - if (systemError) { - this.code = this.errno = systemError.code; - } - - // hide custom error implementation details from end-users - Error.captureStackTrace(this, this.constructor); -} - -FetchError.prototype = Object.create(Error.prototype); -FetchError.prototype.constructor = FetchError; -FetchError.prototype.name = 'FetchError'; - -let convert; -try { - convert = (__nccwpck_require__(2877).convert); -} catch (e) {} - -const INTERNALS = Symbol('Body internals'); +const basicToDigit = function(codePoint) { + if (codePoint - 0x30 < 0x0A) { + return codePoint - 0x16; + } + if (codePoint - 0x41 < 0x1A) { + return codePoint - 0x41; + } + if (codePoint - 0x61 < 0x1A) { + return codePoint - 0x61; + } + return base; +}; -// fix an issue where "PassThrough" isn't a named export for node <10 -const PassThrough = Stream.PassThrough; +/** + * Converts a digit/integer into a basic code point. + * @see `basicToDigit()` + * @private + * @param {Number} digit The numeric value of a basic code point. + * @returns {Number} The basic code point whose value (when used for + * representing integers) is `digit`, which needs to be in the range + * `0` to `base - 1`. If `flag` is non-zero, the uppercase form is + * used; else, the lowercase form is used. The behavior is undefined + * if `flag` is non-zero and `digit` has no uppercase form. + */ +const digitToBasic = function(digit, flag) { + // 0..25 map to ASCII a..z or A..Z + // 26..35 map to ASCII 0..9 + return digit + 22 + 75 * (digit < 26) - ((flag != 0) << 5); +}; /** - * Body mixin - * - * Ref: https://fetch.spec.whatwg.org/#body - * - * @param Stream body Readable stream - * @param Object opts Response options - * @return Void + * Bias adaptation function as per section 3.4 of RFC 3492. + * https://tools.ietf.org/html/rfc3492#section-3.4 + * @private */ -function Body(body) { - var _this = this; +const adapt = function(delta, numPoints, firstTime) { + let k = 0; + delta = firstTime ? floor(delta / damp) : delta >> 1; + delta += floor(delta / numPoints); + for (/* no initialization */; delta > baseMinusTMin * tMax >> 1; k += base) { + delta = floor(delta / baseMinusTMin); + } + return floor(k + (baseMinusTMin + 1) * delta / (delta + skew)); +}; - var _ref = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}, - _ref$size = _ref.size; +/** + * Converts a Punycode string of ASCII-only symbols to a string of Unicode + * symbols. + * @memberOf punycode + * @param {String} input The Punycode string of ASCII-only symbols. + * @returns {String} The resulting string of Unicode symbols. + */ +const decode = function(input) { + // Don't use UCS-2. + const output = []; + const inputLength = input.length; + let i = 0; + let n = initialN; + let bias = initialBias; - let size = _ref$size === undefined ? 0 : _ref$size; - var _ref$timeout = _ref.timeout; - let timeout = _ref$timeout === undefined ? 0 : _ref$timeout; + // Handle the basic code points: let `basic` be the number of input code + // points before the last delimiter, or `0` if there is none, then copy + // the first basic code points to the output. 
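// --- Annotation (not part of the bundled diff): spot-checking the digit
// mapping defined above. Bootstring digits 0..25 encode as 'a'..'z' and
// 26..35 as '0'..'9'; `basicToDigit` inverts `digitToBasic` and also accepts
// uppercase letters, so decoding is case-insensitive:
// digitToBasic(0, 0)  === 97  // 'a'
// digitToBasic(25, 0) === 122 // 'z'
// digitToBasic(26, 0) === 48  // '0'
// digitToBasic(35, 0) === 57  // '9'
// basicToDigit(0x61)  === 0   // 'a'
// basicToDigit(0x41)  === 0   // 'A'
// basicToDigit(0x39)  === 35  // '9'
// --- End annotation.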
- if (body == null) { - // body is undefined or null - body = null; - } else if (isURLSearchParams(body)) { - // body is a URLSearchParams - body = Buffer.from(body.toString()); - } else if (isBlob(body)) ; else if (Buffer.isBuffer(body)) ; else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') { - // body is ArrayBuffer - body = Buffer.from(body); - } else if (ArrayBuffer.isView(body)) { - // body is ArrayBufferView - body = Buffer.from(body.buffer, body.byteOffset, body.byteLength); - } else if (body instanceof Stream) ; else { - // none of the above - // coerce to string then buffer - body = Buffer.from(String(body)); + let basic = input.lastIndexOf(delimiter); + if (basic < 0) { + basic = 0; } - this[INTERNALS] = { - body, - disturbed: false, - error: null - }; - this.size = size; - this.timeout = timeout; - if (body instanceof Stream) { - body.on('error', function (err) { - const error = err.name === 'AbortError' ? err : new FetchError(`Invalid response body while trying to fetch ${_this.url}: ${err.message}`, 'system', err); - _this[INTERNALS].error = error; - }); + for (let j = 0; j < basic; ++j) { + // if it's not a basic code point + if (input.charCodeAt(j) >= 0x80) { + error('not-basic'); + } + output.push(input.charCodeAt(j)); } -} -Body.prototype = { - get body() { - return this[INTERNALS].body; - }, + // Main decoding loop: start just after the last delimiter if any basic code + // points were copied; start at the beginning otherwise. - get bodyUsed() { - return this[INTERNALS].disturbed; - }, + for (let index = basic > 0 ? basic + 1 : 0; index < inputLength; /* no final expression */) { - /** - * Decode response as ArrayBuffer - * - * @return Promise - */ - arrayBuffer() { - return consumeBody.call(this).then(function (buf) { - return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); - }); - }, + // `index` is the index of the next character to be consumed. + // Decode a generalized variable-length integer into `delta`, + // which gets added to `i`. The overflow checking is easier + // if we increase `i` as we go, then subtract off its starting + // value at the end to obtain `delta`. + let oldi = i; + for (let w = 1, k = base; /* no condition */; k += base) { - /** - * Return raw response as Blob - * - * @return Promise - */ - blob() { - let ct = this.headers && this.headers.get('content-type') || ''; - return consumeBody.call(this).then(function (buf) { - return Object.assign( - // Prevent copying - new Blob([], { - type: ct.toLowerCase() - }), { - [BUFFER]: buf - }); - }); - }, + if (index >= inputLength) { + error('invalid-input'); + } - /** - * Decode response as json - * - * @return Promise - */ - json() { - var _this2 = this; + const digit = basicToDigit(input.charCodeAt(index++)); - return consumeBody.call(this).then(function (buffer) { - try { - return JSON.parse(buffer.toString()); - } catch (err) { - return Body.Promise.reject(new FetchError(`invalid json response body at ${_this2.url} reason: ${err.message}`, 'invalid-json')); + if (digit >= base || digit > floor((maxInt - i) / w)) { + error('overflow'); } - }); - }, - /** - * Decode response as text - * - * @return Promise - */ - text() { - return consumeBody.call(this).then(function (buffer) { - return buffer.toString(); - }); - }, + i += digit * w; + const t = k <= bias ? tMin : (k >= bias + tMax ? 
tMax : k - bias); - /** - * Decode response as buffer (non-spec api) - * - * @return Promise - */ - buffer() { - return consumeBody.call(this); - }, + if (digit < t) { + break; + } - /** - * Decode response as text, while automatically detecting the encoding and - * trying to decode to UTF-8 (non-spec api) - * - * @return Promise - */ - textConverted() { - var _this3 = this; + const baseMinusT = base - t; + if (w > floor(maxInt / baseMinusT)) { + error('overflow'); + } - return consumeBody.call(this).then(function (buffer) { - return convertBody(buffer, _this3.headers); - }); - } -}; + w *= baseMinusT; -// In browsers, all properties are enumerable. -Object.defineProperties(Body.prototype, { - body: { enumerable: true }, - bodyUsed: { enumerable: true }, - arrayBuffer: { enumerable: true }, - blob: { enumerable: true }, - json: { enumerable: true }, - text: { enumerable: true } -}); + } -Body.mixIn = function (proto) { - for (const name of Object.getOwnPropertyNames(Body.prototype)) { - // istanbul ignore else: future proof - if (!(name in proto)) { - const desc = Object.getOwnPropertyDescriptor(Body.prototype, name); - Object.defineProperty(proto, name, desc); + const out = output.length + 1; + bias = adapt(i - oldi, out, oldi == 0); + + // `i` was supposed to wrap around from `out` to `0`, + // incrementing `n` each time, so we'll fix that now: + if (floor(i / out) > maxInt - n) { + error('overflow'); } + + n += floor(i / out); + i %= out; + + // Insert `n` at position `i` of the output. + output.splice(i++, 0, n); + } + + return String.fromCodePoint(...output); }; /** - * Consume and convert an entire Body to a Buffer. - * - * Ref: https://fetch.spec.whatwg.org/#concept-body-consume-body - * - * @return Promise + * Converts a string of Unicode symbols (e.g. a domain name label) to a + * Punycode string of ASCII-only symbols. + * @memberOf punycode + * @param {String} input The string of Unicode symbols. + * @returns {String} The resulting Punycode string of ASCII-only symbols. */ -function consumeBody() { - var _this4 = this; - - if (this[INTERNALS].disturbed) { - return Body.Promise.reject(new TypeError(`body used already for: ${this.url}`)); - } - - this[INTERNALS].disturbed = true; +const encode = function(input) { + const output = []; - if (this[INTERNALS].error) { - return Body.Promise.reject(this[INTERNALS].error); - } + // Convert the input in UCS-2 to an array of Unicode code points. + input = ucs2decode(input); - let body = this.body; + // Cache the length. + let inputLength = input.length; - // body is null - if (body === null) { - return Body.Promise.resolve(Buffer.alloc(0)); - } + // Initialize the state. + let n = initialN; + let delta = 0; + let bias = initialBias; - // body is blob - if (isBlob(body)) { - body = body.stream(); + // Handle the basic code points. + for (const currentValue of input) { + if (currentValue < 0x80) { + output.push(stringFromCharCode(currentValue)); + } } - // body is buffer - if (Buffer.isBuffer(body)) { - return Body.Promise.resolve(body); - } + let basicLength = output.length; + let handledCPCount = basicLength; - // istanbul ignore if: should never happen - if (!(body instanceof Stream)) { - return Body.Promise.resolve(Buffer.alloc(0)); + // `handledCPCount` is the number of code points that have been handled; + // `basicLength` is the number of basic code points. + + // Finish the basic string with a delimiter unless it's empty. 
+ if (basicLength) { + output.push(delimiter); } - // body is stream - // get ready to actually consume the body - let accum = []; - let accumBytes = 0; - let abort = false; + // Main encoding loop: + while (handledCPCount < inputLength) { - return new Body.Promise(function (resolve, reject) { - let resTimeout; + // All non-basic code points < n have been handled already. Find the next + // larger one: + let m = maxInt; + for (const currentValue of input) { + if (currentValue >= n && currentValue < m) { + m = currentValue; + } + } - // allow timeout on slow response body - if (_this4.timeout) { - resTimeout = setTimeout(function () { - abort = true; - reject(new FetchError(`Response timeout while trying to fetch ${_this4.url} (over ${_this4.timeout}ms)`, 'body-timeout')); - }, _this4.timeout); + // Increase `delta` enough to advance the decoder's state to , + // but guard against overflow. + const handledCPCountPlusOne = handledCPCount + 1; + if (m - n > floor((maxInt - delta) / handledCPCountPlusOne)) { + error('overflow'); } - // handle stream errors - body.on('error', function (err) { - if (err.name === 'AbortError') { - // if the request was aborted, reject with this Error - abort = true; - reject(err); - } else { - // other errors, such as incorrect content-encoding - reject(new FetchError(`Invalid response body while trying to fetch ${_this4.url}: ${err.message}`, 'system', err)); - } - }); + delta += (m - n) * handledCPCountPlusOne; + n = m; - body.on('data', function (chunk) { - if (abort || chunk === null) { - return; + for (const currentValue of input) { + if (currentValue < n && ++delta > maxInt) { + error('overflow'); } + if (currentValue == n) { + // Represent delta as a generalized variable-length integer. + let q = delta; + for (let k = base; /* no condition */; k += base) { + const t = k <= bias ? tMin : (k >= bias + tMax ? tMax : k - bias); + if (q < t) { + break; + } + const qMinusT = q - t; + const baseMinusT = base - t; + output.push( + stringFromCharCode(digitToBasic(t + qMinusT % baseMinusT, 0)) + ); + q = floor(qMinusT / baseMinusT); + } - if (_this4.size && accumBytes + chunk.length > _this4.size) { - abort = true; - reject(new FetchError(`content size at ${_this4.url} over limit: ${_this4.size}`, 'max-size')); - return; + output.push(stringFromCharCode(digitToBasic(q, 0))); + bias = adapt(delta, handledCPCountPlusOne, handledCPCount == basicLength); + delta = 0; + ++handledCPCount; } + } - accumBytes += chunk.length; - accum.push(chunk); - }); - - body.on('end', function () { - if (abort) { - return; - } + ++delta; + ++n; - clearTimeout(resTimeout); + } + return output.join(''); +}; - try { - resolve(Buffer.concat(accum, accumBytes)); - } catch (err) { - // handle streams that have accumulated too much data (issue #414) - reject(new FetchError(`Could not create Buffer from response body for ${_this4.url}: ${err.message}`, 'system', err)); - } - }); +/** + * Converts a Punycode string representing a domain name or an email address + * to Unicode. Only the Punycoded parts of the input will be converted, i.e. + * it doesn't matter if you call it on a string that has already been + * converted to Unicode. + * @memberOf punycode + * @param {String} input The Punycoded domain name or email address to + * convert to Unicode. + * @returns {String} The Unicode representation of the given Punycode + * string. + */ +const toUnicode = function(input) { + return mapDomain(input, function(string) { + return regexPunycode.test(string) + ? 
decode(string.slice(4).toLowerCase()) + : string; }); -} +}; /** - * Detect buffer encoding and convert to target encoding - * ref: http://www.w3.org/TR/2011/WD-html5-20110113/parsing.html#determining-the-character-encoding - * - * @param Buffer buffer Incoming buffer - * @param String encoding Target encoding - * @return String + * Converts a Unicode string representing a domain name or an email address to + * Punycode. Only the non-ASCII parts of the domain name will be converted, + * i.e. it doesn't matter if you call it with a domain that's already in + * ASCII. + * @memberOf punycode + * @param {String} input The domain name or email address to convert, as a + * Unicode string. + * @returns {String} The Punycode representation of the given domain name or + * email address. */ -function convertBody(buffer, headers) { - if (typeof convert !== 'function') { - throw new Error('The package `encoding` must be installed to use the textConverted() function'); - } - - const ct = headers.get('content-type'); - let charset = 'utf-8'; - let res, str; +const toASCII = function(input) { + return mapDomain(input, function(string) { + return regexNonASCII.test(string) + ? 'xn--' + encode(string) + : string; + }); +}; - // header - if (ct) { - res = /charset=([^;]*)/i.exec(ct); - } +/*--------------------------------------------------------------------------*/ - // no charset in content type, peek at response body for at most 1024 bytes - str = buffer.slice(0, 1024).toString(); +/** Define the public API */ +const punycode = { + /** + * A string representing the current Punycode.js version number. + * @memberOf punycode + * @type String + */ + 'version': '2.1.0', + /** + * An object of methods to convert from JavaScript's internal character + * representation (UCS-2) to Unicode code points, and back. + * @see + * @memberOf punycode + * @type Object + */ + 'ucs2': { + 'decode': ucs2decode, + 'encode': ucs2encode + }, + 'decode': decode, + 'encode': encode, + 'toASCII': toASCII, + 'toUnicode': toUnicode +}; - // html5 - if (!res && str) { - res = / { - // found charset - if (res) { - charset = res.pop(); +"use strict"; - // prevent decode issues when sites use incorrect encoding - // ref: https://hsivonen.fi/encoding-menu/ - if (charset === 'gb2312' || charset === 'gbk') { - charset = 'gb18030'; - } - } - // turn raw buffers into a single utf-8 buffer - return convert(buffer, 'UTF-8', charset).toString(); -} +var has = Object.prototype.hasOwnProperty + , undef; /** - * Detect a URLSearchParams object - * ref: https://github.com/bitinn/node-fetch/issues/296#issuecomment-307598143 + * Decode a URI encoded string. * - * @param Object obj Object to detect by type or brand - * @return String + * @param {String} input The URI encoded string. + * @returns {String|Null} The decoded string. + * @api private */ -function isURLSearchParams(obj) { - // Duck-typing as a necessary condition. - if (typeof obj !== 'object' || typeof obj.append !== 'function' || typeof obj.delete !== 'function' || typeof obj.get !== 'function' || typeof obj.getAll !== 'function' || typeof obj.has !== 'function' || typeof obj.set !== 'function') { - return false; - } - - // Brand-checking and more duck-typing as optional condition. 
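// --- Annotation (not part of the bundled diff): end-to-end behaviour of the
// Punycode.js API above, assuming the standalone `punycode` package matches
// this bundled copy; 'mañana.com' is the example from its documentation.
const punycode = require('punycode/'); // trailing slash selects the userland
                                       // package over Node's deprecated core module
punycode.toASCII('mañana.com');          // 'xn--maana-pta.com'
punycode.toUnicode('xn--maana-pta.com'); // 'mañana.com'
punycode.toASCII('münchen.de');          // 'xn--mnchen-3ya.de'
// Only non-ASCII labels are rewritten; `mapDomain` leaves ASCII labels and
// the local part of an email address untouched.
// --- End annotation.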
- return obj.constructor.name === 'URLSearchParams' || Object.prototype.toString.call(obj) === '[object URLSearchParams]' || typeof obj.sort === 'function'; +function decode(input) { + try { + return decodeURIComponent(input.replace(/\+/g, ' ')); + } catch (e) { + return null; + } } /** - * Check if `obj` is a W3C `Blob` object (which `File` inherits from) - * @param {*} obj - * @return {boolean} + * Attempts to encode a given input. + * + * @param {String} input The string that needs to be encoded. + * @returns {String|Null} The encoded string. + * @api private */ -function isBlob(obj) { - return typeof obj === 'object' && typeof obj.arrayBuffer === 'function' && typeof obj.type === 'string' && typeof obj.stream === 'function' && typeof obj.constructor === 'function' && typeof obj.constructor.name === 'string' && /^(Blob|File)$/.test(obj.constructor.name) && /^(Blob|File)$/.test(obj[Symbol.toStringTag]); +function encode(input) { + try { + return encodeURIComponent(input); + } catch (e) { + return null; + } } /** - * Clone body given Res/Req instance + * Simple query string parser. * - * @param Mixed instance Response or Request instance - * @return Mixed + * @param {String} query The query string that needs to be parsed. + * @returns {Object} + * @api public */ -function clone(instance) { - let p1, p2; - let body = instance.body; +function querystring(query) { + var parser = /([^=?#&]+)=?([^&]*)/g + , result = {} + , part; - // don't allow cloning a used body - if (instance.bodyUsed) { - throw new Error('cannot clone body after it is used'); - } + while (part = parser.exec(query)) { + var key = decode(part[1]) + , value = decode(part[2]); - // check that body is a stream and not form-data object - // note: we can't clone the form-data object without having it as a dependency - if (body instanceof Stream && typeof body.getBoundary !== 'function') { - // tee instance body - p1 = new PassThrough(); - p2 = new PassThrough(); - body.pipe(p1); - body.pipe(p2); - // set instance body to teed body and return the other teed body - instance[INTERNALS].body = p1; - body = p2; - } + // + // Prevent overriding of existing properties. This ensures that build-in + // methods like `toString` or __proto__ are not overriden by malicious + // querystrings. + // + // In the case if failed decoding, we want to omit the key/value pairs + // from the result. + // + if (key === null || value === null || key in result) continue; + result[key] = value; + } - return body; + return result; } /** - * Performs the operation "extract a `Content-Type` value from |object|" as - * specified in the specification: - * https://fetch.spec.whatwg.org/#concept-bodyinit-extract - * - * This function assumes that instance.body is present. + * Transform a query string to an object. * - * @param Mixed instance Any options.body input + * @param {Object} obj Object that should be transformed. + * @param {String} prefix Optional prefix. 
+ * @returns {String} + * @api public */ -function extractContentType(body) { - if (body === null) { - // body is null - return null; - } else if (typeof body === 'string') { - // body is string - return 'text/plain;charset=UTF-8'; - } else if (isURLSearchParams(body)) { - // body is a URLSearchParams - return 'application/x-www-form-urlencoded;charset=UTF-8'; - } else if (isBlob(body)) { - // body is blob - return body.type || null; - } else if (Buffer.isBuffer(body)) { - // body is buffer - return null; - } else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') { - // body is ArrayBuffer - return null; - } else if (ArrayBuffer.isView(body)) { - // body is ArrayBufferView - return null; - } else if (typeof body.getBoundary === 'function') { - // detect form data input from form-data module - return `multipart/form-data;boundary=${body.getBoundary()}`; - } else if (body instanceof Stream) { - // body is stream - // can't really do much about this - return null; - } else { - // Body constructor defaults other things to string - return 'text/plain;charset=UTF-8'; - } -} +function querystringify(obj, prefix) { + prefix = prefix || ''; -/** - * The Fetch Standard treats this as if "total bytes" is a property on the body. - * For us, we have to explicitly get it with a function. - * - * ref: https://fetch.spec.whatwg.org/#concept-body-total-bytes - * - * @param Body instance Instance of Body - * @return Number? Number of bytes, or null if not possible - */ -function getTotalBytes(instance) { - const body = instance.body; + var pairs = [] + , value + , key; + + // + // Optionally prefix with a '?' if needed + // + if ('string' !== typeof prefix) prefix = '?'; + for (key in obj) { + if (has.call(obj, key)) { + value = obj[key]; - if (body === null) { - // body is null - return 0; - } else if (isBlob(body)) { - return body.size; - } else if (Buffer.isBuffer(body)) { - // body is buffer - return body.length; - } else if (body && typeof body.getLengthSync === 'function') { - // detect form data input from form-data module - if (body._lengthRetrievers && body._lengthRetrievers.length == 0 || // 1.x - body.hasKnownLength && body.hasKnownLength()) { - // 2.x - return body.getLengthSync(); - } - return null; - } else { - // body is stream - return null; - } -} + // + // Edge cases where we actually want to encode the value to an empty + // string instead of the stringified value. + // + if (!value && (value === null || value === undef || isNaN(value))) { + value = ''; + } -/** - * Write a Body to a Node.js WritableStream (e.g. http.Request) object. - * - * @param Body instance Instance of Body - * @return Void - */ -function writeToStream(dest, instance) { - const body = instance.body; + key = encode(key); + value = encode(value); + // + // If we failed to encode the strings, we should bail out as we don't + // want to add invalid strings to the query. + // + if (key === null || value === null) continue; + pairs.push(key +'='+ value); + } + } - if (body === null) { - // body is null - dest.end(); - } else if (isBlob(body)) { - body.stream().pipe(dest); - } else if (Buffer.isBuffer(body)) { - // body is buffer - dest.write(body); - dest.end(); - } else { - // body is stream - body.pipe(dest); - } + return pairs.length ? prefix + pairs.join('&') : ''; } -// expose Promise -Body.Promise = global.Promise; +// +// Expose the module. 
+// +exports.stringify = querystringify; +exports.parse = querystring; -/** - * headers.js - * - * Headers class offers convenient helpers - */ -const invalidTokenRegex = /[^\^_`a-zA-Z\-0-9!#$%&'*+.|~]/; -const invalidHeaderCharRegex = /[^\t\x20-\x7e\x80-\xff]/; +/***/ }), -function validateName(name) { - name = `${name}`; - if (invalidTokenRegex.test(name) || name === '') { - throw new TypeError(`${name} is not a legal HTTP header name`); - } -} +/***/ 4742: +/***/ ((module) => { + +"use strict"; -function validateValue(value) { - value = `${value}`; - if (invalidHeaderCharRegex.test(value)) { - throw new TypeError(`${value} is not a legal HTTP header value`); - } -} /** - * Find the key in the map object given a header name. - * - * Returns undefined if not found. + * Check if we're required to add a port number. * - * @param String name Header name - * @return String|Undefined - */ -function find(map, name) { - name = name.toLowerCase(); - for (const key in map) { - if (key.toLowerCase() === name) { - return key; - } - } - return undefined; -} - -const MAP = Symbol('map'); -class Headers { - /** - * Headers class - * - * @param Object headers Response headers - * @return Void - */ - constructor() { - let init = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : undefined; + * @see https://url.spec.whatwg.org/#default-port + * @param {Number|String} port Port number we need to check + * @param {String} protocol Protocol we need to check against. + * @returns {Boolean} Is it a default port for the given protocol + * @api private + */ +module.exports = function required(port, protocol) { + protocol = protocol.split(':')[0]; + port = +port; - this[MAP] = Object.create(null); + if (!port) return false; - if (init instanceof Headers) { - const rawHeaders = init.raw(); - const headerNames = Object.keys(rawHeaders); + switch (protocol) { + case 'http': + case 'ws': + return port !== 80; - for (const headerName of headerNames) { - for (const value of rawHeaders[headerName]) { - this.append(headerName, value); - } - } + case 'https': + case 'wss': + return port !== 443; - return; - } + case 'ftp': + return port !== 21; - // We don't worry about converting prop to ByteString here as append() - // will handle it. 
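// --- Annotation (not part of the bundled diff): usage of the two small
// modules above, assuming the standalone `querystringify` and
// `requires-port` packages match these bundled copies.
var qs = require('querystringify');
qs.parse('?foo=bar&baz=qux');       // { foo: 'bar', baz: 'qux' }
qs.stringify({ foo: 'bar' }, true); // '?foo=bar' (non-string prefix -> '?')
// Duplicate keys and inherited names such as 'toString' are skipped by the
// `key in result` guard, so a malicious query cannot clobber the result.

var required = require('requires-port');
required('8080', 'http'); // true  -- not the default port
required('80', 'http');   // false -- default for http
required('443', 'wss');   // false -- default for wss
// --- End annotation.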
- if (init == null) ; else if (typeof init === 'object') { - const method = init[Symbol.iterator]; - if (method != null) { - if (typeof method !== 'function') { - throw new TypeError('Header pairs must be iterable'); - } + case 'gopher': + return port !== 70; - // sequence> - // Note: per spec we have to first exhaust the lists then process them - const pairs = []; - for (const pair of init) { - if (typeof pair !== 'object' || typeof pair[Symbol.iterator] !== 'function') { - throw new TypeError('Each header pair must be iterable'); - } - pairs.push(Array.from(pair)); - } + case 'file': + return false; + } - for (const pair of pairs) { - if (pair.length !== 2) { - throw new TypeError('Each header pair must be a name/value tuple'); - } - this.append(pair[0], pair[1]); - } - } else { - // record - for (const key of Object.keys(init)) { - const value = init[key]; - this.append(key, value); - } - } - } else { - throw new TypeError('Provided initializer must be an object'); - } - } + return port !== 0; +}; - /** - * Return combined header value given name - * - * @param String name Header name - * @return Mixed - */ - get(name) { - name = `${name}`; - validateName(name); - const key = find(this[MAP], name); - if (key === undefined) { - return null; - } - return this[MAP][key].join(', '); - } +/***/ }), - /** - * Iterate over all headers - * - * @param Function callback Executed for each item with parameters (value, name, thisArg) - * @param Boolean thisArg `this` context for callback function - * @return Void - */ - forEach(callback) { - let thisArg = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : undefined; +/***/ 2043: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - let pairs = getHeaders(this); - let i = 0; - while (i < pairs.length) { - var _pairs$i = pairs[i]; - const name = _pairs$i[0], - value = _pairs$i[1]; +;(function (sax) { // wrapper for non-node envs + sax.parser = function (strict, opt) { return new SAXParser(strict, opt) } + sax.SAXParser = SAXParser + sax.SAXStream = SAXStream + sax.createStream = createStream - callback.call(thisArg, value, name, this); - pairs = getHeaders(this); - i++; - } - } + // When we pass the MAX_BUFFER_LENGTH position, start checking for buffer overruns. + // When we check, schedule the next check for MAX_BUFFER_LENGTH - (max(buffer lengths)), + // since that's the earliest that a buffer overrun could occur. This way, checks are + // as rare as required, but as often as necessary to ensure never crossing this bound. + // Furthermore, buffers are only tested at most once per write(), so passing a very + // large string into write() might have undesirable effects, but this is manageable by + // the caller, so it is assumed to be safe. Thus, a call to write() may, in the extreme + // edge case, result in creating at most one complete copy of the string passed in. + // Set to Infinity to have unlimited buffers. + sax.MAX_BUFFER_LENGTH = 64 * 1024 - /** - * Overwrite header values given name - * - * @param String name Header name - * @param String value Header value - * @return Void - */ - set(name, value) { - name = `${name}`; - value = `${value}`; - validateName(name); - validateValue(value); - const key = find(this[MAP], name); - this[MAP][key !== undefined ? 
key : name] = [value]; - } + var buffers = [ + 'comment', 'sgmlDecl', 'textNode', 'tagName', 'doctype', + 'procInstName', 'procInstBody', 'entity', 'attribName', + 'attribValue', 'cdata', 'script' + ] - /** - * Append a value onto existing header - * - * @param String name Header name - * @param String value Header value - * @return Void - */ - append(name, value) { - name = `${name}`; - value = `${value}`; - validateName(name); - validateValue(value); - const key = find(this[MAP], name); - if (key !== undefined) { - this[MAP][key].push(value); - } else { - this[MAP][name] = [value]; - } - } + sax.EVENTS = [ + 'text', + 'processinginstruction', + 'sgmldeclaration', + 'doctype', + 'comment', + 'opentagstart', + 'attribute', + 'opentag', + 'closetag', + 'opencdata', + 'cdata', + 'closecdata', + 'error', + 'end', + 'ready', + 'script', + 'opennamespace', + 'closenamespace' + ] - /** - * Check for header name existence - * - * @param String name Header name - * @return Boolean - */ - has(name) { - name = `${name}`; - validateName(name); - return find(this[MAP], name) !== undefined; - } + function SAXParser (strict, opt) { + if (!(this instanceof SAXParser)) { + return new SAXParser(strict, opt) + } - /** - * Delete all header values given name - * - * @param String name Header name - * @return Void - */ - delete(name) { - name = `${name}`; - validateName(name); - const key = find(this[MAP], name); - if (key !== undefined) { - delete this[MAP][key]; - } - } + var parser = this + clearBuffers(parser) + parser.q = parser.c = '' + parser.bufferCheckPosition = sax.MAX_BUFFER_LENGTH + parser.opt = opt || {} + parser.opt.lowercase = parser.opt.lowercase || parser.opt.lowercasetags + parser.looseCase = parser.opt.lowercase ? 'toLowerCase' : 'toUpperCase' + parser.tags = [] + parser.closed = parser.closedRoot = parser.sawRoot = false + parser.tag = parser.error = null + parser.strict = !!strict + parser.noscript = !!(strict || parser.opt.noscript) + parser.state = S.BEGIN + parser.strictEntities = parser.opt.strictEntities + parser.ENTITIES = parser.strictEntities ? Object.create(sax.XML_ENTITIES) : Object.create(sax.ENTITIES) + parser.attribList = [] - /** - * Return raw headers (non-spec api) - * - * @return Object - */ - raw() { - return this[MAP]; - } + // namespaces form a prototype chain. + // it always points at the current tag, + // which protos to its parent tag. + if (parser.opt.xmlns) { + parser.ns = Object.create(rootNS) + } - /** - * Get an iterator on keys. - * - * @return Iterator - */ - keys() { - return createHeadersIterator(this, 'key'); - } + // mostly just for error reporting + parser.trackPosition = parser.opt.position !== false + if (parser.trackPosition) { + parser.position = parser.line = parser.column = 0 + } + emit(parser, 'onready') + } - /** - * Get an iterator on values. - * - * @return Iterator - */ - values() { - return createHeadersIterator(this, 'value'); - } + if (!Object.create) { + Object.create = function (o) { + function F () {} + F.prototype = o + var newf = new F() + return newf + } + } - /** - * Get an iterator on entries. - * - * This is the default iterator of the Headers object. 
- * - * @return Iterator - */ - [Symbol.iterator]() { - return createHeadersIterator(this, 'key+value'); - } -} -Headers.prototype.entries = Headers.prototype[Symbol.iterator]; + if (!Object.keys) { + Object.keys = function (o) { + var a = [] + for (var i in o) if (o.hasOwnProperty(i)) a.push(i) + return a + } + } -Object.defineProperty(Headers.prototype, Symbol.toStringTag, { - value: 'Headers', - writable: false, - enumerable: false, - configurable: true -}); + function checkBufferLength (parser) { + var maxAllowed = Math.max(sax.MAX_BUFFER_LENGTH, 10) + var maxActual = 0 + for (var i = 0, l = buffers.length; i < l; i++) { + var len = parser[buffers[i]].length + if (len > maxAllowed) { + // Text/cdata nodes can get big, and since they're buffered, + // we can get here under normal conditions. + // Avoid issues by emitting the text node now, + // so at least it won't get any bigger. + switch (buffers[i]) { + case 'textNode': + closeText(parser) + break -Object.defineProperties(Headers.prototype, { - get: { enumerable: true }, - forEach: { enumerable: true }, - set: { enumerable: true }, - append: { enumerable: true }, - has: { enumerable: true }, - delete: { enumerable: true }, - keys: { enumerable: true }, - values: { enumerable: true }, - entries: { enumerable: true } -}); + case 'cdata': + emitNode(parser, 'oncdata', parser.cdata) + parser.cdata = '' + break -function getHeaders(headers) { - let kind = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'key+value'; + case 'script': + emitNode(parser, 'onscript', parser.script) + parser.script = '' + break - const keys = Object.keys(headers[MAP]).sort(); - return keys.map(kind === 'key' ? function (k) { - return k.toLowerCase(); - } : kind === 'value' ? function (k) { - return headers[MAP][k].join(', '); - } : function (k) { - return [k.toLowerCase(), headers[MAP][k].join(', ')]; - }); -} + default: + error(parser, 'Max buffer length exceeded: ' + buffers[i]) + } + } + maxActual = Math.max(maxActual, len) + } + // schedule the next check for the earliest possible buffer overrun. 
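// --- Annotation (not part of the bundled diff): worked example of the
// scheduling arithmetic just below. With the default
// sax.MAX_BUFFER_LENGTH = 64 * 1024 = 65536 and a largest current buffer of,
// say, maxActual = 1536 characters, the next overrun check is deferred by
// m = 65536 - 1536 = 64000 characters -- the earliest point at which any
// buffer could cross the limit again.
// --- End annotation.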
+ var m = sax.MAX_BUFFER_LENGTH - maxActual + parser.bufferCheckPosition = m + parser.position + } -const INTERNAL = Symbol('internal'); + function clearBuffers (parser) { + for (var i = 0, l = buffers.length; i < l; i++) { + parser[buffers[i]] = '' + } + } -function createHeadersIterator(target, kind) { - const iterator = Object.create(HeadersIteratorPrototype); - iterator[INTERNAL] = { - target, - kind, - index: 0 - }; - return iterator; -} + function flushBuffers (parser) { + closeText(parser) + if (parser.cdata !== '') { + emitNode(parser, 'oncdata', parser.cdata) + parser.cdata = '' + } + if (parser.script !== '') { + emitNode(parser, 'onscript', parser.script) + parser.script = '' + } + } -const HeadersIteratorPrototype = Object.setPrototypeOf({ - next() { - // istanbul ignore if - if (!this || Object.getPrototypeOf(this) !== HeadersIteratorPrototype) { - throw new TypeError('Value of `this` is not a HeadersIterator'); - } + SAXParser.prototype = { + end: function () { end(this) }, + write: write, + resume: function () { this.error = null; return this }, + close: function () { return this.write(null) }, + flush: function () { flushBuffers(this) } + } - var _INTERNAL = this[INTERNAL]; - const target = _INTERNAL.target, - kind = _INTERNAL.kind, - index = _INTERNAL.index; + var Stream + try { + Stream = (__nccwpck_require__(2781).Stream) + } catch (ex) { + Stream = function () {} + } - const values = getHeaders(target, kind); - const len = values.length; - if (index >= len) { - return { - value: undefined, - done: true - }; - } + var streamWraps = sax.EVENTS.filter(function (ev) { + return ev !== 'error' && ev !== 'end' + }) - this[INTERNAL].index = index + 1; + function createStream (strict, opt) { + return new SAXStream(strict, opt) + } - return { - value: values[index], - done: false - }; - } -}, Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]()))); + function SAXStream (strict, opt) { + if (!(this instanceof SAXStream)) { + return new SAXStream(strict, opt) + } -Object.defineProperty(HeadersIteratorPrototype, Symbol.toStringTag, { - value: 'HeadersIterator', - writable: false, - enumerable: false, - configurable: true -}); + Stream.apply(this) -/** - * Export the Headers object in a form that Node.js can consume. - * - * @param Headers headers - * @return Object - */ -function exportNodeCompatibleHeaders(headers) { - const obj = Object.assign({ __proto__: null }, headers[MAP]); + this._parser = new SAXParser(strict, opt) + this.writable = true + this.readable = true - // http.request() only supports string as Host header. This hack makes - // specifying custom Host header possible. - const hostHeaderKey = find(headers[MAP], 'Host'); - if (hostHeaderKey !== undefined) { - obj[hostHeaderKey] = obj[hostHeaderKey][0]; - } + var me = this - return obj; -} + this._parser.onend = function () { + me.emit('end') + } -/** - * Create a Headers object from an object of headers, ignoring those that do - * not conform to HTTP grammar productions. 
- * - * @param Object obj Object of headers - * @return Headers - */ -function createHeadersLenient(obj) { - const headers = new Headers(); - for (const name of Object.keys(obj)) { - if (invalidTokenRegex.test(name)) { - continue; - } - if (Array.isArray(obj[name])) { - for (const val of obj[name]) { - if (invalidHeaderCharRegex.test(val)) { - continue; - } - if (headers[MAP][name] === undefined) { - headers[MAP][name] = [val]; - } else { - headers[MAP][name].push(val); - } - } - } else if (!invalidHeaderCharRegex.test(obj[name])) { - headers[MAP][name] = [obj[name]]; - } - } - return headers; -} + this._parser.onerror = function (er) { + me.emit('error', er) -const INTERNALS$1 = Symbol('Response internals'); + // if didn't throw, then means error was handled. + // go ahead and clear error, so we can write again. + me._parser.error = null + } -// fix an issue where "STATUS_CODES" aren't a named export for node <10 -const STATUS_CODES = http.STATUS_CODES; + this._decoder = null -/** - * Response class - * - * @param Stream body Readable stream - * @param Object opts Response options - * @return Void - */ -class Response { - constructor() { - let body = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null; - let opts = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; + streamWraps.forEach(function (ev) { + Object.defineProperty(me, 'on' + ev, { + get: function () { + return me._parser['on' + ev] + }, + set: function (h) { + if (!h) { + me.removeAllListeners(ev) + me._parser['on' + ev] = h + return h + } + me.on(ev, h) + }, + enumerable: true, + configurable: false + }) + }) + } - Body.call(this, body, opts); + SAXStream.prototype = Object.create(Stream.prototype, { + constructor: { + value: SAXStream + } + }) - const status = opts.status || 200; - const headers = new Headers(opts.headers); + SAXStream.prototype.write = function (data) { + if (typeof Buffer === 'function' && + typeof Buffer.isBuffer === 'function' && + Buffer.isBuffer(data)) { + if (!this._decoder) { + var SD = (__nccwpck_require__(1576).StringDecoder) + this._decoder = new SD('utf8') + } + data = this._decoder.write(data) + } - if (body != null && !headers.has('Content-Type')) { - const contentType = extractContentType(body); - if (contentType) { - headers.append('Content-Type', contentType); - } - } + this._parser.write(data.toString()) + this.emit('data', data) + return true + } - this[INTERNALS$1] = { - url: opts.url, - status, - statusText: opts.statusText || STATUS_CODES[status], - headers, - counter: opts.counter - }; - } + SAXStream.prototype.end = function (chunk) { + if (chunk && chunk.length) { + this.write(chunk) + } + this._parser.end() + return true + } - get url() { - return this[INTERNALS$1].url || ''; - } + SAXStream.prototype.on = function (ev, handler) { + var me = this + if (!me._parser['on' + ev] && streamWraps.indexOf(ev) !== -1) { + me._parser['on' + ev] = function () { + var args = arguments.length === 1 ? [arguments[0]] : Array.apply(null, arguments) + args.splice(0, 0, ev) + me.emit.apply(me, args) + } + } - get status() { - return this[INTERNALS$1].status; - } + return Stream.prototype.on.call(me, ev, handler) + } - /** - * Convenience property representing if the request ended normally - */ - get ok() { - return this[INTERNALS$1].status >= 200 && this[INTERNALS$1].status < 300; - } + // this really needs to be replaced with character classes. + // XML allows all manner of ridiculous numbers and digits. 
+ var CDATA = '[CDATA[' + var DOCTYPE = 'DOCTYPE' + var XML_NAMESPACE = 'http://www.w3.org/XML/1998/namespace' + var XMLNS_NAMESPACE = 'http://www.w3.org/2000/xmlns/' + var rootNS = { xml: XML_NAMESPACE, xmlns: XMLNS_NAMESPACE } - get redirected() { - return this[INTERNALS$1].counter > 0; - } + // http://www.w3.org/TR/REC-xml/#NT-NameStartChar + // This implementation works on strings, a single character at a time + // as such, it cannot ever support astral-plane characters (10000-EFFFF) + // without a significant breaking change to either this parser, or the + // JavaScript language. Implementation of an emoji-capable xml parser + // is left as an exercise for the reader. + var nameStart = /[:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]/ - get statusText() { - return this[INTERNALS$1].statusText; - } + var nameBody = /[:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u00B7\u0300-\u036F\u203F-\u2040.\d-]/ - get headers() { - return this[INTERNALS$1].headers; - } + var entityStart = /[#:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]/ + var entityBody = /[#:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u00B7\u0300-\u036F\u203F-\u2040.\d-]/ - /** - * Clone this response - * - * @return Response - */ - clone() { - return new Response(clone(this), { - url: this.url, - status: this.status, - statusText: this.statusText, - headers: this.headers, - ok: this.ok, - redirected: this.redirected - }); - } -} + function isWhitespace (c) { + return c === ' ' || c === '\n' || c === '\r' || c === '\t' + } -Body.mixIn(Response.prototype); + function isQuote (c) { + return c === '"' || c === '\'' + } -Object.defineProperties(Response.prototype, { - url: { enumerable: true }, - status: { enumerable: true }, - ok: { enumerable: true }, - redirected: { enumerable: true }, - statusText: { enumerable: true }, - headers: { enumerable: true }, - clone: { enumerable: true } -}); + function isAttribEnd (c) { + return c === '>' || isWhitespace(c) + } -Object.defineProperty(Response.prototype, Symbol.toStringTag, { - value: 'Response', - writable: false, - enumerable: false, - configurable: true -}); + function isMatch (regex, c) { + return regex.test(c) + } -const INTERNALS$2 = Symbol('Request internals'); -const URL = Url.URL || whatwgUrl.URL; + function notMatch (regex, c) { + return !isMatch(regex, c) + } -// fix an issue where "format", "parse" aren't a named export for node <10 -const parse_url = Url.parse; -const format_url = Url.format; + var S = 0 + sax.STATE = { + BEGIN: S++, // leading byte order mark or whitespace + BEGIN_WHITESPACE: S++, // leading whitespace + TEXT: S++, // general stuff + TEXT_ENTITY: S++, // & and such. + OPEN_WAKA: S++, // < + SGML_DECL: S++, // + SCRIPT: S++, //
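// --- Annotation (not part of the bundled diff): a minimal usage sketch of
// this bundled sax parser (whose state machine continues below), assuming
// the standalone `sax` package matches this copy.
var sax = require('sax');
var saxParser = sax.parser(true); // strict mode
saxParser.onopentag = function (node) {
  console.log('open', node.name, node.attributes);
};
saxParser.ontext = function (text) {
  console.log('text', text);
};
saxParser.onerror = function (err) {
  console.error(err);
  saxParser.resume(); // clear the error and keep parsing
};
saxParser.write('<greeting lang="en">hello</greeting>').close();
// Logs roughly: open greeting { lang: 'en' } / text hello
// --- End annotation.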