diff --git a/.github/workflows/docs-publish.yml b/.github/workflows/docs-publish.yml deleted file mode 100644 index 00072840554ef..0000000000000 --- a/.github/workflows/docs-publish.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: Documentation Publish -on: - push: - paths: - - '.github/workflows/docs.yml' - - 'docs-gen/**' - - 'docs/**' - branches: - - master -jobs: - docs: - name: Publish - runs-on: ubuntu-20.04 - defaults: - run: - working-directory: ./docs - steps: - - name: Checkout - uses: actions/checkout@v4 - - uses: actions/setup-node@v3 - with: - node-version: '16.x' - - name: Restore cache - uses: actions/cache@v3 - with: - path: | - node_modules - docs/node_modules - key: ${{ runner.os }}-workspace-docs-${{ matrix.node-version }} - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 - with: - aws-access-key-id: ${{ secrets.DOCS_DEPLOY_AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.DOCS_DEPLOY_AWS_SECRET_ACCESS_KEY }} - aws-region: us-east-2 - - name: Generate API documentation - run: ./docs-gen.sh - - name: Build Gatsby site - run: ./deploy-production.sh - env: - PERCY_TOKEN: ${{ secrets.PERCY_TOKEN }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml deleted file mode 100644 index 34a9e1077a481..0000000000000 --- a/.github/workflows/docs.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: Documentation Build -on: - pull_request: - paths: - - '.github/workflows/docs.yml' - - 'docs-gen/**' - - 'docs/**' -jobs: - docs: - name: Build - runs-on: ubuntu-20.04 - defaults: - run: - working-directory: ./docs - steps: - - name: Checkout - uses: actions/checkout@v4 - - uses: actions/setup-node@v3 - with: - node-version: '16.x' - - name: Restore cache - uses: actions/cache@v3 - with: - path: | - node_modules - docs/node_modules - key: ${{ runner.os }}-workspace-docs-${{ matrix.node-version }} - - name: Generate API documentation - run: ./docs-gen.sh - - name: Build website - run: ./build.sh diff --git 
a/docs-gen/.gitignore b/docs-gen/.gitignore deleted file mode 100644 index 53c37a16608c0..0000000000000 --- a/docs-gen/.gitignore +++ /dev/null @@ -1 +0,0 @@ -dist \ No newline at end of file diff --git a/docs-gen/LICENSE b/docs-gen/LICENSE deleted file mode 100644 index f01a4a4b3ff2e..0000000000000 --- a/docs-gen/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -MIT License - -Copyright (c) 2020 Cube Dev, Inc. -Copyright (c) 2016 Thomas Grey - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/docs-gen/README.md b/docs-gen/README.md deleted file mode 100644 index 84f19261a8a36..0000000000000 --- a/docs-gen/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# typedoc-plugin-markdown - -A plugin for [TypeDoc](https://github.com/TypeStrong/typedoc) that enables TypeScript API documentation to be generated in Markdown. 
- -[![npm](https://img.shields.io/npm/v/typedoc-plugin-markdown.svg)](https://www.npmjs.com/package/typedoc-plugin-markdown) -[![Build Status](https://travis-ci.org/tgreyuk/typedoc-plugin-markdown.svg?branch=master)](https://travis-ci.org/tgreyuk/typedoc-plugin-markdown) - -## What it does? - -The plugin will replace the default HTML theme with a built-in Markdown theme, and expose some additional arguments. - -By default, the Markdown theme will attempt to render standard CommonMark, suitable for the majority of Markdown engines. -It follows the same structure and file patterns as the default HTML theme. - -## Installation - -```bash -npm install --save-dev typedoc typedoc-plugin-markdown -``` - -## Usage - -```bash -$ npx typedoc --plugin typedoc-plugin-markdown [args] -``` - -### Note: - -- The `--plugin` arg is optional - if omitted all installed plugins will run. -- If using with the default HTML theme or other themes, use `--plugin none` to switch the plugin off. -- The plugin needs to be executed from the same location as `typedoc`. Either run as an npm script or make sure to run `npx typedoc`. - -## Arguments - -The following arguments can be used in addition to the default [TypeDoc arguments](https://github.com/TypeStrong/typedoc#arguments). - -- `--theme `
- Specify the theme that should be used. Defaults to `markdown`. Please read [Markdown Themes](https://github.com/tgreyuk/typedoc-plugin-markdown/blob/master/THEMES.md) for further details. -- `--namedAnchors`
- Use HTML named anchors as fragment identifiers for engines that do not automatically assign header ids. -- `--hideSources`
- Do not print source file link rendering. -- `--hideBreadcrumbs`
- Do not print breadcrumbs. -- `--skipSidebar`
- Do not update the `sidebar.json` file when used with `docusaurus` or `docusaurus2` theme. - -## License - -[MIT](https://github.com/tgreyuk/typedoc-plugin-markdown/blob/master/LICENSE) diff --git a/docs-gen/THEMES.md b/docs-gen/THEMES.md deleted file mode 100644 index 52bf6889d4fa4..0000000000000 --- a/docs-gen/THEMES.md +++ /dev/null @@ -1,151 +0,0 @@ -# Markdown Themes - -By default, the Markdown theme will attempt to render standard CommonMark, suitable for the majority of Markdown engines. -It follows the same structure and file patterns as the default HTML theme (see [typedoc-default-themes](https://github.com/TypeStrong/typedoc-default-themes)). - -The plugin also comes packaged with some additional built-in themes and can also be extended with a custom theme. - -- [Built-in themes](#built-in-themes) -- [Writing a custom Markdown theme](#writing-a-custom-markdown-theme) - -## Writing a custom markdown theme - -The Markdown theme packaged with the plugin can also be extended with a custom Markdown theme using the standard TypeDoc theming pattern as per https://typedoc.org/guides/themes/. - -### Create a theme.js class - -As per the theme docs create a `theme.js` file which TypeDoc will then attempt to load from a given location. - -_mytheme/custom-theme.js_ - -```js -const MarkdownTheme = require('typedoc-plugin-markdown/dist/theme'); - -class CustomMarkdownTheme extends MarkdownTheme.default { - constructor(renderer, basePath) { - super(renderer, basePath); - } -} - -exports.default = CustomMarkdownTheme; -``` - -### Theme resources - -By default the theme will inherit the resources of the Markdown theme (https://github.com/tgreyuk/typedoc-plugin-markdown/tree/master/src/resources). - -These can be replaced and updated as required. 
- -### Building the theme - -#### CLI - -``` -npx typedoc ./src --plugin typedoc-plugin-markdown --theme ./mytheme/custom-theme --out docs -``` - -#### API - -```js -const { Application } = require('typedoc'); -const path = require('path'); - -const app = new Application(); -app.bootstrap({ - module: 'CommonJS', - target: 'ES5', - readme: 'none', - theme: path.join(__dirname, 'mytheme', 'custom-theme'), - plugin: 'typedoc-plugin-markdown', -}); - -app.generateDocs(app.expandInputFiles(['./src']), 'docs'); -``` - -See https://typedoc.org/guides/installation/#node-module - -## Built-in themes - -### `docusaurus` / `docusaurus2` - -The --out path is assumed be a Docusaurus docs directory. - -- Adds Front Matter to pages to support Docusaurus [Markdown Headers](https://docusaurus.io/docs/en/doc-markdown#markdown-headers). -- Appends releavant JSON to website/sidebars.json|sidebars.js, to support [sidebar navigation](https://docusaurus.io/docs/en/navigation). - -#### Output - -``` -root-directory -├── docs -│ ├── myapi -│ | ├── classes -│ │ ├── enums -│ │ ├── interfaces -│ │ ├── index.md -│ │ -└── website - ├── sidebars.json - -``` - -#### Adding links in siteconfig - -Manually add the index page to headerLinks in the [siteConfig.js](https://docusaurus.io/docs/en/site-config) to access the api from header. - -```js -headerLinks: [ - { doc: "myapi/index", label: "My API" }, -], -``` - -### `vuepress` - -- Adds Front Matter to pages. -- The --out path is assumed be a Vuepress docs directory. -- Will create: - - - `.vuepress/api-sidebar.json` to be used with [sidebar](https://vuepress.vuejs.org/default-theme-config/#sidebar). - - `.vuepress/api-sidebar-relative.json` to be used with [multiple sidebars](https://vuepress.vuejs.org/default-theme-config/#multiple-sidebars). 
- - `.vuepress/config.json` - -#### Examples - -```js -const apiSideBar = require('./api-sidebar.json'); - -// Without groups -module.exports = { - themeConfig: { - sidebar: ['some-content', ...apiSideBar], - }, -}; - -// With groups -module.exports = { - themeConfig: { - sidebar: ['some-content', { title: 'API', children: apiSideBar }], - }, -}; -``` - -```js -const apiSideBarRelative = require('./api-sidebar-relative.json'); - -// Multiple sidebars -module.exports = { - themeConfig: { - sidebar: { - '/guide/': ['some-content'], - '/api/': apiSideBarRelative, - '/': ['other'], - }, - }, -}; -``` - -### `bitbucket` - -_Note: this theme applicable to Bitbucket Cloud. If using Bitbucket Server please use the `--namedAnchors` argument to fix anchor links._ - -- Parses internal anchor links to support Bitbucket's internal anchor linking. diff --git a/docs-gen/jest.config.js b/docs-gen/jest.config.js deleted file mode 100644 index 80a03151d7198..0000000000000 --- a/docs-gen/jest.config.js +++ /dev/null @@ -1,10 +0,0 @@ -module.exports = { - modulePaths: ['/dist'], - transform: { - '^.+\\.tsx?$': 'ts-jest', - }, - verbose: true, - collectCoverage: true, - coverageReporters: ['text-summary'], - collectCoverageFrom: ['/dist/**/*js'], -}; diff --git a/docs-gen/package.json b/docs-gen/package.json deleted file mode 100644 index c1a7fa366be90..0000000000000 --- a/docs-gen/package.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "cubejs-typedoc-plugin", - "version": "0.0.1", - "description": "A plugin for TypeDoc that enables TypeScript API documentation to be generated in Markdown.", - "main": "dist/index.js", - "files": [ - "dist/" - ], - "scripts": { - "lint": "tslint --project ./tsconfig.json", - "generate": "yarn build && yarn gen-docs", - "gen-docs": "node tasks/generate.js", - "dev-gen": "copyfiles --up 1 ./src/**/*.hbs ./dist/ && yarn gen-docs", - "build": "rm -rf dist && tsc --sourceMap false --declaration false && copyfiles --up 1 ./src/**/*.hbs ./dist/", - "watch": 
"tsc-watch", - "prepublishOnly": "yarn test", - "pretest": "yarn lint && yarn build", - "test": "jest", - "compile": "tsc src/index.ts", - "test:updateSnapshot": "jest --updateSnapshot", - "fixtures": "rm -rf ./test/fixtures && node ./tasks/fixtures.js", - "examples": "yarn build && rm -rf out && node ./tasks/link-plugin.js && yarn examples:html && yarn examples:md", - "examples:html": "typedoc --tsconfig ./test/stubs/tsconfig.json --options ./test/options.json --plugin none --out ./out/html", - "examples:md": "typedoc --tsconfig ./test/stubs/tsconfig.json --options ./test/options.json --out ./out/md" - }, - "author": "Cube Dev, Inc.", - "license": "MIT", - "engines": { - "node": ">= 8.0.0" - }, - "peerDependencies": { - "typedoc": ">=0.17.0" - }, - "devDependencies": { - "@types/fs-extra": "^9.0.1", - "@types/jest": "^25.2.3", - "@types/node": "^14.0.9", - "@types/react": "^16.9.41", - "copyfiles": "^2.3.0", - "jest": "^26.0.1", - "ts-jest": "^26.1.0", - "tsc-watch": "^4.2.9", - "tslint": "^6.1.2", - "typedoc": "0.19.2", - "typescript": "^4" - }, - "dependencies": { - "fs-extra": "^9.0.0", - "inflection": "^1.12.0" - } -} diff --git a/docs-gen/src/__snapshots__/theme.spec.ts.snap b/docs-gen/src/__snapshots__/theme.spec.ts.snap deleted file mode 100644 index 9fcfa7fdc1bf1..0000000000000 --- a/docs-gen/src/__snapshots__/theme.spec.ts.snap +++ /dev/null @@ -1,2585 +0,0 @@ -// Jest Snapshot v1, https://goo.gl/fbAQLP - -exports[`MarkdownTheme getUrls should get navigation 1`] = ` -NavigationItem { - "children": Array [ - NavigationItem { - "children": Array [ - NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA1", - "url": "interfaces/_categories_.categoryinterfacea1.md", - }, - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA2", - "url": "interfaces/_categories_.categoryinterfacea2.md", - }, - NavigationItem { - "parent": [Circular], - "title": "OtherInterfaceA", - "url": 
"interfaces/_categories_.otherinterfacea.md", - }, - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA1", - "url": "interfaces/_categories_.categoryinterfacea1.md", - }, - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA2", - "url": "interfaces/_categories_.categoryinterfacea2.md", - }, - NavigationItem { - "parent": [Circular], - "title": "OtherInterfaceA", - "url": "interfaces/_categories_.otherinterfacea.md", - }, - ], - "parent": undefined, - "title": "categories", - "url": "modules/_categories_.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - [Circular], - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA2", - "url": "interfaces/_categories_.categoryinterfacea2.md", - }, - NavigationItem { - "parent": [Circular], - "title": "OtherInterfaceA", - "url": "interfaces/_categories_.otherinterfacea.md", - }, - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA1", - "url": "interfaces/_categories_.categoryinterfacea1.md", - }, - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA2", - "url": "interfaces/_categories_.categoryinterfacea2.md", - }, - NavigationItem { - "parent": [Circular], - "title": "OtherInterfaceA", - "url": "interfaces/_categories_.otherinterfacea.md", - }, - ], - "parent": undefined, - "title": "categories", - "url": "modules/_categories_.md", - }, - "title": "CategoryInterfaceA1", - "url": "interfaces/_categories_.categoryinterfacea1.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA1", - "url": "interfaces/_categories_.categoryinterfacea1.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "OtherInterfaceA", - "url": "interfaces/_categories_.otherinterfacea.md", - }, - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA1", - "url": 
"interfaces/_categories_.categoryinterfacea1.md", - }, - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA2", - "url": "interfaces/_categories_.categoryinterfacea2.md", - }, - NavigationItem { - "parent": [Circular], - "title": "OtherInterfaceA", - "url": "interfaces/_categories_.otherinterfacea.md", - }, - ], - "parent": undefined, - "title": "categories", - "url": "modules/_categories_.md", - }, - "title": "CategoryInterfaceA2", - "url": "interfaces/_categories_.categoryinterfacea2.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA1", - "url": "interfaces/_categories_.categoryinterfacea1.md", - }, - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA2", - "url": "interfaces/_categories_.categoryinterfacea2.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA1", - "url": "interfaces/_categories_.categoryinterfacea1.md", - }, - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA2", - "url": "interfaces/_categories_.categoryinterfacea2.md", - }, - NavigationItem { - "parent": [Circular], - "title": "OtherInterfaceA", - "url": "interfaces/_categories_.otherinterfacea.md", - }, - ], - "parent": undefined, - "title": "categories", - "url": "modules/_categories_.md", - }, - "title": "OtherInterfaceA", - "url": "interfaces/_categories_.otherinterfacea.md", - }, - NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": 
"classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - [Circular], - NavigationItem { - "parent": [Circular], - 
"title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": 
"IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": 
"classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": 
"classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": 
"interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": 
"classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": 
"classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": 
"modules/_classes_.md", - }, - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": 
"interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": 
"classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": 
"classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": undefined, - "title": "comments", - "url": "modules/_comments_.md", - }, - NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "I", - "url": "interfaces/_destrucuting_.i.md", - }, - NavigationItem { - "parent": [Circular], - "title": "I", - "url": "interfaces/_destrucuting_.i.md", - }, - ], - "parent": undefined, - "title": "destrucuting", - "url": "modules/_destrucuting_.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - [Circular], - NavigationItem { - "parent": [Circular], - "title": "I", - "url": "interfaces/_destrucuting_.i.md", - }, - ], - "parent": undefined, - "title": "destrucuting", - "url": "modules/_destrucuting_.md", - }, - "title": "I", - "url": 
"interfaces/_destrucuting_.i.md", - }, - NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "moduleFunction", - "url": "modules/_functions_.modulefunction.md", - }, - NavigationItem { - "parent": [Circular], - "title": "moduleFunction", - "url": "modules/_functions_.modulefunction.md", - }, - ], - "parent": undefined, - "title": "functions", - "url": "modules/_functions_.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - [Circular], - NavigationItem { - "parent": [Circular], - "title": "moduleFunction", - "url": "modules/_functions_.modulefunction.md", - }, - ], - "parent": undefined, - "title": "functions", - "url": "modules/_functions_.md", - }, - "title": "moduleFunction", - "url": "modules/_functions_.modulefunction.md", - }, - NavigationItem { - "parent": undefined, - "title": "literals", - "url": "modules/_literals_.md", - }, - NavigationItem { - "parent": undefined, - "title": "variables", - "url": "modules/_variables_.md", - }, - ], - "isLabel": true, - "parent": undefined, - "title": "Modules", - "url": "", - }, - NavigationItem { - "children": Array [ - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": 
"INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": 
"NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": 
"BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", 
- "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": 
"classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": 
"classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": 
"interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - ], - "isLabel": true, - "parent": undefined, - "title": "Classes", - "url": "", - }, - NavigationItem { - "children": Array [ - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA1", - "url": "interfaces/_categories_.categoryinterfacea1.md", - }, - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA2", - "url": "interfaces/_categories_.categoryinterfacea2.md", - }, - NavigationItem { - "parent": [Circular], - "title": "OtherInterfaceA", - "url": "interfaces/_categories_.otherinterfacea.md", - }, - [Circular], - 
NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA2", - "url": "interfaces/_categories_.categoryinterfacea2.md", - }, - NavigationItem { - "parent": [Circular], - "title": "OtherInterfaceA", - "url": "interfaces/_categories_.otherinterfacea.md", - }, - ], - "parent": undefined, - "title": "categories", - "url": "modules/_categories_.md", - }, - "title": "CategoryInterfaceA1", - "url": "interfaces/_categories_.categoryinterfacea1.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA1", - "url": "interfaces/_categories_.categoryinterfacea1.md", - }, - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA2", - "url": "interfaces/_categories_.categoryinterfacea2.md", - }, - NavigationItem { - "parent": [Circular], - "title": "OtherInterfaceA", - "url": "interfaces/_categories_.otherinterfacea.md", - }, - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA1", - "url": "interfaces/_categories_.categoryinterfacea1.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "OtherInterfaceA", - "url": "interfaces/_categories_.otherinterfacea.md", - }, - ], - "parent": undefined, - "title": "categories", - "url": "modules/_categories_.md", - }, - "title": "CategoryInterfaceA2", - "url": "interfaces/_categories_.categoryinterfacea2.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA1", - "url": "interfaces/_categories_.categoryinterfacea1.md", - }, - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA2", - "url": "interfaces/_categories_.categoryinterfacea2.md", - }, - NavigationItem { - "parent": [Circular], - "title": "OtherInterfaceA", - "url": "interfaces/_categories_.otherinterfacea.md", - }, - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA1", - "url": 
"interfaces/_categories_.categoryinterfacea1.md", - }, - NavigationItem { - "parent": [Circular], - "title": "CategoryInterfaceA2", - "url": "interfaces/_categories_.categoryinterfacea2.md", - }, - [Circular], - ], - "parent": undefined, - "title": "categories", - "url": "modules/_categories_.md", - }, - "title": "OtherInterfaceA", - "url": "interfaces/_categories_.otherinterfacea.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": 
"NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": 
"BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - [Circular], - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": 
"interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "BaseClass", - "url": "classes/_classes_.baseclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "GenericClass", - "url": "classes/_classes_.genericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "InternalClass", - "url": "classes/_classes_.internalclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "NonGenericClass", - "url": "classes/_classes_.nongenericclass.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassA", - "url": "classes/_classes_.subclassa.md", - }, - NavigationItem { - "parent": [Circular], - "title": "SubClassB", - "url": "classes/_classes_.subclassb.md", - }, - NavigationItem { - "parent": [Circular], - "title": "INameInterface", - "url": "interfaces/_classes_.inameinterface.md", - }, - NavigationItem { - "parent": [Circular], - "title": "IPrintInterface", - "url": "interfaces/_classes_.iprintinterface.md", - }, - [Circular], - ], - "parent": undefined, - "title": "classes", - "url": "modules/_classes_.md", - }, - "title": "IPrintNameInterface", - "url": "interfaces/_classes_.iprintnameinterface.md", - }, - NavigationItem { - "parent": NavigationItem { - "children": Array [ - NavigationItem { - "parent": [Circular], - "title": "I", - "url": "interfaces/_destrucuting_.i.md", - }, - [Circular], - ], - "parent": undefined, - "title": "destrucuting", - "url": "modules/_destrucuting_.md", - }, - "title": "I", - "url": "interfaces/_destrucuting_.i.md", - }, - ], - "isLabel": true, - "parent": undefined, - "title": "Interfaces", - "url": "", - }, - ], - "parent": undefined, - "title": "typedoc-plugin-markdown", - 
"url": "README.md", -} -`; - -exports[`MarkdownTheme getUrls should getUrls when readme is defined 1`] = ` -Array [ - "globals.md", - "modules/_categories_.md", - "modules/_classes_.md", - "modules/_comments_.md", - "modules/_destrucuting_.md", - "modules/_functions_.md", - "modules/_literals_.md", - "modules/_variables_.md", - "README.md", - "modules/_categories_.md", - "modules/_classes_.md", - "modules/_comments_.md", - "modules/_destrucuting_.md", - "modules/_functions_.md", - "modules/_literals_.md", - "modules/_variables_.md", - "modules/_categories_.md", - "interfaces/_categories_.categoryinterfacea1.md", - "interfaces/_categories_.categoryinterfacea2.md", - "interfaces/_categories_.otherinterfacea.md", - "modules/_categories_.md#let-categoryvariablea1", - "modules/_categories_.md#let-categoryvariablea2", - "modules/_categories_.md#let-categoryvariableb1", - "modules/_categories_.md#let-othervariablea", - "modules/_categories_.md#let-othervariableb", - "modules/_categories_.md#categoryfunctiona1", - "modules/_categories_.md#categoryfunctiona2", - "modules/_categories_.md#categoryfunctiona3", - "modules/_categories_.md#categoryfunctionb2", - "modules/_categories_.md#otherfunctiona", - "modules/_categories_.md#otherfunctionb", - "interfaces/_categories_.categoryinterfacea1.md", - "interfaces/_categories_.categoryinterfacea1.md#value", - "interfaces/_categories_.categoryinterfacea2.md", - "interfaces/_categories_.categoryinterfacea2.md#value", - "interfaces/_categories_.otherinterfacea.md", - "interfaces/_categories_.otherinterfacea.md#value", - "modules/_classes_.md", - "classes/_classes_.baseclass.md", - "classes/_classes_.genericclass.md", - "classes/_classes_.internalclass.md", - "classes/_classes_.nongenericclass.md", - "classes/_classes_.subclassa.md", - "classes/_classes_.subclassb.md", - "interfaces/_classes_.inameinterface.md", - "interfaces/_classes_.iprintinterface.md", - "interfaces/_classes_.iprintnameinterface.md", - 
"classes/_classes_.baseclass.md", - "classes/_classes_.baseclass.md#constructor", - "classes/_classes_.baseclass.md#private-internalclass", - "classes/_classes_.baseclass.md#protected-kind", - "classes/_classes_.baseclass.md#name", - "classes/_classes_.baseclass.md#static-instance", - "classes/_classes_.baseclass.md#static-instances", - "classes/_classes_.baseclass.md#abstract-abstractmethod", - "classes/_classes_.baseclass.md#arrowfunction", - "classes/_classes_.baseclass.md#private-checkname", - "classes/_classes_.baseclass.md#getname", - "classes/_classes_.baseclass.md#setname", - "classes/_classes_.baseclass.md#static-catest", - "classes/_classes_.baseclass.md#static-getinstance", - "classes/_classes_.baseclass.md#static-getname", - "classes/_classes_.genericclass.md", - "classes/_classes_.genericclass.md#constructor", - "classes/_classes_.genericclass.md#protected-p2", - "classes/_classes_.genericclass.md#p3", - "classes/_classes_.genericclass.md#private-p4", - "classes/_classes_.genericclass.md#readonly-p5", - "classes/_classes_.genericclass.md#value", - "classes/_classes_.genericclass.md#getvalue", - "classes/_classes_.genericclass.md#setvalue", - "classes/_classes_.internalclass.md", - "classes/_classes_.internalclass.md#constructor", - "classes/_classes_.nongenericclass.md", - "classes/_classes_.nongenericclass.md#constructor", - "classes/_classes_.nongenericclass.md#protected-p2", - "classes/_classes_.nongenericclass.md#p3", - "classes/_classes_.nongenericclass.md#readonly-p5", - "classes/_classes_.nongenericclass.md#value", - "classes/_classes_.nongenericclass.md#getvalue", - "classes/_classes_.nongenericclass.md#setvalue", - "classes/_classes_.subclassa.md", - "classes/_classes_.subclassa.md#constructor", - "classes/_classes_.subclassa.md#protected-kind", - "classes/_classes_.subclassa.md#name", - "classes/_classes_.subclassa.md#static-instance", - "classes/_classes_.subclassa.md#static-instances", - "classes/_classes_.subclassa.md#nameproperty", - 
"classes/_classes_.subclassa.md#readonlynameproperty", - "classes/_classes_.subclassa.md#writeonlynameproperty", - "classes/_classes_.subclassa.md#abstractmethod", - "classes/_classes_.subclassa.md#arrowfunction", - "classes/_classes_.subclassa.md#getname", - "classes/_classes_.subclassa.md#print", - "classes/_classes_.subclassa.md#printname", - "classes/_classes_.subclassa.md#setname", - "classes/_classes_.subclassa.md#static-catest", - "classes/_classes_.subclassa.md#static-getinstance", - "classes/_classes_.subclassa.md#static-getname", - "classes/_classes_.subclassb.md", - "classes/_classes_.subclassb.md#constructor", - "classes/_classes_.subclassb.md#protected-kind", - "classes/_classes_.subclassb.md#name", - "classes/_classes_.subclassb.md#static-instance", - "classes/_classes_.subclassb.md#static-instances", - "classes/_classes_.subclassb.md#abstractmethod", - "classes/_classes_.subclassb.md#arrowfunction", - "classes/_classes_.subclassb.md#dosomething", - "classes/_classes_.subclassb.md#getname", - "classes/_classes_.subclassb.md#setname", - "classes/_classes_.subclassb.md#static-catest", - "classes/_classes_.subclassb.md#static-getinstance", - "classes/_classes_.subclassb.md#static-getname", - "interfaces/_classes_.inameinterface.md", - "interfaces/_classes_.inameinterface.md#name", - "interfaces/_classes_.inameinterface.md#getname", - "interfaces/_classes_.iprintinterface.md", - "interfaces/_classes_.iprintinterface.md#print", - "interfaces/_classes_.iprintnameinterface.md", - "interfaces/_classes_.iprintnameinterface.md#name", - "interfaces/_classes_.iprintnameinterface.md#getname", - "interfaces/_classes_.iprintnameinterface.md#print", - "interfaces/_classes_.iprintnameinterface.md#printname", - "modules/_comments_.md", - "modules/_comments_.md#let-commentswithfencedblock", - "modules/_comments_.md#let-commentswithincludes", - "modules/_comments_.md#let-commentswithsymbollinks", - "modules/_comments_.md#let-commentswithtags", - 
"modules/_comments_.md#commentsinreturn", - "modules/_comments_.md#functionwithdoclink", - "modules/_destrucuting_.md", - "interfaces/_destrucuting_.i.md", - "modules/_destrucuting_.md#destructarraya", - "modules/_destrucuting_.md#destructarrayb", - "modules/_destrucuting_.md#destructarrayc", - "modules/_destrucuting_.md#destructarraywithignoresa", - "modules/_destrucuting_.md#destructarraywithignoresrest", - "modules/_destrucuting_.md#destructarraywithrest", - "modules/_destrucuting_.md#destructarraywithresta", - "modules/_destrucuting_.md#destructarraywithrestb", - "modules/_destrucuting_.md#destructobjecta", - "modules/_destrucuting_.md#destructobjectb", - "modules/_destrucuting_.md#destructobjectc", - "modules/_destrucuting_.md#drawtext", - "interfaces/_destrucuting_.i.md", - "interfaces/_destrucuting_.i.md#name", - "modules/_functions_.md", - "modules/_functions_.modulefunction.md", - "modules/_functions_.md#buildname", - "modules/_functions_.md#exportedfunction", - "modules/_functions_.md#functionwithdefaults", - "modules/_functions_.md#functionwithoptionalvalue", - "modules/_functions_.md#functionwithparameters", - "modules/_functions_.md#functionwithrest", - "modules/_functions_.md#internalfunction", - "modules/_functions_.md#multiplesignatures", - "modules/_functions_.md#const-variablefunction", - "modules/_functions_.modulefunction.md", - "modules/_functions_.modulefunction.md#let-functionvariable", - "modules/_functions_.modulefunction.md#append", - "modules/_functions_.modulefunction.md#prepend", - "modules/_literals_.md", - "modules/_literals_.md#let-typeliteral", - "modules/_literals_.md#const-objectliteral", - "modules/_variables_.md", - "modules/_variables_.md#let-color", - "modules/_variables_.md#let-decimal", - "modules/_variables_.md#let-isdone", - "modules/_variables_.md#let-list", - "modules/_variables_.md#let-n", - "modules/_variables_.md#let-notsure", - "modules/_variables_.md#let-u", - "modules/_variables_.md#let-x", -] -`; - 
-exports[`MarkdownTheme getUrls should getUrls' 1`] = ` -Array [ - "README.md", - "modules/_categories_.md", - "modules/_classes_.md", - "modules/_comments_.md", - "modules/_destrucuting_.md", - "modules/_functions_.md", - "modules/_literals_.md", - "modules/_variables_.md", - "modules/_categories_.md", - "interfaces/_categories_.categoryinterfacea1.md", - "interfaces/_categories_.categoryinterfacea2.md", - "interfaces/_categories_.otherinterfacea.md", - "modules/_categories_.md#let-categoryvariablea1", - "modules/_categories_.md#let-categoryvariablea2", - "modules/_categories_.md#let-categoryvariableb1", - "modules/_categories_.md#let-othervariablea", - "modules/_categories_.md#let-othervariableb", - "modules/_categories_.md#categoryfunctiona1", - "modules/_categories_.md#categoryfunctiona2", - "modules/_categories_.md#categoryfunctiona3", - "modules/_categories_.md#categoryfunctionb2", - "modules/_categories_.md#otherfunctiona", - "modules/_categories_.md#otherfunctionb", - "interfaces/_categories_.categoryinterfacea1.md", - "interfaces/_categories_.categoryinterfacea1.md#value", - "interfaces/_categories_.categoryinterfacea2.md", - "interfaces/_categories_.categoryinterfacea2.md#value", - "interfaces/_categories_.otherinterfacea.md", - "interfaces/_categories_.otherinterfacea.md#value", - "modules/_classes_.md", - "classes/_classes_.baseclass.md", - "classes/_classes_.genericclass.md", - "classes/_classes_.internalclass.md", - "classes/_classes_.nongenericclass.md", - "classes/_classes_.subclassa.md", - "classes/_classes_.subclassb.md", - "interfaces/_classes_.inameinterface.md", - "interfaces/_classes_.iprintinterface.md", - "interfaces/_classes_.iprintnameinterface.md", - "classes/_classes_.baseclass.md", - "classes/_classes_.baseclass.md#constructor", - "classes/_classes_.baseclass.md#private-internalclass", - "classes/_classes_.baseclass.md#protected-kind", - "classes/_classes_.baseclass.md#name", - "classes/_classes_.baseclass.md#static-instance", - 
"classes/_classes_.baseclass.md#static-instances", - "classes/_classes_.baseclass.md#abstract-abstractmethod", - "classes/_classes_.baseclass.md#arrowfunction", - "classes/_classes_.baseclass.md#private-checkname", - "classes/_classes_.baseclass.md#getname", - "classes/_classes_.baseclass.md#setname", - "classes/_classes_.baseclass.md#static-catest", - "classes/_classes_.baseclass.md#static-getinstance", - "classes/_classes_.baseclass.md#static-getname", - "classes/_classes_.genericclass.md", - "classes/_classes_.genericclass.md#constructor", - "classes/_classes_.genericclass.md#protected-p2", - "classes/_classes_.genericclass.md#p3", - "classes/_classes_.genericclass.md#private-p4", - "classes/_classes_.genericclass.md#readonly-p5", - "classes/_classes_.genericclass.md#value", - "classes/_classes_.genericclass.md#getvalue", - "classes/_classes_.genericclass.md#setvalue", - "classes/_classes_.internalclass.md", - "classes/_classes_.internalclass.md#constructor", - "classes/_classes_.nongenericclass.md", - "classes/_classes_.nongenericclass.md#constructor", - "classes/_classes_.nongenericclass.md#protected-p2", - "classes/_classes_.nongenericclass.md#p3", - "classes/_classes_.nongenericclass.md#readonly-p5", - "classes/_classes_.nongenericclass.md#value", - "classes/_classes_.nongenericclass.md#getvalue", - "classes/_classes_.nongenericclass.md#setvalue", - "classes/_classes_.subclassa.md", - "classes/_classes_.subclassa.md#constructor", - "classes/_classes_.subclassa.md#protected-kind", - "classes/_classes_.subclassa.md#name", - "classes/_classes_.subclassa.md#static-instance", - "classes/_classes_.subclassa.md#static-instances", - "classes/_classes_.subclassa.md#nameproperty", - "classes/_classes_.subclassa.md#readonlynameproperty", - "classes/_classes_.subclassa.md#writeonlynameproperty", - "classes/_classes_.subclassa.md#abstractmethod", - "classes/_classes_.subclassa.md#arrowfunction", - "classes/_classes_.subclassa.md#getname", - 
"classes/_classes_.subclassa.md#print", - "classes/_classes_.subclassa.md#printname", - "classes/_classes_.subclassa.md#setname", - "classes/_classes_.subclassa.md#static-catest", - "classes/_classes_.subclassa.md#static-getinstance", - "classes/_classes_.subclassa.md#static-getname", - "classes/_classes_.subclassb.md", - "classes/_classes_.subclassb.md#constructor", - "classes/_classes_.subclassb.md#protected-kind", - "classes/_classes_.subclassb.md#name", - "classes/_classes_.subclassb.md#static-instance", - "classes/_classes_.subclassb.md#static-instances", - "classes/_classes_.subclassb.md#abstractmethod", - "classes/_classes_.subclassb.md#arrowfunction", - "classes/_classes_.subclassb.md#dosomething", - "classes/_classes_.subclassb.md#getname", - "classes/_classes_.subclassb.md#setname", - "classes/_classes_.subclassb.md#static-catest", - "classes/_classes_.subclassb.md#static-getinstance", - "classes/_classes_.subclassb.md#static-getname", - "interfaces/_classes_.inameinterface.md", - "interfaces/_classes_.inameinterface.md#name", - "interfaces/_classes_.inameinterface.md#getname", - "interfaces/_classes_.iprintinterface.md", - "interfaces/_classes_.iprintinterface.md#print", - "interfaces/_classes_.iprintnameinterface.md", - "interfaces/_classes_.iprintnameinterface.md#name", - "interfaces/_classes_.iprintnameinterface.md#getname", - "interfaces/_classes_.iprintnameinterface.md#print", - "interfaces/_classes_.iprintnameinterface.md#printname", - "modules/_comments_.md", - "modules/_comments_.md#let-commentswithfencedblock", - "modules/_comments_.md#let-commentswithincludes", - "modules/_comments_.md#let-commentswithsymbollinks", - "modules/_comments_.md#let-commentswithtags", - "modules/_comments_.md#commentsinreturn", - "modules/_comments_.md#functionwithdoclink", - "modules/_destrucuting_.md", - "interfaces/_destrucuting_.i.md", - "modules/_destrucuting_.md#destructarraya", - "modules/_destrucuting_.md#destructarrayb", - 
"modules/_destrucuting_.md#destructarrayc", - "modules/_destrucuting_.md#destructarraywithignoresa", - "modules/_destrucuting_.md#destructarraywithignoresrest", - "modules/_destrucuting_.md#destructarraywithrest", - "modules/_destrucuting_.md#destructarraywithresta", - "modules/_destrucuting_.md#destructarraywithrestb", - "modules/_destrucuting_.md#destructobjecta", - "modules/_destrucuting_.md#destructobjectb", - "modules/_destrucuting_.md#destructobjectc", - "modules/_destrucuting_.md#drawtext", - "interfaces/_destrucuting_.i.md", - "interfaces/_destrucuting_.i.md#name", - "modules/_functions_.md", - "modules/_functions_.modulefunction.md", - "modules/_functions_.md#buildname", - "modules/_functions_.md#exportedfunction", - "modules/_functions_.md#functionwithdefaults", - "modules/_functions_.md#functionwithoptionalvalue", - "modules/_functions_.md#functionwithparameters", - "modules/_functions_.md#functionwithrest", - "modules/_functions_.md#internalfunction", - "modules/_functions_.md#multiplesignatures", - "modules/_functions_.md#const-variablefunction", - "modules/_functions_.modulefunction.md", - "modules/_functions_.modulefunction.md#let-functionvariable", - "modules/_functions_.modulefunction.md#append", - "modules/_functions_.modulefunction.md#prepend", - "modules/_literals_.md", - "modules/_literals_.md#let-typeliteral", - "modules/_literals_.md#const-objectliteral", - "modules/_variables_.md", - "modules/_variables_.md#let-color", - "modules/_variables_.md#let-decimal", - "modules/_variables_.md#let-isdone", - "modules/_variables_.md#let-list", - "modules/_variables_.md#let-n", - "modules/_variables_.md#let-notsure", - "modules/_variables_.md#let-u", - "modules/_variables_.md#let-x", -] -`; diff --git a/docs-gen/src/components/__snapshots__/front-matter.component.spec.ts.snap b/docs-gen/src/components/__snapshots__/front-matter.component.spec.ts.snap deleted file mode 100644 index 9dd83d215f253..0000000000000 --- 
a/docs-gen/src/components/__snapshots__/front-matter.component.spec.ts.snap +++ /dev/null @@ -1,27 +0,0 @@ -// Jest Snapshot v1, https://goo.gl/fbAQLP - -exports[`FrontMatterComponent should set correct label for globals file 1`] = `"Globals"`; - -exports[`FrontMatterComponent should compile set correct label for index without a README 1`] = `"Globals"`; - -exports[`FrontMatterComponent should parse a quoted string 1`] = `"xyx\\\\'s \\"quoted\\" title"`; - -exports[`FrontMatterComponent should prepend YAML block to start of page 1`] = ` -"--- -id: \\"_access_access_\\" -title: \\"Page title\\" -sidebar_label: \\"Page title\\" ---- - -[CONTENT]" -`; - -exports[`FrontMatterComponent should set correct label for index with a README 1`] = `"README"`; - -exports[`FrontMatterComponent should set correct title for index page 1`] = `"Project name"`; - -exports[`FrontMatterComponent should set correct title for index page if packageInfo label available 1`] = `"Package Label"`; - -exports[`FrontMatterComponent should set correct title for pages without a navigation match 1`] = `"Project name"`; - -exports[`FrontMatterComponent should set id 1`] = `"_access_access_"`; diff --git a/docs-gen/src/components/__snapshots__/helpers.component.spec.ts.snap b/docs-gen/src/components/__snapshots__/helpers.component.spec.ts.snap deleted file mode 100644 index 76f3e10d81f93..0000000000000 --- a/docs-gen/src/components/__snapshots__/helpers.component.spec.ts.snap +++ /dev/null @@ -1,27 +0,0 @@ -// Jest Snapshot v1, https://goo.gl/fbAQLP - -exports[`HelpersComponent should build @link references' 1`] = ` -"Taken from http://usejsdoc.org/tags-inline-link.html. -" -`; - -exports[`HelpersComponent should convert comments with includes' 1`] = ` -"This is a simple example on how to use include. - -![My image alt text](../media/logo.png) - -![My not found image](media://VOID.png) - - -This is an example of handlebars include - -This is a simple example on a handlebars file. 
-" -`; - -exports[`HelpersComponent should convert symbols brackets to symbol links' 1`] = ` -"- Link to an external reflection: [BaseClass](../classes/_classes_.baseclass.md) -- Link to an internal reflection: [commentsInReturn](_comments_.md#commentsinreturn) -- Link to an undefined reflection: [[VOID]] -" -`; diff --git a/docs-gen/src/components/front-matter.component.spec.ts b/docs-gen/src/components/front-matter.component.spec.ts deleted file mode 100644 index 25fe1486cd084..0000000000000 --- a/docs-gen/src/components/front-matter.component.spec.ts +++ /dev/null @@ -1,130 +0,0 @@ -import * as path from 'path'; -import { Application } from 'typedoc'; - -const { FrontMatterComponent } = require('components/front-matter.component'); -describe(`FrontMatterComponent`, () => { - let frontMatterComponent; - let app; - - beforeAll(() => { - app = new Application(); - app.bootstrap({ - mode: 'file', - module: 'CommonJS', - target: 'ES5', - readme: 'none', - theme: 'markdown', - logger: 'none', - plugin: path.join(__dirname, '../../../dist/index'), - }); - app.convert(['./test/stubs/functions.ts']); - app.renderer.addComponent('frontmatter', new FrontMatterComponent(app.renderer)); - frontMatterComponent = app.renderer.getComponent('frontmatter'); - }); - - test(`should prepend YAML block to start of page`, () => { - expect(true).toBeTruthy(); - }); - - test(`should prepend YAML block to start of page`, () => { - const spy = jest.spyOn(frontMatterComponent, 'getTitleFromNavigation').mockReturnValue('Page title'); - const page = { - contents: '[CONTENT]', - url: 'modules/_access_access_.md', - model: { name: '"access/access"' }, - project: { url: 'index.md' }, - }; - frontMatterComponent.onPageEnd(page); - expect(page.contents).toMatchSnapshot(); - spy.mockRestore(); - }); - - test(`should set id`, () => { - const page = { url: 'modules/_access_access_.md' }; - expect(frontMatterComponent.getId(page)).toMatchSnapshot(); - }); - - test(`should set correct title for index 
page`, () => { - const page = { - url: 'index.md', - project: { url: 'index.md', name: 'Project name' }, - }; - expect(frontMatterComponent.getTitle(page)).toMatchSnapshot(); - }); - - test(`should set correct title for pages without a navigation match`, () => { - const spy = jest.spyOn(frontMatterComponent, 'getTitleFromNavigation').mockReturnValue(null); - const page = { - url: 'index.md', - project: { url: 'page.md', name: 'Project name' }, - }; - expect(frontMatterComponent.getTitle(page)).toMatchSnapshot(); - spy.mockRestore(); - }); - - test(`should set correct title for index page if packageInfo label available`, () => { - const page = { - url: 'index.md', - project: { url: 'index.md', packageInfo: { label: 'Package Label' } }, - }; - expect(frontMatterComponent.getTitle(page)).toMatchSnapshot(); - }); - - test(`should compile set correct label for index without a README`, () => { - const spy = jest.spyOn(frontMatterComponent, 'getTitleFromNavigation').mockReturnValue(null); - const page = { - url: 'index.md', - project: { url: 'index.md' }, - }; - expect(frontMatterComponent.getLabel(page)).toMatchSnapshot(); - spy.mockRestore(); - }); - - test(`should set correct label for index with a README`, () => { - const spy = jest.spyOn(frontMatterComponent, 'getTitleFromNavigation').mockReturnValue(null); - const page = { - url: 'index.md', - project: { url: 'index.md', readme: 'README' }, - }; - expect(frontMatterComponent.getLabel(page)).toMatchSnapshot(); - spy.mockRestore(); - }); - - test(`should set correct label for globals file`, () => { - const page = { - url: 'globals.md', - project: { url: 'index.md' }, - }; - expect(frontMatterComponent.getLabel(page)).toMatchSnapshot(); - }); - - test(`should parse a quoted string`, () => { - expect(frontMatterComponent.escapeYAMLString(`xyx's "quoted" title`)).toMatchSnapshot(); - }); - - test(`should find title from navigation object`, () => { - const page = { - navigation: { - children: [ - { - url: 'urla', - title: 
'titlea', - children: [ - { url: 'urla1', title: 'titlea1' }, - { - url: 'urlb2', - title: 'titleb2', - children: [{ url: 'urlc1', title: 'titlec1' }], - }, - ], - }, - { url: 'urlb', title: 'titleb' }, - ], - }, - }; - expect(frontMatterComponent.getTitleFromNavigation(page, 'urlb')).toEqual('titleb'); - expect(frontMatterComponent.getTitleFromNavigation(page, 'urla1')).toEqual('titlea1'); - expect(frontMatterComponent.getTitleFromNavigation(page, 'urlc1')).toEqual('titlec1'); - expect(frontMatterComponent.getTitleFromNavigation(page, 'url')).toEqual(null); - }); -}); diff --git a/docs-gen/src/components/front-matter.component.ts b/docs-gen/src/components/front-matter.component.ts deleted file mode 100644 index 02e46c09014b4..0000000000000 --- a/docs-gen/src/components/front-matter.component.ts +++ /dev/null @@ -1,78 +0,0 @@ -import * as path from 'path'; -import { NavigationItem } from 'typedoc'; -import { Component, ContextAwareRendererComponent } from 'typedoc/dist/lib/output/components'; -import { PageEvent } from 'typedoc/dist/lib/output/events'; - -@Component({ name: 'frontmatter' }) -export class FrontMatterComponent extends ContextAwareRendererComponent { - initialize() { - super.initialize(); - - this.listenTo(this.application.renderer, { - [PageEvent.END]: this.onPageEnd, - }); - } - - onPageEnd(page: PageEvent) { - page.contents = page.contents.replace(/^/, this.getYamlString(page) + '\n\n').replace(/[\r\n]{3,}/g, '\n\n'); - } - - getYamlString(page: PageEvent) { - const yaml = `--- -id: "${this.escapeYAMLString(this.getId(page))}" -title: "${this.escapeYAMLString(this.getTitle(page))}" -sidebar_label: "${this.escapeYAMLString(this.getLabel(page))}" ----`; - return yaml; - } - - getId(page: PageEvent) { - return this.stripExt(page.url); - } - - getTitle(page: PageEvent) { - if (page.url === page.project.url) { - return this.getProjectName(page); - } - return this.getTitleFromNavigation(page, page.url) || this.getProjectName(page); - } - - getLabel(page: 
PageEvent) { - if (this.stripExt(page.url) === 'globals') { - return 'Globals'; - } - const title = this.getTitleFromNavigation(page, page.url); - return title ? title : !!page.project.readme ? 'README' : 'Globals'; - } - - // prettier-ignore - escapeYAMLString(str: string) { - return str.replace(/([^\\])'/g, '$1\\\''); - } - - getProjectName(page: PageEvent) { - return (page.project.packageInfo && page.project.packageInfo.label) || page.project.name; - } - - getTitleFromNavigation(page: PageEvent, url: string) { - const item = this.findNavigationItem(page.navigation.children, url, null); - return item ? item.title : null; - } - - findNavigationItem(navigation: NavigationItem[], url, item: NavigationItem) { - navigation.forEach(navigationChild => { - if (navigationChild.url === url) { - item = navigationChild; - return; - } - if (navigationChild.children) { - item = this.findNavigationItem(navigationChild.children, url, item); - } - }); - return item; - } - - stripExt(url: string) { - return path.basename(url, path.extname(url)); - } -} diff --git a/docs-gen/src/components/helpers.component.spec.ts b/docs-gen/src/components/helpers.component.spec.ts deleted file mode 100644 index 9738e4d26cf59..0000000000000 --- a/docs-gen/src/components/helpers.component.spec.ts +++ /dev/null @@ -1,59 +0,0 @@ -import * as fs from 'fs-extra'; -import * as Handlebars from 'handlebars'; -import * as path from 'path'; -import { Application } from 'typedoc'; - -describe(`HelpersComponent`, () => { - let app; - let project; - let pluginInstance; - const out = path.join(__dirname, 'tmp'); - beforeAll(() => { - app = new Application(); - app.bootstrap({ - module: 'CommonJS', - target: 'ES5', - readme: 'none', - theme: 'markdown', - logger: 'none', - includes: './test/stubs/inc/', - media: './test/stubs/media/', - listInvalidSymbolLinks: true, - plugin: path.join(__dirname, '../../dist/index'), - }); - project = app.convert(app.expandInputFiles(['./test/stubs/'])); - 
app.generateDocs(project, out); - pluginInstance = app.renderer.getComponent('helpers'); - }); - - afterAll(() => { - fs.removeSync(out); - }); - - test(`should define helper'`, () => { - const helpers = Handlebars.helpers; - expect(helpers.comment).toBeDefined(); - }); - - test(`should convert symbols brackets to symbol links'`, () => { - expect( - Handlebars.helpers.comment.call(project.findReflectionByName('commentsWithSymbolLinks').comment.text), - ).toMatchSnapshot(); - }); - - test(`should set warnings if symbol not found'`, () => { - expect(pluginInstance.warnings.length > 0).toBeTruthy(); - }); - - test(`should convert comments with includes'`, () => { - expect( - Handlebars.helpers.comment.call(project.findReflectionByName('commentsWithIncludes').comment.text), - ).toMatchSnapshot(); - }); - - test(`should build @link references'`, () => { - expect( - Handlebars.helpers.comment.call(project.findReflectionByName('functionWithDocLink').signatures[0].comment.text), - ).toMatchSnapshot(); - }); -}); diff --git a/docs-gen/src/components/helpers.component.ts b/docs-gen/src/components/helpers.component.ts deleted file mode 100644 index 78d186644fcb1..0000000000000 --- a/docs-gen/src/components/helpers.component.ts +++ /dev/null @@ -1,230 +0,0 @@ -import * as fs from 'fs-extra'; -import * as Handlebars from 'handlebars'; -import * as path from 'path'; -import { MarkedLinksPlugin, ProjectReflection, Reflection } from 'typedoc'; -import { Component, ContextAwareRendererComponent } from 'typedoc/dist/lib/output/components'; -import { PageEvent, RendererEvent } from 'typedoc/dist/lib/output/events'; -import * as Util from 'util'; - -import MarkdownTheme from '../theme'; - -/** - * This component is essentially a combination of TypeDoc's 'MarkedPlugin' and 'MarkedLinksPlugin'. - * The options are unchanged , but strips out all of the html configs. 
- */ - -@Component({ name: 'helpers' }) -export class ContextAwareHelpersComponent extends ContextAwareRendererComponent { - /** - * The path referenced files are located in. - */ - private includes?: string; - - /** - * Path to the output media directory. - */ - private mediaDirectory?: string; - - /** - * The pattern used to find references in markdown. - */ - private includePattern: RegExp = /\[\[include:([^\]]+?)\]\]/g; - - /** - * The pattern used to find media links. - */ - private mediaPattern: RegExp = /media:\/\/([^ "\)\]\}]+)/g; - - /** - * Regular expression for detecting bracket links. - */ - private brackets: RegExp = /\[\[([^\]]+)\]\]/g; - - /** - * Regular expression for detecting inline tags like {@link ...}. - */ - private inlineTag: RegExp = /(?:\[(.+?)\])?\{@(link|linkcode|linkplain)\s+((?:.|\n)+?)\}/gi; - - private listInvalidSymbolLinks: boolean; - - private warnings: string[] = []; - - initialize() { - super.initialize(); - - this.includes = this.application.options.getValue('includes'); - this.mediaDirectory = this.application.options.getValue('media'); - this.listInvalidSymbolLinks = this.application.options.getValue('listInvalidSymbolLinks'); - - this.listenTo( - this.owner, - { - [RendererEvent.END]: this.onEndRenderer, - }, - undefined, - 100, - ); - - const component = this; - - MarkdownTheme.handlebars.registerHelper('comment', function(this: string) { - return component.parseComments(this); - }); - - MarkdownTheme.handlebars.registerHelper('breadcrumbs', function(this: PageEvent) { - return component.breadcrumb(this.model, this.project, []); - }); - - MarkdownTheme.handlebars.registerHelper('relativeURL', (url: string) => { - return url ? 
this.getRelativeUrl(url) : url; - }); - } - - public breadcrumb(model: Reflection, project: ProjectReflection, md: string[]) { - const theme = this.application.renderer.theme as MarkdownTheme; - if (model && model.parent) { - this.breadcrumb(model.parent, project, md); - if (model.url) { - md.push(`[${model.name}](${this.getRelativeUrl(model.url)})`); - } else { - md.push(model.url); - } - } else { - if (!!project.readme) { - md.push(`[${project.name}](${this.getRelativeUrl(theme.indexName + theme.fileExt)})`); - } - md.push(`[${project.readme ? 'Globals' : project.name}](${this.getRelativeUrl(project.url)})`); - } - return md.join(' › '); - } - - /** - * Parse the given comemnts string and return the resulting html. - * - * @param text The markdown string that should be parsed. - * @param context The current handlebars context. - * @returns The resulting html string. - */ - public parseComments(text: string) { - const context = Object.assign(text, ''); - - if (this.includes) { - text = text.replace(this.includePattern, (match: string, includesPath: string) => { - includesPath = path.join(this.includes!, includesPath.trim()); - if (fs.existsSync(includesPath) && fs.statSync(includesPath).isFile()) { - const contents = fs.readFileSync(includesPath, 'utf-8'); - if (includesPath.slice(-4).toLocaleLowerCase() === '.hbs') { - const template = Handlebars.compile(contents); - return template(context); - } else { - return contents; - } - } else { - return ''; - } - }); - } - - if (this.mediaDirectory) { - text = text.replace(this.mediaPattern, (match: string, mediaPath: string) => { - if (fs.existsSync(path.join(this.mediaDirectory!, mediaPath))) { - return this.getRelativeUrl('media') + '/' + mediaPath; - } else { - return match; - } - }); - } - - return this.replaceInlineTags(this.replaceBrackets(text)); - } - - /** - * Find all references to symbols within the given text and transform them into a link. 
- * - * This function is aware of the current context and will try to find the symbol within the - * current reflection. It will walk up the reflection chain till the symbol is found or the - * root reflection is reached. As a last resort the function will search the entire project - * for the given symbol. - * - * @param text The text that should be parsed. - * @returns The text with symbol references replaced by links. - */ - private replaceBrackets(text: string): string { - return text.replace(this.brackets, (match: string, content: string): string => { - const split = MarkedLinksPlugin.splitLinkText(content); - return this.buildLink(match, split.target, split.caption); - }); - } - - /** - * Find symbol {@link ...} strings in text and turn into html links - * - * @param text The string in which to replace the inline tags. - * @return The updated string. - */ - private replaceInlineTags(text: string): string { - return text.replace(this.inlineTag, (match: string, leading: string, tagName: string, content: string): string => { - const split = MarkedLinksPlugin.splitLinkText(content); - const target = split.target; - const caption = leading || split.caption; - const monospace = tagName === 'linkcode'; - - return this.buildLink(match, target, caption, monospace); - }); - } - - /** - * Format a link with the given text and target. - * - * @param original The original link string, will be returned if the target cannot be resolved.. - * @param target The link target. - * @param caption The caption of the link. - * @param monospace Whether to use monospace formatting or not. - * @returns A html link tag. 
- */ - private buildLink(original: string, target: string, caption: string, monospace?: boolean): string { - if (!this.urlPrefix.test(target)) { - let reflection: Reflection | undefined; - if (this.reflection) { - reflection = this.reflection.findReflectionByName(target); - } else if (this.project) { - reflection = this.project.findReflectionByName(target); - } - - if (reflection && reflection.url) { - if (this.urlPrefix.test(reflection.url)) { - target = reflection.url; - } else { - target = this.getRelativeUrl(reflection.url); - } - } else { - const fullName = (this.reflection || this.project)!.getFullName(); - this.warnings.push(`In ${fullName}: ${original}`); - return original; - } - } - - if (monospace) { - caption = '`' + caption + '`'; - } - - return Util.format('[%s](%s)', caption, target); - } - - /** - * Triggered when [[Renderer]] is finished - */ - onEndRenderer(event: RendererEvent) { - if (this.listInvalidSymbolLinks && this.warnings.length > 0) { - this.application.logger.write(''); - this.application.logger.warn( - 'Found invalid symbol reference(s) in JSDocs, ' + - 'they will not render as links in the generated documentation.', - ); - - for (const warning of this.warnings) { - this.application.logger.write(' ' + warning); - } - } - } -} diff --git a/docs-gen/src/components/options.component.ts b/docs-gen/src/components/options.component.ts deleted file mode 100644 index d938ce4569f4d..0000000000000 --- a/docs-gen/src/components/options.component.ts +++ /dev/null @@ -1,31 +0,0 @@ -import { Component, ContextAwareRendererComponent } from 'typedoc/dist/lib/output/components'; - -import MarkdownTheme from '../theme'; - -@Component({ name: 'options' }) -export class OptionsComponent extends ContextAwareRendererComponent { - initialize() { - super.initialize(); - - const namedAnchors = this.application.options.getValue('namedAnchors'); - const hideBreadcrumbs = this.application.options.getValue('hideBreadcrumbs'); - const hideIndexes = 
this.application.options.getValue('hideIndexes'); - const hideSourceFiles = this.application.options.getValue('hideSources'); - - MarkdownTheme.handlebars.registerHelper('ifNamedAnchors', function(options) { - return namedAnchors ? options.fn(this) : options.inverse(this); - }); - - MarkdownTheme.handlebars.registerHelper('ifBreadcrumbs', function(options) { - return hideBreadcrumbs ? options.inverse(this) : options.fn(this); - }); - - MarkdownTheme.handlebars.registerHelper('ifIndexes', function(options) { - return hideIndexes ? options.inverse(this) : options.fn(this); - }); - - MarkdownTheme.handlebars.registerHelper('ifSources', function(options) { - return hideSourceFiles ? options.inverse(this) : options.fn(this); - }); - } -} diff --git a/docs-gen/src/index.ts b/docs-gen/src/index.ts deleted file mode 100644 index 869d23835914f..0000000000000 --- a/docs-gen/src/index.ts +++ /dev/null @@ -1,73 +0,0 @@ -import { Application } from 'typedoc/dist/lib/application'; -import { ParameterType } from 'typedoc/dist/lib/utils/options/declaration'; - -import { MarkdownPlugin } from './plugin'; -import CubejsGroupPlugin from './plugins/CubejsGroupPlugin'; -import NoInheritPlugin from './plugins/NoInheritPlugin'; -import {LinkPlugin} from './plugins/LinkPlugin'; - -export = (PluginHost: Application) => { - const app = PluginHost.owner; - if (app.converter.hasComponent('markdown')) { - return; - } - - app.options.addDeclaration({ - help: 'Markdown Plugin: Deprecated in favour of theme.', - name: 'platform', - type: ParameterType.String, - }); - - app.options.addDeclaration({ - help: 'Markdown Plugin: Deprecated.', - name: 'hideProjectTitle', - type: ParameterType.Boolean, - }); - - app.options.addDeclaration({ - help: 'Markdown Plugin: Do not print source file link rendering.', - name: 'hideSources', - type: ParameterType.Boolean, - }); - - app.options.addDeclaration({ - help: 'Markdown Plugin: Do not print breadcrumbs.', - name: 'hideBreadcrumbs', - type: 
ParameterType.Boolean, - }); - - app.options.addDeclaration({ - help: 'Markdown Plugin: Do not print indexes.', - name: 'hideIndexes', - type: ParameterType.Boolean, - }); - - app.options.addDeclaration({ - help: - 'Markdown Plugin: Use HTML named anchors as fragment identifiers for engines that do not automatically assign header ids.', - name: 'namedAnchors', - type: ParameterType.Boolean, - }); - - app.options.addDeclaration({ - help: - 'Markdown Plugin: Use long navigation title instead of default short one (applicable to navigation / front-matter only).', - name: 'longTitle', - type: ParameterType.Boolean, - }); - - app.options.addDeclaration({ - help: 'Skips updating of the sidebar.json file when used with docusaurus or docusaurus2 theme', - name: 'skipSidebar', - type: ParameterType.Boolean, - - }); - - app.converter.addComponent('markdown', new MarkdownPlugin(app.converter)); - - app.converter.removeComponent('group'); - app.converter.addComponent('cubejs-group', new CubejsGroupPlugin(app.converter)) - - app.converter.addComponent('no-inherit', new NoInheritPlugin(app.converter)); - app.converter.addComponent('link', new LinkPlugin(app.converter)); -}; diff --git a/docs-gen/src/plugin.ts b/docs-gen/src/plugin.ts deleted file mode 100644 index a0a0f5436dd8e..0000000000000 --- a/docs-gen/src/plugin.ts +++ /dev/null @@ -1,41 +0,0 @@ -import * as path from 'path'; -import { Renderer } from 'typedoc'; -import { Converter } from 'typedoc/dist/lib/converter'; -import { Component, ConverterComponent } from 'typedoc/dist/lib/converter/components'; - -@Component({ name: 'markdown' }) -export class MarkdownPlugin extends ConverterComponent { - initialize() { - this.listenTo(this.owner, { - [Converter.EVENT_BEGIN]: this.onBegin, - [Converter.EVENT_RESOLVE_BEGIN]: this.onResolveBegin, - }); - } - - /** - * Overide the default assets for any custom themes to inherit - */ - onBegin() { - Renderer.getDefaultTheme = () => path.join(__dirname, 'resources'); - } - - /** - * 
Read the theme option and load the paths of any recognised built in themes - * Otherwise pass the path through to the Renderer - */ - onResolveBegin() { - const options = this.application.options; - const theme = (options.getValue('platform') as string) || (options.getValue('theme') as string); - - // if the theme is 'default' or 'markdown' load the base markdown theme - if (theme === 'default' || theme === 'markdown') { - options.setValue('theme', path.join(__dirname)); - } - - // load any built in sub themes - const subThemes = ['docusaurus', 'docusaurus2', 'vuepress', 'gitbook', 'bitbucket']; - if (subThemes.includes(theme)) { - options.setValue('theme', path.join(__dirname, 'subthemes', theme)); - } - } -} diff --git a/docs-gen/src/plugins/CubejsGroupPlugin.ts b/docs-gen/src/plugins/CubejsGroupPlugin.ts deleted file mode 100644 index e0bd48274c91c..0000000000000 --- a/docs-gen/src/plugins/CubejsGroupPlugin.ts +++ /dev/null @@ -1,313 +0,0 @@ -import { ContainerReflection, DeclarationReflection, Reflection, ReflectionKind } from 'typedoc'; -import { Context, Converter } from 'typedoc/dist/lib/converter'; -import { ConverterComponent } from 'typedoc/dist/lib/converter/components'; -import { Comment, ReferenceType, ReflectionGroup, SourceDirectory } from 'typedoc/dist/lib/models'; -import { Component } from 'typedoc/dist/lib/utils'; - -const STICKY_TAG_NAME = 'stickytypes'; - -@Component({ name: 'cubejs-group' }) -export default class CubejsGroupPlugin extends ConverterComponent { - /** - * Define the sort order of reflections. 
- */ - static WEIGHTS = [ - ReflectionKind.Class, - ReflectionKind.Function, - ReflectionKind.Global, - ReflectionKind.Module, - ReflectionKind.Namespace, - ReflectionKind.Interface, - ReflectionKind.Enum, - ReflectionKind.EnumMember, - ReflectionKind.TypeAlias, - - ReflectionKind.Constructor, - ReflectionKind.Event, - ReflectionKind.Property, - ReflectionKind.Variable, - ReflectionKind.Accessor, - ReflectionKind.Method, - ReflectionKind.ObjectLiteral, - - ReflectionKind.Parameter, - ReflectionKind.TypeParameter, - ReflectionKind.TypeLiteral, - ReflectionKind.CallSignature, - ReflectionKind.ConstructorSignature, - ReflectionKind.IndexSignature, - ReflectionKind.GetSignature, - ReflectionKind.SetSignature, - ]; - - /** - * Define the singular name of individual reflection kinds. - */ - static SINGULARS = (function () { - const singulars = {}; - singulars[ReflectionKind.Enum] = 'Enumeration'; - singulars[ReflectionKind.EnumMember] = 'Enumeration member'; - return singulars; - })(); - - /** - * Define the plural name of individual reflection kinds. - */ - static PLURALS = (function () { - const plurals = {}; - plurals[ReflectionKind.Class] = 'Classes'; - plurals[ReflectionKind.Property] = 'Properties'; - plurals[ReflectionKind.Enum] = 'Enumerations'; - plurals[ReflectionKind.EnumMember] = 'Enumeration members'; - plurals[ReflectionKind.TypeAlias] = 'Type aliases'; - return plurals; - })(); - - static orderByName = new Map(); - - /** - * Create a new CubejsGroupPlugin instance. 
- */ - initialize() { - this.listenTo( - this.owner, - { - [Converter.EVENT_RESOLVE]: this.onResolve, - [Converter.EVENT_RESOLVE_END]: this.onEndResolve, - }, - null, - 1 - ); - } - - private populateOrder(children: Reflection[] = []) { - const MAGIC = 100_000; - - function findOrderAndRemove(comment?: Comment) { - const orderTag = (comment?.tags || []).find((tag) => tag.tagName === 'order'); - - if (orderTag) { - comment.removeTags('order'); - // CommentPlugin.removeTags(comment, 'order'); - return parseInt(orderTag.text, 10) - MAGIC; - } - } - - function getOrder(reflection: Reflection) { - if (reflection.hasComment()) { - return findOrderAndRemove(reflection.comment); - } else if (reflection instanceof DeclarationReflection) { - return findOrderAndRemove(reflection.signatures?.[0]?.comment); - } - - return 0; - } - - children.forEach((reflection) => { - if (!CubejsGroupPlugin.orderByName.has(reflection.name)) { - CubejsGroupPlugin.orderByName.set(reflection.name, getOrder(reflection) || 0); - } - }); - } - - private onResolve(context: Context, reflection: ContainerReflection) { - reflection.kindString = CubejsGroupPlugin.getKindSingular(reflection.kind); - - if (reflection.children && reflection.children.length > 0) { - this.populateOrder(reflection.children); - reflection.children.sort(CubejsGroupPlugin.sortCallback); - reflection.groups = CubejsGroupPlugin.getReflectionGroups(reflection.children); - } - } - - /** - * Triggered when the converter has finished resolving a project. - * - * @param context The context object describing the current state the converter is in. 
- */ - private onEndResolve(context: Context) { - function walkDirectory(directory: SourceDirectory) { - directory.groups = CubejsGroupPlugin.getReflectionGroups(directory.getAllReflections()); - - for (const key in directory.directories) { - if (!directory.directories.hasOwnProperty(key)) { - continue; - } - walkDirectory(directory.directories[key]); - } - } - - const project = context.project; - if (project.children && project.children.length > 0) { - this.populateOrder(project.children); - project.children.sort(CubejsGroupPlugin.sortCallback); - project.groups = CubejsGroupPlugin.getReflectionGroups(project.children); - } - - walkDirectory(project.directory); - project.files.forEach((file) => { - file.groups = CubejsGroupPlugin.getReflectionGroups(file.reflections); - }); - } - - private static getStickyTypes(reflection: DeclarationReflection): string[] { - const typeNames = []; - let comment: Comment; - - if (reflection.comment?.getTag(STICKY_TAG_NAME) != null) { - comment = reflection.comment; - } - - if (!comment) { - reflection.signatures?.some((sig) => { - if (sig.comment?.getTag(STICKY_TAG_NAME) != null) { - comment = sig.comment; - return true; - } - return false; - }); - } - - if (comment) { - const { text } = comment.getTag(STICKY_TAG_NAME); - comment.removeTags(STICKY_TAG_NAME); - // CommentPlugin.removeTags(comment, STICKY_TAG_NAME); - - if (text.trim()) { - return text.split(',').map((name) => name.trim()); - } - - reflection.signatures?.forEach((sig) => { - // Parameter types - sig.parameters?.forEach((param) => { - if (param.type instanceof ReferenceType) { - typeNames.push(param.type.name); - } - }); - - // Return type - if (sig.type && sig.type instanceof ReferenceType) { - typeNames.push(sig.type.name); - } - }); - - reflection.extendedTypes?.forEach((type: ReferenceType) => { - type.typeArguments?.forEach((typeArgument: any) => { - typeArgument.name && typeNames.push(typeArgument.name); - }); - }); - } - - return typeNames; - } - - /** - * 
Create a grouped representation of the given list of reflections. - * - * Reflections are grouped by kind and sorted by weight and name. - * - * @param reflections The reflections that should be grouped. - * @returns An array containing all children of the given reflection grouped by their kind. - */ - static getReflectionGroups(reflections: Reflection[]): ReflectionGroup[] { - const groups = new Map(); - const handledReflections = new Set(); - const reflectionByName = new Map(); - - reflections.forEach((child) => reflectionByName.set(child.name, child)); - - reflections.forEach((child) => { - if (handledReflections.has(child.name)) { - return; - } - - let typeNames = []; - if (child instanceof DeclarationReflection) { - typeNames = CubejsGroupPlugin.getStickyTypes(child); - } - - if (!groups.has(child.kind)) { - groups.set(child.kind, new ReflectionGroup(CubejsGroupPlugin.getKindPlural(child.kind), child.kind)); - } - - groups.get(child.kind).children.push(child); - - typeNames.forEach((name) => { - if (reflectionByName.has(name)) { - (reflectionByName.get(name) as any).stickToParent = child.name; - groups.get(child.kind).children.push(reflectionByName.get(name)); - handledReflections.add(name); - } - }); - }); - - return [...groups.values()]; - } - - /** - * Transform the internal typescript kind identifier into a human readable version. - * - * @param kind The original typescript kind identifier. - * @returns A human readable version of the given typescript kind identifier. - */ - private static getKindString(kind: ReflectionKind): string { - let str = ReflectionKind[kind]; - str = str.replace(/(.)([A-Z])/g, (m, a, b) => a + ' ' + b.toLowerCase()); - return str; - } - - /** - * Return the singular name of a internal typescript kind identifier. - * - * @param kind The original internal typescript kind identifier. 
- * @returns The singular name of the given internal typescript kind identifier - */ - static getKindSingular(kind: ReflectionKind): string { - if (CubejsGroupPlugin.SINGULARS[kind]) { - return CubejsGroupPlugin.SINGULARS[kind]; - } else { - return CubejsGroupPlugin.getKindString(kind); - } - } - - /** - * Return the plural name of a internal typescript kind identifier. - * - * @param kind The original internal typescript kind identifier. - * @returns The plural name of the given internal typescript kind identifier - */ - static getKindPlural(kind: ReflectionKind): string { - if (CubejsGroupPlugin.PLURALS[kind]) { - return CubejsGroupPlugin.PLURALS[kind]; - } else { - return this.getKindString(kind) + 's'; - } - } - - /** - * Callback used to sort reflections by weight defined by ´CubejsGroupPlugin.WEIGHTS´ and name. - * - * @param a The left reflection to sort. - * @param b The right reflection to sort. - * @returns The sorting weight. - */ - static sortCallback(a: Reflection, b: Reflection): number { - const aWeight = CubejsGroupPlugin.orderByName.get(a.name) || CubejsGroupPlugin.WEIGHTS.indexOf(a.kind); - const bWeight = CubejsGroupPlugin.orderByName.get(b.name) || CubejsGroupPlugin.WEIGHTS.indexOf(b.kind); - - if (aWeight === bWeight) { - if (a.flags.isStatic && !b.flags.isStatic) { - return 1; - } - if (!a.flags.isStatic && b.flags.isStatic) { - return -1; - } - if (a.name === b.name) { - return 0; - } - return a.name > b.name ? 
1 : -1; - } else { - return aWeight - bWeight; - } - } -} diff --git a/docs-gen/src/plugins/LinkPlugin.ts b/docs-gen/src/plugins/LinkPlugin.ts deleted file mode 100644 index 1b9bb144e3a9e..0000000000000 --- a/docs-gen/src/plugins/LinkPlugin.ts +++ /dev/null @@ -1,68 +0,0 @@ -import { camelize, dasherize, underscore } from 'inflection'; -import { DeclarationReflection, Reflection, ReflectionKind, ParameterReflection, SignatureReflection } from 'typedoc'; -import { Context, Converter } from 'typedoc/dist/lib/converter'; -import { ConverterComponent } from 'typedoc/dist/lib/converter/components'; -import { Comment } from 'typedoc/dist/lib/models/comments'; -import { Component } from 'typedoc/dist/lib/utils'; - -const linkRegex = /{@see\s([^}]*)}/g; - -@Component({ name: 'link' }) -export class LinkPlugin extends ConverterComponent { - static anchorName(link) { - return ( - '#' + - dasherize(underscore(link.replace(/[A-Z]{2,}(?=[A-Z])/, (v) => camelize(v.toLowerCase())).replace(/#/g, '-'))) - ); - } - - static toLink(name, reflection: Reflection | string) { - let link = name; - - if (reflection instanceof Reflection) { - if (reflection.kindOf(ReflectionKind.TypeAlias) && !(reflection as any).stickToParent) { - link = `Types${name}`; - } - if ((reflection as any).stickToParent) { - link = (reflection as any).stickToParent + name; - } - } - - return `[${name}](${LinkPlugin.anchorName(link)})`; - } - - private static replaceAnnotations(comment: Comment, reflections: Reflection[]) { - const replacer = (_, name) => { - const reflection = reflections.find((reflection) => reflection.name === name); - return this.toLink(name, reflection); - } - comment.text = comment.text.replace(linkRegex, replacer); - comment.shortText = comment.shortText.replace(linkRegex, replacer); - } - - initialize() { - this.listenTo(this.owner, { - [Converter.EVENT_RESOLVE_END]: this.onEndResolve, - }); - } - - onEndResolve(context: Context) { - const reflections = 
Object.values(context.project.reflections); - - reflections.forEach((reflection) => { - reflection.comment && LinkPlugin.replaceAnnotations(reflection.comment, reflections); - - if (reflection instanceof DeclarationReflection) { - reflection.signatures?.forEach((sig) => { - sig.comment && LinkPlugin.replaceAnnotations(sig.comment, reflections); - }); - } - - if (reflection instanceof SignatureReflection) { - reflection.parameters?.forEach((param) => { - param.comment && LinkPlugin.replaceAnnotations(param.comment, reflections); - }); - } - }); - } -} diff --git a/docs-gen/src/plugins/NoInheritPlugin.ts b/docs-gen/src/plugins/NoInheritPlugin.ts deleted file mode 100644 index 07fb677b6fc90..0000000000000 --- a/docs-gen/src/plugins/NoInheritPlugin.ts +++ /dev/null @@ -1,188 +0,0 @@ -import { Reflection, ReflectionKind, DeclarationReflection } from 'typedoc/dist/lib/models/reflections/index'; -import { Component, ConverterComponent } from 'typedoc/dist/lib/converter/components'; -import { Converter } from 'typedoc/dist/lib/converter/converter'; -import { Context } from 'typedoc/dist/lib/converter/context'; -import { CommentPlugin } from 'typedoc/dist/lib/converter/plugins/CommentPlugin'; -import { Type, ReferenceType } from 'typedoc/dist/lib/models'; - -/** - * A handler that deals with inherited reflections. - */ -@Component({ name: 'no-inherit' }) -export default class NoInheritPlugin extends ConverterComponent { - /** - * A list of classes/interfaces that don't inherit reflections. - */ - private noInherit: DeclarationReflection[]; - - /** - * A list of reflections that are inherited from a super. - */ - private inheritedReflections: DeclarationReflection[]; - - /** - * Create a new CommentPlugin instance. 
- */ - initialize() { - this.listenTo(this.owner, Converter.EVENT_BEGIN, this.onBegin); - this.listenTo(this.owner, Converter.EVENT_CREATE_DECLARATION, this.onDeclaration, -100); // after CommentPlugin - this.listenTo(this.owner, Converter.EVENT_RESOLVE_BEGIN, this.onBeginResolve); - } - - /** - * Triggered when the converter begins converting a project. - * - * @param context The context object describing the current state the converter is in. - */ - private onBegin(context: Context) { - this.noInherit = []; - this.inheritedReflections = []; - } - - /** - * Triggered when the converter has created a declaration or signature reflection. - * - * Builds the list of classes/interfaces that don't inherit docs and - * the list of reflections that are inherited that could end up being removed. - * - * @param context The context object describing the current state the converter is in. - * @param reflection The reflection that is currently processed. - * @param node The node that is currently processed if available. - */ - private onDeclaration(context: Context, reflection: Reflection, node?) { - if (reflection instanceof DeclarationReflection) { - // class or interface that won't inherit docs - if ( - reflection.kindOf(ReflectionKind.ClassOrInterface) && - reflection.comment && - reflection.comment.hasTag('noinheritdoc') - ) { - this.noInherit.push(reflection); - reflection.comment.removeTags('noinheritdoc') - } - // class or interface member inherited from a super - if ( - reflection.inheritedFrom && - reflection.parent && - reflection.parent.kindOf(ReflectionKind.ClassOrInterface) && - (!reflection.overwrites || (reflection.overwrites && reflection.overwrites !== reflection.inheritedFrom)) - ) { - this.inheritedReflections.push(reflection); - } - } - } - - /** - * Triggered when the converter begins resolving a project. - * - * Goes over the list of inherited reflections and removes any that are down the hierarchy - * from a class that doesn't inherit docs. 
- * - * @param context The context object describing the current state the converter is in. - */ - private onBeginResolve(context: Context) { - if (this.noInherit) { - const project = context.project; - const removals: Reflection[] = []; - - this.inheritedReflections.forEach((reflection) => { - // Look through the inheritance chain for a reflection that is flagged as noInherit for this reflection - if (this.isNoInheritRecursive(context, reflection, 0)) { - removals.push(reflection); - } - }); - - removals.forEach((removal) => { - project.removeReflection(removal); - }); - } - } - - /** - * Checks whether some DeclarationReflection is in the noInherit list. - * @param search The DeclarationReflection to search for in the list. - */ - private isNoInherit(search: DeclarationReflection): boolean { - if (this.noInherit.find((no: DeclarationReflection) => no.id === search.id && no.name === search.name)) { - return true; - } - return false; - } - - /** - * Checks whether some Reflection is in the inheritedReflections list. - * @param search The Reflection to search for in the list. - */ - private isInherited(search: Reflection): boolean { - if (this.inheritedReflections.find((inh: Reflection) => inh.id === search.id && inh.name === search.name)) { - return true; - } - return false; - } - - /** - * Checks whether some reflection's inheritance chain is broken by a class or interface that doesn't inherit docs. - * @param context The context object describing the current state the converter is in. - * @param current The current reflection being evaluated for non-inheritance. - * @param depth The current recursion depth, used for stopping on excessively long inheritance chains. 
- */ - private isNoInheritRecursive(context: Context, current: Reflection, depth: number): boolean { - if (depth > 20) { - this.application.logger.warn( - `Found inheritance chain with depth > 20, stopping no inherit check: ${current.getFullName()}` - ); - return false; // stop if we've recursed more than 20 times - } - - // As we move up the chain, check if the reflection parent is in the noInherit list - const parent = current.parent as DeclarationReflection; - if (!parent) return false; - if (this.isNoInherit(parent) && (depth === 0 || this.isInherited(current))) { - return true; - } - - const checkExtended = (type: Type) => { - const extended = this.resolveType(context, parent, type); - if (extended instanceof Reflection) { - const upLevel = extended.getChildByName(current.name); - if (upLevel && this.isNoInheritRecursive(context, upLevel, depth + 1)) { - return true; - } - } - return false; - }; - - if (parent.extendedTypes) { - if (parent.extendedTypes.some(checkExtended)) { - return true; - } - } - - return false; - } - - /** - * Takes some ReferenceType and resolves it to a reflection. - * This is needed because we are operating prior to the TypePlugin resolving types. - * @param context The context object describing the current state the converter is in. - * @param reflection The reflection context. - * @param typeProp The type to find relative to the reflection. 
- */ - private resolveType(context: Context, reflection: Reflection, typeProp: Type): Reflection { - const project = context.project; - if (typeProp instanceof ReferenceType) { - const type: any = typeProp; - // @ts-ignore - if (type.symbolID === ReferenceType.SYMBOL_ID_RESOLVE_BY_NAME) { - return reflection.findReflectionByName(type.name); - // @ts-ignore - } else if (!type.reflection && type.symbolID !== ReferenceType.SYMBOL_ID_RESOLVED) { - // @ts-ignore - return project.reflections[project.symbolMapping[type.symbolID]]; - } else { - return type.reflection; - } - } - return null; - } -} diff --git a/docs-gen/src/resources/helpers/__snapshots__/_helpers.spec.ts.snap b/docs-gen/src/resources/helpers/__snapshots__/_helpers.spec.ts.snap deleted file mode 100644 index 3debe740cde71..0000000000000 --- a/docs-gen/src/resources/helpers/__snapshots__/_helpers.spec.ts.snap +++ /dev/null @@ -1,60 +0,0 @@ -// Jest Snapshot v1, https://goo.gl/fbAQLP - -exports[`Helpers declarationTitle helper should compi;e 1`] = `"**color**: *string* = \\"blue\\""`; - -exports[`Helpers literal helper should compile object literal 1`] = ` -"!spaces* **valueA**: *number* = 100 - -!spaces* **valueB**: *boolean* = true - -!spaces* **valueZ**: *string* = \\"foo\\" - -!spaces* **valueY**(): *string* - - -!spaces* **valueX**: *object* - -!spaces * **valueA**: *number[]* = [100, 200, 300] - -!spaces * **valueZ**: *string* = \\"foo\\" - -!spaces * **valueY**(\`z\`: string): *object* - - -!spaces * **a**: *string* = \\"test\\" - -!spaces * **b**: *string* = z - - -" -`; - -exports[`Helpers literal helper should compile type literal 1`] = ` -" -" -`; - -exports[`Helpers parameterNameAndType helper sould compile 1`] = `"▪\`Const\` **objectLiteral**: *object*"`; - -exports[`Helpers parameterTable helper should compile 1`] = ` -" -Name | Type | Default | Description | ------- | ------ | ------ | ------ | -\`valueA\` | string | \\"defaultValue\\" | A parameter with a default string value. 
| -\`valueB\` | number | 100 | A parameter with a default numeric value. | -\`valueC\` | number | Number.NaN | A parameter with a default NaN value. | -\`valueD\` | boolean | true | A parameter with a default boolean value. | -\`valueE\` | boolean | null | A parameter with a default null value. | -" -`; - -exports[`Helpers signatureTitle helper should compile 1`] = ` -"**functionWithParameters**(\`paramZ\`: string, \`paramG\`: any, \`paramA\`: Object): *number* -" -`; - -exports[`Helpers type helper should compile intrinsic type 1`] = `"string"`; - -exports[`Helpers typeAndParent helper should compile 1`] = `"[INameInterface](../interfaces/_classes_.inameinterface.md).[name](../interfaces/_classes_.inameinterface.md#name)"`; - -exports[`Helpers utils helpers should compile stripLineBreaks helper: line 1 line2 1`] = `"line 1 line2 "`; diff --git a/docs-gen/src/resources/helpers/_helpers.spec.ts b/docs-gen/src/resources/helpers/_helpers.spec.ts deleted file mode 100644 index 9f2d99e498fe8..0000000000000 --- a/docs-gen/src/resources/helpers/_helpers.spec.ts +++ /dev/null @@ -1,185 +0,0 @@ -import * as fs from 'fs-extra'; -import * as Handlebars from 'handlebars'; -import * as path from 'path'; -import { Application } from 'typedoc'; - -const handlebarsHelpersOptionsStub = { - fn: () => 'true', - inverse: () => 'false', - hash: {}, -}; - -describe(`Helpers`, () => { - let app; - let project: any; - const out = path.join(__dirname, 'tmp'); - - beforeAll(() => { - app = new Application(); - app.bootstrap({ - module: 'CommonJS', - target: 'ES5', - readme: 'none', - theme: 'markdown', - logger: 'none', - plugin: path.join(__dirname, '../../../dist/index'), - }); - project = app.convert(app.expandInputFiles(['./test/stubs/'])); - app.generateDocs(project, out); - }); - - afterAll(() => { - fs.removeSync(out); - }); - - describe(`utils helpers`, () => { - test(`should compile headings helper`, () => { - expect(Handlebars.helpers.heading.call(this, 2)).toEqual('##'); - }); - 
test(`should compile stripLineBreaks helper`, () => { - const result = Handlebars.helpers.stripLineBreaks.call('line 1\n line2\n'); - expect(result).toMatchSnapshot('line 1 line2'); - }); - test(`should compile spaces helper`, () => { - const result = Handlebars.helpers.spaces.call(this, 3); - expect(result).toEqual('!spaces '); - }); - }); - - describe(`declarationTitle helper`, () => { - test(`should compi;e`, () => { - expect(Handlebars.helpers.declarationTitle.call(project.findReflectionByName('color'))).toMatchSnapshot(); - }); - }); - - describe(`ifHasTypeDeclarations helper`, () => { - test(`should return true if ifHasTypeDeclarations is true and expectation is truthy`, () => { - const result = Handlebars.helpers.ifHasTypeDeclarations.call( - project.findReflectionByName('drawText').signatures[0], - true, - handlebarsHelpersOptionsStub, - ); - expect(result).toEqual('true'); - }); - - test(`should return true if ifHasTypeDeclarations is false and expectation is truthy`, () => { - const data = project.findReflectionByName('exportedFunction'); - const result = Handlebars.helpers.ifHasTypeDeclarations.call( - data.signatures[0], - true, - handlebarsHelpersOptionsStub, - ); - expect(result).toEqual('false'); - }); - - test(`should return true if ifHasTypeDeclarations is false and expectation is falsey`, () => { - const data = project.findReflectionByName('exportedFunction'); - const result = Handlebars.helpers.ifHasTypeDeclarations.call( - data.signatures[0], - false, - handlebarsHelpersOptionsStub, - ); - expect(result).toEqual('true'); - }); - }); - - describe(`ifIsLiteralType helper`, () => { - test(`should return true if isLiteralType is is true and expectation is truthy`, () => { - const data = project.findReflectionByName('objectLiteral'); - const result = Handlebars.helpers.ifIsLiteralType.call(data, true, handlebarsHelpersOptionsStub); - expect(result).toEqual('true'); - }); - - test(`should return false if isLiteralType is is true and expectation is 
falsey`, () => { - const data = project.findReflectionByName('objectLiteral'); - const result = Handlebars.helpers.ifIsLiteralType.call(data, false, handlebarsHelpersOptionsStub); - expect(result).toEqual('false'); - }); - - test(`should return true if isLiteralType is is false and expectation is falsey`, () => { - const data = project.findReflectionByName('color'); - const result = Handlebars.helpers.ifIsLiteralType.call(data, false, handlebarsHelpersOptionsStub); - expect(result).toEqual('true'); - }); - }); - - describe(`ifParentIsObjectLiteral helper`, () => { - test(`should return true if ifParentIsObjectLiteral is is true and expectation is truthy`, () => { - const data = { - parent: { - parent: { - kind: 2097152, - }, - }, - }; - const result = Handlebars.helpers.ifParentIsObjectLiteral.call(data, true, handlebarsHelpersOptionsStub); - expect(result).toEqual('true'); - }); - - test(`should return false if ifParentIsObjectLiteral is is false and expectation is truthy`, () => { - const data = {}; - const result = Handlebars.helpers.ifParentIsObjectLiteral.call(data, true, handlebarsHelpersOptionsStub); - expect(result).toEqual('false'); - }); - - test(`should return true if ifParentIsObjectLiteral is is false and expectation is falsey`, () => { - const data = {}; - const result = Handlebars.helpers.ifParentIsObjectLiteral.call(data, false, handlebarsHelpersOptionsStub); - expect(result).toEqual('true'); - }); - }); - - describe(`literal helper`, () => { - test(`should compile object literal`, () => { - const data = project.findReflectionByName('objectLiteral'); - const result = Handlebars.helpers.literal.call(data); - expect(result).toMatchSnapshot(); - }); - - test(`should compile type literal`, () => { - const data = project.findReflectionByName('typeLiteral'); - const result = Handlebars.helpers.literal.call(data); - expect(result).toMatchSnapshot(); - }); - }); - - describe(`parameterNameAndType helper`, () => { - test(`sould compile`, () => { - const data 
= project.findReflectionByName('objectLiteral'); - const result = Handlebars.helpers.parameterNameAndType.call(data); - expect(result).toMatchSnapshot(); - }); - }); - - describe(`parameterTable helper`, () => { - test(`should compile`, () => { - const data = project.findReflectionByName('functionWithDefaults'); - const result = Handlebars.helpers.parameterTable.call(data.signatures[0].parameters); - expect(result).toMatchSnapshot(); - }); - }); - - describe(`signatureTitle helper`, () => { - test(`should compile`, () => { - const data = project.findReflectionByName('functionWithParameters'); - const result = Handlebars.helpers.signatureTitle.call(data.signatures[0]); - expect(result).toMatchSnapshot(); - }); - }); - - describe(`typeAndParent helper`, () => { - test(`should compile`, () => { - const data = project.findReflectionByName('BaseClass'); - const result = Handlebars.helpers.typeAndParent.call(data.children[3].implementationOf); - expect(result).toMatchSnapshot(); - }); - }); - - describe(`type helper`, () => { - test(`should compile intrinsic type`, () => { - const data = project.findReflectionByName('color'); - const result = Handlebars.helpers.type.call(data.type); - expect(result).toMatchSnapshot(); - }); - }); -}); diff --git a/docs-gen/src/resources/helpers/breadcrumbs.ts b/docs-gen/src/resources/helpers/breadcrumbs.ts deleted file mode 100644 index d456a2d83cabd..0000000000000 --- a/docs-gen/src/resources/helpers/breadcrumbs.ts +++ /dev/null @@ -1,7 +0,0 @@ -import { PageEvent } from 'typedoc/dist/lib/output/events'; - -import MarkdownTheme from '../../theme'; - -export function breadcrumbs(this: PageEvent) { - return MarkdownTheme.handlebars.helpers.breadcrumbs.call(this); -} diff --git a/docs-gen/src/resources/helpers/comment.ts b/docs-gen/src/resources/helpers/comment.ts deleted file mode 100644 index a5ac2330a348f..0000000000000 --- a/docs-gen/src/resources/helpers/comment.ts +++ /dev/null @@ -1,5 +0,0 @@ -import MarkdownTheme from 
'../../theme'; - -export function comment(this: string) { - return MarkdownTheme.handlebars.helpers.comment.call(this); -} diff --git a/docs-gen/src/resources/helpers/declaration-title.ts b/docs-gen/src/resources/helpers/declaration-title.ts deleted file mode 100644 index a65fd0652ab3c..0000000000000 --- a/docs-gen/src/resources/helpers/declaration-title.ts +++ /dev/null @@ -1,58 +0,0 @@ -import { DeclarationReflection, ReflectionKind } from 'typedoc'; -import { ReferenceType } from 'typedoc/dist/lib/models'; -import { memberSymbol } from './member-symbol'; -import { type } from './type'; - -export function declarationTitle(this: DeclarationReflection, showSymbol: boolean) { - if (!this.type && !this.defaultValue && !this.typeHierarchy?.types.length) { - return ''; - } - if (this.type && type.call(this.type).toString() === 'object') { - return ''; - } - - const md = []; - const isOptional = this.flags.map((flag) => flag).includes('Optional'); - - if (showSymbol) { - md.push(`\n${memberSymbol.call(this)}\n`); - } - - md.push(`${this.name}${isOptional ? '? ' : ''}`); - - if (this.typeHierarchy?.types.length) { - if (this.typeHierarchy?.isTarget) { - return ''; - } - const [parent] = this.typeHierarchy.types; - if (parent instanceof ReferenceType) { - const name = parent.reflection === undefined ? 
parent.symbolFullyQualifiedName : parent.name; - - md.push('extends'); - md.push(name); - - if (parent.typeArguments) { - md.push(`‹${parent.typeArguments.map((typeArgument) => type.call(typeArgument)).join(', ')}›`.trim()); - } - } - } - - // We want to display enum members like: - // • DAY = "day" - if (this.kind !== ReflectionKind.EnumMember) { - md[md.length - 1] += ':'; - } - - if (this.type) { - md.push(type.call(this.type)); - } - if (this.defaultValue) { - md.push(`= ${this.defaultValue}`); - } - - if (showSymbol) { - md.push(`\n${memberSymbol.call(this)}\n`); - } - - return md.join(' '); -} diff --git a/docs-gen/src/resources/helpers/heading.ts b/docs-gen/src/resources/helpers/heading.ts deleted file mode 100644 index 6916dc07ec722..0000000000000 --- a/docs-gen/src/resources/helpers/heading.ts +++ /dev/null @@ -1,3 +0,0 @@ -export function heading(level: number) { - return [...Array(level)].map(() => '#').join(''); -} diff --git a/docs-gen/src/resources/helpers/hierarchy-level.ts b/docs-gen/src/resources/helpers/hierarchy-level.ts deleted file mode 100644 index 4c0ca9a5e90c3..0000000000000 --- a/docs-gen/src/resources/helpers/hierarchy-level.ts +++ /dev/null @@ -1,9 +0,0 @@ -import { DeclarationReflection, ReferenceType } from 'typedoc/dist/lib/models'; - -import { spaces } from './spaces'; - -export function hierachyLevel(this: ReferenceType) { - const reflection = this.reflection as DeclarationReflection; - const symbol = reflection && reflection.extendedTypes ? 
`${spaces(2)}↳` : '*'; - return symbol; -} diff --git a/docs-gen/src/resources/helpers/if-breadcrumbs.ts b/docs-gen/src/resources/helpers/if-breadcrumbs.ts deleted file mode 100644 index 599a9371e642f..0000000000000 --- a/docs-gen/src/resources/helpers/if-breadcrumbs.ts +++ /dev/null @@ -1,10 +0,0 @@ -import { PageEvent } from 'typedoc/dist/lib/output/events'; - -import MarkdownTheme from '../../theme'; - -export function ifBreadcrumbs(this: PageEvent, options) { - if (MarkdownTheme.isSingleFile) { - return options.inverse(this); - } - return MarkdownTheme.handlebars.helpers.ifBreadcrumbs.call(this, options); -} diff --git a/docs-gen/src/resources/helpers/if-cond.ts b/docs-gen/src/resources/helpers/if-cond.ts deleted file mode 100644 index 1d36ccc01d5c8..0000000000000 --- a/docs-gen/src/resources/helpers/if-cond.ts +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Handlebars if helper with condition. - * - * @param v1 The first value to be compared. - * @param operator The operand to perform on the two given values. - * @param v2 The second value to be compared - * @param options The current handlebars object. - * @param this The current handlebars this. - * @returns {*} - */ -export function ifCond(v1: any, operator: any, v2: any, options: any) { - switch (operator) { - case '==': - return v1 == v2 ? options.fn(this) : options.inverse(this); - case '!=': - return v1 != v2 ? options.fn(this) : options.inverse(this); - case '===': - return v1 === v2 ? options.fn(this) : options.inverse(this); - case '<': - return v1 < v2 ? options.fn(this) : options.inverse(this); - case '<=': - return v1 <= v2 ? options.fn(this) : options.inverse(this); - case '>': - return v1 > v2 ? options.fn(this) : options.inverse(this); - case '>=': - return v1 >= v2 ? options.fn(this) : options.inverse(this); - case '&&': - return v1 && v2 ? options.fn(this) : options.inverse(this); - case '||': - return v1 || v2 ? 
options.fn(this) : options.inverse(this); - default: - return options.inverse(this); - } -} diff --git a/docs-gen/src/resources/helpers/if-has-type-declarations.ts b/docs-gen/src/resources/helpers/if-has-type-declarations.ts deleted file mode 100644 index 32e82f9a4fa91..0000000000000 --- a/docs-gen/src/resources/helpers/if-has-type-declarations.ts +++ /dev/null @@ -1,21 +0,0 @@ -import { ReflectionType, SignatureReflection } from 'typedoc/dist/lib/models'; - -export function ifHasTypeDeclarations(this: SignatureReflection, truthy: boolean, options: any) { - const parameterDeclarations = - this.parameters && - this.parameters.map(parameter => { - const type = parameter.type as ReflectionType; - return ( - parameter.type && - type.declaration && - ((type.declaration.children && type.declaration.children.length > 0) || - (type.declaration.signatures && type.declaration.signatures.length > 0)) - ); - }); - const hasTypeDeclarations = parameterDeclarations && parameterDeclarations.some(parameterDeclaration => parameterDeclaration); - - if (hasTypeDeclarations && truthy) { - return options.fn(this); - } - return !hasTypeDeclarations && !truthy ? 
options.fn(this) : options.inverse(this); -} diff --git a/docs-gen/src/resources/helpers/if-indexes.ts b/docs-gen/src/resources/helpers/if-indexes.ts deleted file mode 100644 index 40ca5be5d8200..0000000000000 --- a/docs-gen/src/resources/helpers/if-indexes.ts +++ /dev/null @@ -1,5 +0,0 @@ -import MarkdownTheme from '../../theme'; - -export function ifIndexes(options) { - return MarkdownTheme.handlebars.helpers.ifIndexes.call(this, options); -} diff --git a/docs-gen/src/resources/helpers/if-is-function-type.ts b/docs-gen/src/resources/helpers/if-is-function-type.ts deleted file mode 100644 index 3ab2bed9cdffd..0000000000000 --- a/docs-gen/src/resources/helpers/if-is-function-type.ts +++ /dev/null @@ -1,11 +0,0 @@ -import { DeclarationReflection } from 'typedoc'; -import { type } from './type'; - -export function ifIsFunctionType(this: DeclarationReflection, truthy: boolean, options: any) { - const isFunctionType = type.call(this).toString() === 'function'; - - if (isFunctionType && truthy) { - return options.fn(this); - } - return !isFunctionType && !truthy ? options.fn(this) : options.inverse(this); -} diff --git a/docs-gen/src/resources/helpers/if-is-literal-type.ts b/docs-gen/src/resources/helpers/if-is-literal-type.ts deleted file mode 100644 index 28cbfeffb7113..0000000000000 --- a/docs-gen/src/resources/helpers/if-is-literal-type.ts +++ /dev/null @@ -1,9 +0,0 @@ -import { DeclarationReflection, ReflectionKind } from 'typedoc'; - -export function ifIsLiteralType(this: DeclarationReflection, truthy: boolean, options: any) { - const isLiteralType = this.kind === ReflectionKind.ObjectLiteral || this.kind === ReflectionKind.TypeLiteral; - if (isLiteralType && truthy) { - return options.fn(this); - } - return !isLiteralType && !truthy ? 
options.fn(this) : options.inverse(this); -} diff --git a/docs-gen/src/resources/helpers/if-named-anchors.ts b/docs-gen/src/resources/helpers/if-named-anchors.ts deleted file mode 100644 index ca90cab321838..0000000000000 --- a/docs-gen/src/resources/helpers/if-named-anchors.ts +++ /dev/null @@ -1,5 +0,0 @@ -import MarkdownTheme from '../../theme'; - -export function ifNamedAnchors(options) { - return MarkdownTheme.handlebars.helpers.ifNamedAnchors.call(this, options); -} diff --git a/docs-gen/src/resources/helpers/if-parent-is-module.ts b/docs-gen/src/resources/helpers/if-parent-is-module.ts deleted file mode 100644 index 9bb810c1789dc..0000000000000 --- a/docs-gen/src/resources/helpers/if-parent-is-module.ts +++ /dev/null @@ -1,9 +0,0 @@ -import { DeclarationReflection, ReflectionKind } from 'typedoc'; - -export function ifParentIsModule(this: DeclarationReflection, truthy: boolean, options: any) { - const parentIsModule = this.parent && this.parent.kind === ReflectionKind.Module; - if (parentIsModule && truthy) { - return options.fn(this); - } - return !parentIsModule && !truthy ? options.fn(this) : options.inverse(this); -} diff --git a/docs-gen/src/resources/helpers/if-parent-is-object-literal.ts b/docs-gen/src/resources/helpers/if-parent-is-object-literal.ts deleted file mode 100644 index 35ef7b7e550d1..0000000000000 --- a/docs-gen/src/resources/helpers/if-parent-is-object-literal.ts +++ /dev/null @@ -1,9 +0,0 @@ -import { DeclarationReflection, ReflectionKind } from 'typedoc'; - -export function ifParentIsObjectLiteral(this: DeclarationReflection, truthy: boolean, options: any) { - const parentIsObjectLiteral = this.parent && this.parent.parent && this.parent.parent.kind === ReflectionKind.ObjectLiteral; - if (parentIsObjectLiteral && truthy) { - return options.fn(this); - } - return !parentIsObjectLiteral && !truthy ? 
options.fn(this) : options.inverse(this); -} diff --git a/docs-gen/src/resources/helpers/if-parent-kind-is.ts b/docs-gen/src/resources/helpers/if-parent-kind-is.ts deleted file mode 100644 index 021e7a5c6b55c..0000000000000 --- a/docs-gen/src/resources/helpers/if-parent-kind-is.ts +++ /dev/null @@ -1,7 +0,0 @@ -import { DeclarationReflection } from 'typedoc'; - -export function ifParentKindIs(this: DeclarationReflection, kindString: string, truthy: boolean = true, options: any) { - const equals = this.parent && this.parent.kindString === kindString; - - return !equals && !truthy ? options.fn(this) : options.inverse(this); -} diff --git a/docs-gen/src/resources/helpers/if-sources.ts b/docs-gen/src/resources/helpers/if-sources.ts deleted file mode 100644 index 19a5ba9f11ce4..0000000000000 --- a/docs-gen/src/resources/helpers/if-sources.ts +++ /dev/null @@ -1,5 +0,0 @@ -import MarkdownTheme from '../../theme'; - -export function ifSources(options) { - return MarkdownTheme.handlebars.helpers.ifSources.call(this, options); -} diff --git a/docs-gen/src/resources/helpers/literal.ts b/docs-gen/src/resources/helpers/literal.ts deleted file mode 100644 index 56e7863537868..0000000000000 --- a/docs-gen/src/resources/helpers/literal.ts +++ /dev/null @@ -1,53 +0,0 @@ -import { DeclarationReflection } from 'typedoc'; -import { ReflectionKind, ReflectionType } from 'typedoc/dist/lib/models'; -import { declarationTitle } from './declaration-title'; -import { signatureTitle } from './signature-title'; -import { spaces } from './spaces'; - -export function literal(this: DeclarationReflection) { - const md = []; - - if (this.children) { - this.children.forEach(child => { - md.push(objectProperty(md, 0, child)); - }); - } - return md.join('') + '\n'; -} - -function objectProperty(md: any[], spaceLength: number, property: DeclarationReflection) { - if (property.type instanceof ReflectionType) { - md.push(`${spaces(spaceLength)}* ${signatureTitle.call(property, false)}\n\n`); - if 
(property.type.declaration) { - md.push(objectProperty(md, spaceLength + 2, property.type.declaration)); - } - if (property.type.declaration && property.type.declaration.signatures) { - property.type.declaration.signatures.forEach(signature => { - if (signature.kind !== ReflectionKind.CallSignature) { - md.push(`${spaces(spaceLength)}* ${signatureTitle.call(signature, false)}\n\n`); - if (signature.type instanceof ReflectionType) { - md.push(objectProperty(md, spaceLength + 2, signature.type.declaration)); - } - } - }); - } - } else { - if (property.signatures) { - property.signatures.forEach(signature => { - md.push(`${spaces(spaceLength)}* ${signatureTitle.call(signature, false)}\n\n`); - if (signature.type instanceof ReflectionType) { - md.push(objectProperty(md, spaceLength + 2, signature.type.declaration)); - } - }); - } else { - if (property.kind !== ReflectionKind.TypeLiteral) { - md.push(`${spaces(spaceLength)}* ${declarationTitle.call(property, false)}\n\n`); - } - } - } - if (property.children) { - property.children.forEach(child => { - md.push(objectProperty(md, property.kind === ReflectionKind.TypeLiteral ? 
spaceLength : spaceLength + 2, child)); - }); - } -} diff --git a/docs-gen/src/resources/helpers/member-symbol.ts b/docs-gen/src/resources/helpers/member-symbol.ts deleted file mode 100644 index bcc152442394c..0000000000000 --- a/docs-gen/src/resources/helpers/member-symbol.ts +++ /dev/null @@ -1,7 +0,0 @@ -import { - DeclarationReflection, -} from 'typedoc'; - -export function memberSymbol(this: DeclarationReflection) { - return '```'; -} diff --git a/docs-gen/src/resources/helpers/member-title.ts b/docs-gen/src/resources/helpers/member-title.ts deleted file mode 100644 index 0b491271edb1f..0000000000000 --- a/docs-gen/src/resources/helpers/member-title.ts +++ /dev/null @@ -1,33 +0,0 @@ -import { DeclarationReflection, ReflectionKind } from 'typedoc'; -import { heading } from './heading'; - -export function memberTitle(this: DeclarationReflection) { - if (this.parent?.kindOf(ReflectionKind.Enum)) { - return ''; - } - - const md = []; - let parentName = ''; - let headingLevel = 3; - if (!(this as any).stickToParent) { - if (this.parent?.kindOf(ReflectionKind.Module)) { - headingLevel = 2; - - if (this.kind === ReflectionKind.TypeAlias) { - // headingLevel = 4; - headingLevel = 3; - } - } - - const isParentTopLevel = this.parent.kind === 1; - const isHeadingLevel3 = headingLevel === 3; - parentName = isParentTopLevel - ? (isHeadingLevel3 ? 
'Types' : '') - : this.parent.name.replace(/"/, ''); - } - - - md.push(heading(headingLevel)); - md.push(this.name); - return md.join(' '); -} diff --git a/docs-gen/src/resources/helpers/meta.ts b/docs-gen/src/resources/helpers/meta.ts deleted file mode 100644 index 046e1f3eafb39..0000000000000 --- a/docs-gen/src/resources/helpers/meta.ts +++ /dev/null @@ -1,44 +0,0 @@ -import { ProjectReflection } from 'typedoc'; -import { CommentTag, ContainerReflection } from 'typedoc/dist/lib/models'; - -export function meta(this: ProjectReflection) { - function findModuleRelection(reflection?: ContainerReflection) { - if (!reflection) { - return null; - } - - if (reflection?.comment) { - return reflection; - } - - return findModuleRelection(reflection?.children?.[0]); - } - - function tagConverter(tag: string) { - const tags = { - menucategory: 'category', - subcategory: 'subCategory', - menuorder: 'menuOrder' - }; - - return tags[tag] ?? tag; - } - - const moduleReflection = findModuleRelection(this); - - if (moduleReflection) { - const { comment } = moduleReflection; - const title = (comment?.tags || []).find((tag: CommentTag) => tag.tagName === 'title'); - const md = [`# ${title.text}`]; - const description = (comment?.tags || []).find((tag: CommentTag) => tag.tagName === 'description'); - - if (description) { - md.push(''); - md.push(description.text); - } - - return md.join('\n'); - } - - return ''; -} diff --git a/docs-gen/src/resources/helpers/new-line.ts b/docs-gen/src/resources/helpers/new-line.ts deleted file mode 100644 index ff762e35d080f..0000000000000 --- a/docs-gen/src/resources/helpers/new-line.ts +++ /dev/null @@ -1,3 +0,0 @@ -export function newline() { - return '\n'; -} diff --git a/docs-gen/src/resources/helpers/param-type-to-string.ts b/docs-gen/src/resources/helpers/param-type-to-string.ts deleted file mode 100644 index 80d0b7330ad47..0000000000000 --- a/docs-gen/src/resources/helpers/param-type-to-string.ts +++ /dev/null @@ -1,28 +0,0 @@ -import { 
ParameterReflection } from 'typedoc'; -import { ReflectionType, UnionType } from 'typedoc/dist/lib/models'; - -import { signatureTitle } from './signature-title'; -import { type } from './type'; - -export default function paramTypeToString(parameter: ParameterReflection) { - let typeOut; - - if (parameter.type instanceof ReflectionType && parameter.type.toString() === 'function') { - const declarations = parameter.type.declaration.signatures?.map((sig) => signatureTitle.call(sig, false, true)); - typeOut = declarations.join(' | ').replace(/\n/, ''); - } else if (parameter.type instanceof UnionType) { - typeOut = parameter.type.types - .map((currentType) => { - if (currentType instanceof ReflectionType) { - const declarations = currentType.declaration.signatures?.map((sig) => signatureTitle.call(sig, false, true)); - return declarations.join(' | ').replace(/\n/, ''); - } - return type.call(currentType); - }) - .join(' | '); - } else { - typeOut = type.call(parameter.type); - } - - return typeOut; -} diff --git a/docs-gen/src/resources/helpers/parameter-name-and-type.ts b/docs-gen/src/resources/helpers/parameter-name-and-type.ts deleted file mode 100644 index 042db2f40a9f9..0000000000000 --- a/docs-gen/src/resources/helpers/parameter-name-and-type.ts +++ /dev/null @@ -1,20 +0,0 @@ -import { ParameterReflection } from 'typedoc'; -import { type } from './type'; - -export function parameterNameAndType(this: ParameterReflection, displaySymbol = true) { - const md = []; - if (displaySymbol) { - md.push('-'); - } - if (this.flags && !this.flags.isRest) { - md.push(this.flags.map(flag => `\`${flag}\` `)); - } - md.push(`${this.flags.isRest ? '...' 
: ''} **${this.name}**`); - if (this.type) { - md.push(`: \`${type.call(this.type)}\``); - } - if (this.defaultValue) { - md.push(`= ${this.defaultValue}`); - } - return md.join(''); -} diff --git a/docs-gen/src/resources/helpers/parameter-table-jsx.ts b/docs-gen/src/resources/helpers/parameter-table-jsx.ts deleted file mode 100644 index ece933f8abf15..0000000000000 --- a/docs-gen/src/resources/helpers/parameter-table-jsx.ts +++ /dev/null @@ -1,73 +0,0 @@ -import { ParameterReflection } from 'typedoc'; - -import paramTypeToString from './param-type-to-string'; - -const printJson = (input) => JSON.stringify(input, null, 2); - -export function parameterTableJsx(this: ParameterReflection[], hideUncommented: boolean) { - - const defaultValues = this.map((param) => !!param.defaultValue); - const hasDefaultValues = !defaultValues.every((value) => !value); - - const comments = this.map( - (param) => (param.comment && !!param.comment.text) || (param.comment && !!param.comment.shortText) - ); - const hasComments = !comments.every((value) => !value); - - const columns = [ - 'Name', - 'Type', - ]; - - if (hasDefaultValues) { - columns.push('Default'); - } - - if (hasComments) { - columns.push('Description'); - } - - if (hideUncommented && !hasComments) { - return ''; - } - - const data = this.map((parameter) => { - const isOptional = parameter.flags.includes('Optional'); - - const paramName = `${parameter.flags.isRest ? '...' : ''}${parameter.name}${isOptional ? '?' : ''}`; - const typeOut = paramTypeToString(parameter); - const paramType = typeOut - ? typeOut.toString() - : ''; - const commentsText = []; - - if (hasComments) { - if (parameter.comment && parameter.comment.shortText) { - commentsText.push( - parameter.comment.shortText - ); - } - if (parameter.comment && parameter.comment.text) { - parameter.comment.text - } - } - - return { - Name: paramName, - Type: paramType, - Default: parameter.defaultValue ? 
parameter.defaultValue : '-', - Description: commentsText.join(''), - }; - }); - - return ` - - -`; -} diff --git a/docs-gen/src/resources/helpers/parameter-table.ts b/docs-gen/src/resources/helpers/parameter-table.ts deleted file mode 100644 index 85aeb9e52cbb3..0000000000000 --- a/docs-gen/src/resources/helpers/parameter-table.ts +++ /dev/null @@ -1,80 +0,0 @@ -import { ParameterReflection } from 'typedoc'; - -import MarkdownTheme from '../../theme'; -import { stripLineBreaks } from './strip-line-breaks'; -import paramTypeToString from './param-type-to-string'; - -const escape = (s: string) => { - const lookup = { - '&': '&', - '"': '"', - '\'': ''', - '<': '<', - '>': '>', - '|': '|' - }; - const regex = new RegExp(`[${Object.keys(lookup).join('')}]`, 'g'); - return s.replace( regex, c => lookup[c] ); -} - -const wrapInCodeTags = (s) => `${s}` - -export function parameterTable(this: ParameterReflection[], hideUncommented: boolean) { - const md = []; - const defaultValues = this.map((param) => !!param.defaultValue); - const hasDefaultValues = !defaultValues.every((value) => !value); - - const comments = this.map( - (param) => (param.comment && !!param.comment.text) || (param.comment && !!param.comment.shortText) - ); - const hasComments = !comments.every((value) => !value); - - const headers = ['Name', 'Type']; - - if (hasDefaultValues) { - headers.push('Default'); - } - - if (hasComments) { - headers.push('Description'); - } - - if (hideUncommented && !hasComments) { - return ''; - } - - if (hideUncommented) { - md.push('**Parameters:**\n'); - } - - const rows = this.map((parameter) => { - const isOptional = parameter.flags.includes('Optional'); - - const typeOut = paramTypeToString(parameter); - - const row = [ - wrapInCodeTags(`${parameter.flags.isRest ? '...' : ''}${parameter.name}${isOptional ? '?' : ''}`), - typeOut ? wrapInCodeTags(escape(typeOut.toString())) : '', - ]; - if (hasDefaultValues) { - row.push(parameter.defaultValue ? 
parameter.defaultValue : '-'); - } - if (hasComments) { - const commentsText = []; - if (parameter.comment && parameter.comment.shortText) { - commentsText.push( - MarkdownTheme.handlebars.helpers.comment.call(stripLineBreaks.call(parameter.comment.shortText)) - ); - } - if (parameter.comment && parameter.comment.text) { - commentsText.push(MarkdownTheme.handlebars.helpers.comment.call(stripLineBreaks.call(parameter.comment.text))); - } - row.push(commentsText.length > 0 ? commentsText.join(' ') : '-'); - } - return `| ${row.join(' | ')} |\n`; - }); - - md.push(`\n| ${headers.join(' | ')} |\n| ${headers.map(() => '------').join(' | ')} |\n${rows.join('')}`); - - return md.join(''); -} diff --git a/docs-gen/src/resources/helpers/reflection-title.ts b/docs-gen/src/resources/helpers/reflection-title.ts deleted file mode 100644 index 5893f23cef905..0000000000000 --- a/docs-gen/src/resources/helpers/reflection-title.ts +++ /dev/null @@ -1,14 +0,0 @@ -import { PageEvent } from 'typedoc/dist/lib/output/events'; - -export function reflectionTitle(this: PageEvent) { - const title = []; - if (this.model.kindString) { - title.push(`${this.model.kindString}:`); - } - title.push(this.model.name); - if (this.model.typeParameters) { - const typeParameters = this.model.typeParameters.map((typeParameter) => typeParameter.name).join(', '); - title.push(`‹**${typeParameters}**›`); - } - return title.join(' '); -} diff --git a/docs-gen/src/resources/helpers/relative-url.ts b/docs-gen/src/resources/helpers/relative-url.ts deleted file mode 100644 index 8d8d4b4a28623..0000000000000 --- a/docs-gen/src/resources/helpers/relative-url.ts +++ /dev/null @@ -1,5 +0,0 @@ -import MarkdownTheme from '../../theme'; - -export function relativeURL(url: string) { - return MarkdownTheme.handlebars.helpers.relativeURL(url); -} diff --git a/docs-gen/src/resources/helpers/signature-title.ts b/docs-gen/src/resources/helpers/signature-title.ts deleted file mode 100644 index 5e5fff0a61aa9..0000000000000 --- 
a/docs-gen/src/resources/helpers/signature-title.ts +++ /dev/null @@ -1,70 +0,0 @@ -import { SignatureReflection } from 'typedoc'; - -import { memberSymbol } from './member-symbol'; -import { type } from './type'; -import { ReflectionType } from 'typedoc/dist/lib/models'; -import paramTypeToString from './param-type-to-string'; - -export function signatureTitle(this: SignatureReflection, showSymbol: boolean = false) { - const md = []; - - if (showSymbol) { - md.push(`\n${memberSymbol.call(this)}typescript\n`); - } - - // eg: `static` - if (this.parent?.flags) { - md.push( - this.parent.flags - .map((flag) => `${flag} `) - .join(' ') - .toLowerCase() - ); - // md.push(' '); - } - - if (this.name === '__get') { - md.push(`**get ${this.parent.name}**`); - } else if (this.name === '__set') { - md.push(`**set ${this.parent.name}**`); - } else if (this.name !== '__call') { - md.push(this.name); - } - if (this.typeParameters) { - md.push(`<${this.typeParameters.map((typeParameter) => typeParameter.name).join(', ')}>`); - } - const params = this.parameters - ? this.parameters - .map((param) => { - const paramsmd = []; - if (param.flags.isRest) { - paramsmd.push('...'); - } - paramsmd.push(`${param.name}`); - if (param.flags.isOptional) { - paramsmd.push('?'); - } - paramsmd.push(`: ${paramTypeToString(param)}`); - return paramsmd.join(''); - }) - .join(', ') - : ''; - md.push(`(${params})`); - - if (this.type) { - md.push(!showSymbol ? 
' =>' : ':'); - - if (this.type instanceof ReflectionType && type.call(this.type).toString() === 'function') { - const declarations = this.type.declaration.signatures?.map((sig) => signatureTitle.call(sig, false, true)); - md.push(declarations.join(' | ').replace(/\n/, '')); - } else { - md.push(` ${type.call(this.type)}`); - } - } - - if (showSymbol) { - md.push(`\n${memberSymbol.call(this)}\n`); - } - - return md.join('') + '\n'; -} diff --git a/docs-gen/src/resources/helpers/spaces.ts b/docs-gen/src/resources/helpers/spaces.ts deleted file mode 100644 index 414df8f9a0530..0000000000000 --- a/docs-gen/src/resources/helpers/spaces.ts +++ /dev/null @@ -1,3 +0,0 @@ -export function spaces(length: number) { - return `!spaces${[...Array(length)].map(() => ' ').join('')}`; -} diff --git a/docs-gen/src/resources/helpers/strip-line-breaks.ts b/docs-gen/src/resources/helpers/strip-line-breaks.ts deleted file mode 100644 index 23f592c540314..0000000000000 --- a/docs-gen/src/resources/helpers/strip-line-breaks.ts +++ /dev/null @@ -1,3 +0,0 @@ -export function stripLineBreaks(this: string) { - return this.replace(/\n/g, ' '); -} diff --git a/docs-gen/src/resources/helpers/type-and-parent.ts b/docs-gen/src/resources/helpers/type-and-parent.ts deleted file mode 100644 index c75dcfca77326..0000000000000 --- a/docs-gen/src/resources/helpers/type-and-parent.ts +++ /dev/null @@ -1,38 +0,0 @@ -import { SignatureReflection } from 'typedoc'; -import { ArrayType, ReferenceType } from 'typedoc/dist/lib/models/types'; - -import MarkdownTheme from '../../theme'; - -export function typeAndParent(this: ArrayType | ReferenceType) { - if (this instanceof ReferenceType && this.reflection) { - const md = []; - if (this.reflection instanceof SignatureReflection) { - if (this.reflection.parent.parent.url) { - md.push( - `[${this.reflection.parent.parent.name}](${MarkdownTheme.handlebars.helpers.relativeURL( - this.reflection.parent.parent.url, - )})`, - ); - } else { - 
md.push(this.reflection.parent.parent.name); - } - } else { - if (this.reflection.parent.url) { - md.push( - `[${this.reflection.parent.name}](${MarkdownTheme.handlebars.helpers.relativeURL( - this.reflection.parent.url, - )})`, - ); - } else { - md.push(this.reflection.parent.name); - } - if (this.reflection.url) { - md.push(`[${this.reflection.name}](${MarkdownTheme.handlebars.helpers.relativeURL(this.reflection.url)})`); - } else { - md.push(this.reflection.name); - } - } - return md.join('.'); - } - return 'void'; -} diff --git a/docs-gen/src/resources/helpers/type.ts b/docs-gen/src/resources/helpers/type.ts deleted file mode 100644 index 1c9effda55ced..0000000000000 --- a/docs-gen/src/resources/helpers/type.ts +++ /dev/null @@ -1,95 +0,0 @@ -import { - ArrayType, - IntersectionType, - IntrinsicType, - ReferenceType, - ReflectionType, - StringLiteralType, - TupleType, - TypeOperatorType, - UnionType, -} from 'typedoc/dist/lib/models/types'; - -import { LinkPlugin } from '../../plugins/LinkPlugin'; - -export function type( - this: - | ArrayType - | IntersectionType - | IntrinsicType - | ReferenceType - | StringLiteralType - | TupleType - | UnionType - | TypeOperatorType -) { - if (this instanceof ReferenceType && (this.reflection || (this.name && this.typeArguments))) { - return getReferenceType(this); - } - - if (this instanceof ArrayType && this.elementType) { - return getArrayType(this); - } - - if (this instanceof UnionType && this.types) { - return getUnionType(this); - } - - if (this instanceof IntersectionType && this.types) { - return getIntersectionType(this); - } - - if (this instanceof TupleType && this.elements) { - return getTupleType(this); - } - - if (this instanceof IntrinsicType && this.name) { - return getIntrinsicType(this); - } - - if (this instanceof StringLiteralType && this.value) { - return getStringLiteralType(this); - } - - if (this instanceof TypeOperatorType || this instanceof ReflectionType) { - return this; - } - - return this; -} - 
-function getReferenceType(model: ReferenceType) { - const md = []; - - md.push(model.name); - - if (model.typeArguments) { - md.push(`<${model.typeArguments.map((typeArgument) => `${type.call(typeArgument)}`).join(', ')}>`); - } - - return md.join(''); -} - -function getArrayType(model: ArrayType) { - return `${type.call(model.elementType)}[]`; -} - -function getUnionType(model: UnionType) { - return model.types.map((unionType) => type.call(unionType)).join(' | '); -} - -function getIntersectionType(model: IntersectionType) { - return model.types.map((intersectionType) => type.call(intersectionType)).join(' & '); -} - -function getTupleType(model: TupleType) { - return `[${model.elements.map((element) => type.call(element)).join(', ')}]`; -} - -function getIntrinsicType(model: IntrinsicType) { - return model.name; -} - -function getStringLiteralType(model: StringLiteralType) { - return `"${model.value}"`; -} diff --git a/docs-gen/src/resources/layouts/default.hbs b/docs-gen/src/resources/layouts/default.hbs deleted file mode 100755 index 4fdf50b11e9a6..0000000000000 --- a/docs-gen/src/resources/layouts/default.hbs +++ /dev/null @@ -1 +0,0 @@ -{{{contents}}} \ No newline at end of file diff --git a/docs-gen/src/resources/partials/comment.hbs b/docs-gen/src/resources/partials/comment.hbs deleted file mode 100755 index a4555b42b7ae1..0000000000000 --- a/docs-gen/src/resources/partials/comment.hbs +++ /dev/null @@ -1,37 +0,0 @@ -{{#with comment}} - -{{#if hasVisibleComponent}} - -{{#if shortText}} - -{{#with shortText}} - -{{{comment}}} - -{{/with}} - -{{/if}} - -{{#if text}} - -{{#with text}} - -{{{comment}}} - -{{/with}} - -{{/if}} - -{{#if tags}} - -{{#each tags}} - -**`{{tagName}}`** {{#if text}}{{#with text}}{{{comment}}}{{/with}}{{/if}} - -{{/each}} - -{{/if}} - -{{/if}} - -{{/with}} \ No newline at end of file diff --git a/docs-gen/src/resources/partials/header.hbs b/docs-gen/src/resources/partials/header.hbs deleted file mode 100644 index 
9a2f133bf36bd..0000000000000 --- a/docs-gen/src/resources/partials/header.hbs +++ /dev/null @@ -1,5 +0,0 @@ -{{#ifBreadcrumbs}} -{{{breadcrumbs}}} -{{/ifBreadcrumbs}} - -{{heading 1}} {{{reflectionTitle}}} \ No newline at end of file diff --git a/docs-gen/src/resources/partials/hierarchy.hbs b/docs-gen/src/resources/partials/hierarchy.hbs deleted file mode 100755 index 3e9084b6c49c1..0000000000000 --- a/docs-gen/src/resources/partials/hierarchy.hbs +++ /dev/null @@ -1,23 +0,0 @@ -{{#each types}} - -{{#if ../isTarget}} - -{{hierachyLevel}} **{{this}}** - -{{else}} - -{{hierachyLevel}} {{{type}}} - -{{/if}} - -{{#if @last}} - -{{#with ../next}} - -{{> hierarchy}} - -{{/with}} - -{{/if}} - -{{/each}} \ No newline at end of file diff --git a/docs-gen/src/resources/partials/index.hbs b/docs-gen/src/resources/partials/index.hbs deleted file mode 100755 index 73bb574836a09..0000000000000 --- a/docs-gen/src/resources/partials/index.hbs +++ /dev/null @@ -1,31 +0,0 @@ -{{#if groups}} - -{{heading 2}} Index - -{{#each groups}} - -{{#if categories}} - -{{#each categories}} - -{{heading 3}} {{title}} {{../title}} - -{{#each children}} -* [{{{name}}}]({{relativeURL url}}) -{{/each}} - -{{/each}} - -{{else}} - -{{heading 3}} {{title}} - -{{#each children}} -* [{{{name}}}]({{relativeURL url}}) -{{/each}} - -{{/if}} - -{{/each}} - -{{/if}} \ No newline at end of file diff --git a/docs-gen/src/resources/partials/main.hbs b/docs-gen/src/resources/partials/main.hbs deleted file mode 100644 index 165bc8731494a..0000000000000 --- a/docs-gen/src/resources/partials/main.hbs +++ /dev/null @@ -1,3 +0,0 @@ -{{{meta}}} - -{{> members}} diff --git a/docs-gen/src/resources/partials/member.declaration.hbs b/docs-gen/src/resources/partials/member.declaration.hbs deleted file mode 100755 index 5595a1c766fe8..0000000000000 --- a/docs-gen/src/resources/partials/member.declaration.hbs +++ /dev/null @@ -1,13 +0,0 @@ -{{{declarationTitle true}}} - -{{> comment}} - -{{#if type.declaration}} - -{{#with 
type.declaration.children}} - -{{{parameterTable false}}} - -{{/with}} - -{{/if}} diff --git a/docs-gen/src/resources/partials/member.getterSetter.hbs b/docs-gen/src/resources/partials/member.getterSetter.hbs deleted file mode 100755 index 86f120171a281..0000000000000 --- a/docs-gen/src/resources/partials/member.getterSetter.hbs +++ /dev/null @@ -1,19 +0,0 @@ -{{#if getSignature}} - -{{#with getSignature}} - -{{> member.signature }} - -{{/with}} - -{{/if}} - -{{#if setSignature}} - -{{#with setSignature}} - -{{> member.signature }} - -{{/with}} - -{{/if}} \ No newline at end of file diff --git a/docs-gen/src/resources/partials/member.hbs b/docs-gen/src/resources/partials/member.hbs deleted file mode 100644 index 853cb29f8892e..0000000000000 --- a/docs-gen/src/resources/partials/member.hbs +++ /dev/null @@ -1,84 +0,0 @@ -{{#ifCond kindString '!=' 'Module'}} -{{#if name}} - - -{{{ memberTitle }}} - -{{!-- {{#ifParentIsModule true}} - -{{heading 2}} {{{ memberTitle }}} - -{{else}} - -{{#ifParentKindIs 'Enumeration' false}} - -{{heading 3}} {{{ memberTitle }}} - -{{/ifParentKindIs}} - -{{/ifParentIsModule}} --}} - -{{/if}} -{{/ifCond}} - -{{#if signatures}} - -{{#each signatures}} - -{{> member.signature }} - -{{/each}} - -{{else}} - -{{#if hasGetterOrSetter}} - -{{> member.getterSetter}} - -{{else}} - -{{#ifCond kindString '!=' 'Module'}} - -{{> member.declaration}} - -{{/ifCond}} - -{{/if}} - -{{/if}} - -{{#each groups}} - -{{#ifCond title '===' 'Type aliases'}} -## Types -{{/ifCond}} - -{{#each children}} - -{{#unless hasOwnDocument}} - -{{#ifIsLiteralType true}} - -{{> member.declaration}} - -{{{literal}}} - -{{/ifIsLiteralType}} - -{{#ifIsLiteralType false}} - -{{> member hideHr=true}} - -{{/ifIsLiteralType}} - -{{/unless}} - -{{/each}} - -{{/each}} - -{{#unless @last}} -{{#unless hideHr}} -___ -{{/unless}} -{{/unless}} \ No newline at end of file diff --git a/docs-gen/src/resources/partials/member.indexSignatures.hbs 
b/docs-gen/src/resources/partials/member.indexSignatures.hbs deleted file mode 100644 index 10489c8689ade..0000000000000 --- a/docs-gen/src/resources/partials/member.indexSignatures.hbs +++ /dev/null @@ -1,17 +0,0 @@ -* \[{{#each indexSignature.parameters}}{{parameterNameAndType false}}{{/each}}\]: {{#with indexSignature.type}}{{{type}}}{{/with}} - -{{#with indexSignature}} - -{{> comment}} - -{{/with}} - -{{#if indexSignature.type.declaration}} - -{{#with indexSignature.type.declaration}} - -{{> parameter}} - -{{/with}} - -{{/if}} \ No newline at end of file diff --git a/docs-gen/src/resources/partials/member.signature.hbs b/docs-gen/src/resources/partials/member.signature.hbs deleted file mode 100644 index 749c33856195d..0000000000000 --- a/docs-gen/src/resources/partials/member.signature.hbs +++ /dev/null @@ -1,63 +0,0 @@ -{{{signatureTitle true }}} - -{{#unless hideSources}} - -{{> member.sources}} - -{{/unless}} - -{{> comment}} - -{{#if typeParameters}} - -**Type parameters:** - -{{> typeParameters}} - -{{/if}} - -{{#if parameters}} - -{{#with parameters}} - -{{{parameterTable true}}} - -{{/with}} - -{{/if}} - -{{#if type}} - -{{#with type}} -{{#ifIsFunctionType false}} - -{{#unless hideSources}} - -{{#if comment.returns}} - -**Returns:** *{{{type}}}* - -{{#with comment.returns}} - -{{{comment}}} - -{{/with}} - -{{/if}} - -{{/unless}} - -{{#if type.declaration}} - -{{#with type.declaration}} - -{{> parameter}} - -{{/with}} - -{{/if}} - -{{/ifIsFunctionType}} -{{/with}} - -{{/if}} diff --git a/docs-gen/src/resources/partials/member.sources.hbs b/docs-gen/src/resources/partials/member.sources.hbs deleted file mode 100755 index d095579f66554..0000000000000 --- a/docs-gen/src/resources/partials/member.sources.hbs +++ /dev/null @@ -1,39 +0,0 @@ -{{#if implementationOf}} - -*Implementation of {{#with implementationOf}}{{typeAndParent}}{{/with}}* - -{{/if}} - -{{#if inheritedFrom}} - -*Inherited from {{#with inheritedFrom}}{{{typeAndParent}}}{{/with}}* - -{{/if}} - 
-{{#if overwrites}} - -*Overrides {{#with overwrites}}{{typeAndParent}}{{/with}}* - -{{/if}} - -{{#ifSources}} - -{{#if sources}} - -{{#each sources}} - -{{#if url}} - -*Defined in [{{fileName}}:{{line}}]({{url}})* - -{{else}} - -Defined in {{fileName}}:{{line}} - -{{/if}} - -{{/each}} - -{{/if}} - -{{/ifSources}} \ No newline at end of file diff --git a/docs-gen/src/resources/partials/members.group.hbs b/docs-gen/src/resources/partials/members.group.hbs deleted file mode 100755 index 49aad19051e95..0000000000000 --- a/docs-gen/src/resources/partials/members.group.hbs +++ /dev/null @@ -1,31 +0,0 @@ -{{#if categories}} - -{{#each categories}} - -{{#unless @first}} -___ -{{/unless}} - -{{heading 2}} {{title}} {{../title}} - -{{#each children}} - -{{> member}} - -{{/each}} - -{{/each}} - -{{else}} - -{{#each children}} - -{{#unless hasOwnDocument}} - -{{> member}} - -{{/unless}} - -{{/each}} - -{{/if}} \ No newline at end of file diff --git a/docs-gen/src/resources/partials/members.hbs b/docs-gen/src/resources/partials/members.hbs deleted file mode 100755 index 0123b04be668d..0000000000000 --- a/docs-gen/src/resources/partials/members.hbs +++ /dev/null @@ -1,9 +0,0 @@ -{{#each groups}} - -{{#unless allChildrenHaveOwnDocument}} - -{{> members.group}} - -{{/unless}} - -{{/each}} \ No newline at end of file diff --git a/docs-gen/src/resources/partials/parameter.hbs b/docs-gen/src/resources/partials/parameter.hbs deleted file mode 100644 index df5168ff86e36..0000000000000 --- a/docs-gen/src/resources/partials/parameter.hbs +++ /dev/null @@ -1,77 +0,0 @@ -{{!-- not used --}} - -{{#if signatures}} - -{{#each signatures}} - -{{> member.signature hideSources=true }} - -{{/each}} - -{{/if}} - -{{#if indexSignature}} - -{{> member.indexSignatures}} - -{{/if}} - -{{#ifIsLiteralType true}} - -{{{literal}}} - -{{/ifIsLiteralType}} - -{{#ifIsLiteralType false}} - -{{#each children}} - -{{#if signatures}} - -{{#if flags.isRest}}...{{/if}} {{{ name}}} {{#if isOptional}}?{{/if}}: 
function - -{{#each signatures}} - -{{> member.signature}} - -{{/each}} - -{{else}} -1111111 -{{{parameterNameAndType}}} - -{{> comment}} -2222222 -{{#if children}} - -{{> parameter}} - -{{/if}} - -{{#if type.declaration}} - -{{#with type.declaration}} - -## type.declaration - -{{#ifIsLiteralType true}} - -{{{literal}}} - -{{/ifIsLiteralType}} - -{{#ifIsLiteralType false}} - -{{> parameter}} - -{{/ifIsLiteralType}} - -{{/with}} - -{{/if}} - -{{/if}} - -{{/each}} - -{{/ifIsLiteralType}} \ No newline at end of file diff --git a/docs-gen/src/resources/partials/typeParameters.hbs b/docs-gen/src/resources/partials/typeParameters.hbs deleted file mode 100755 index 78dccf34b607a..0000000000000 --- a/docs-gen/src/resources/partials/typeParameters.hbs +++ /dev/null @@ -1,7 +0,0 @@ -{{#each typeParameters}} - -{{{parameterNameAndType}}} - -{{> comment}} - -{{/each}} \ No newline at end of file diff --git a/docs-gen/src/resources/templates/index.hbs b/docs-gen/src/resources/templates/index.hbs deleted file mode 100644 index b4e7f01c51be5..0000000000000 --- a/docs-gen/src/resources/templates/index.hbs +++ /dev/null @@ -1,9 +0,0 @@ -{{#if model.readme}} - -{{#with model.readme}} - -{{{comment}}} - -{{/with}} - -{{/if}} \ No newline at end of file diff --git a/docs-gen/src/resources/templates/reflection.hbs b/docs-gen/src/resources/templates/reflection.hbs deleted file mode 100755 index de8b2a20a873a..0000000000000 --- a/docs-gen/src/resources/templates/reflection.hbs +++ /dev/null @@ -1,85 +0,0 @@ -{{#with model}} - -{{#if hasComment}} - -{{> comment}} - -{{/if}} - -{{/with}} - -{{#if model.typeParameters}} - -{{heading 2}} Type parameters - -{{#with model}} -{{> typeParameters}} -{{/with}} - -{{/if}} - -{{#if model.typeHierarchy}} - -{{heading 2}} Hierarchy - -{{#with model.typeHierarchy}} - -{{> hierarchy}} - -{{/with}} - -{{/if}} - -{{#if model.implementedTypes}} - -{{heading 2}} Implements - -{{#each model.implementedTypes}} -* {{{type}}} -{{/each}} - -{{/if}} - -{{#if 
model.implementedBy}} - -{{heading 2}} Implemented by - -{{#each model.implementedBy}} -* {{{type}}} -{{/each}} - -{{/if}} - -{{#if model.signatures}} - -{{{heading 2}}} Callable - -{{#with model}} - -{{#each signatures}} - -{{> member.signature }} - -{{/each}} - -{{/with}} - -{{/if}} - -{{#if model.indexSignature}} - -{{heading 2}} Indexable - -{{#with model}} - -{{> member.indexSignatures}} - -{{/with}} - -{{/if}} - -{{#with model}} - -{{> main}} - -{{/with}} \ No newline at end of file diff --git a/docs-gen/src/subthemes/bitbucket/theme.ts b/docs-gen/src/subthemes/bitbucket/theme.ts deleted file mode 100644 index 7ea51c49e17ab..0000000000000 --- a/docs-gen/src/subthemes/bitbucket/theme.ts +++ /dev/null @@ -1,22 +0,0 @@ -import { Reflection } from 'typedoc/dist/lib/models'; -import { Renderer } from 'typedoc/dist/lib/output/renderer'; - -import MarkdownTheme from '../../theme'; - -export default class BitbucketTheme extends MarkdownTheme { - constructor(renderer: Renderer, basePath: string) { - super(renderer, basePath); - } - - toAnchorRef(reflection: Reflection) { - function parseAnchorRef(ref: string) { - return ref.replace(/"/g, '').replace(/ /g, '-'); - } - let anchorPrefix = ''; - reflection.flags.forEach(flag => (anchorPrefix += `${flag}-`)); - const prefixRef = parseAnchorRef(anchorPrefix); - const reflectionRef = parseAnchorRef(reflection.name); - const anchorRef = prefixRef + reflectionRef; - return 'markdown-header-' + anchorRef.toLowerCase(); - } -} diff --git a/docs-gen/src/subthemes/docusaurus/partials/header.hbs b/docs-gen/src/subthemes/docusaurus/partials/header.hbs deleted file mode 100644 index 54846b2f6472a..0000000000000 --- a/docs-gen/src/subthemes/docusaurus/partials/header.hbs +++ /dev/null @@ -1,3 +0,0 @@ -{{#ifBreadcrumbs}} -{{{breadcrumbs}}} -{{/ifBreadcrumbs}} \ No newline at end of file diff --git a/docs-gen/src/subthemes/docusaurus/theme.ts b/docs-gen/src/subthemes/docusaurus/theme.ts deleted file mode 100644 index 
abb43712626ee..0000000000000 --- a/docs-gen/src/subthemes/docusaurus/theme.ts +++ /dev/null @@ -1,96 +0,0 @@ -import * as fs from 'fs-extra'; -import * as path from 'path'; -import { RendererEvent } from 'typedoc/dist/lib/output/events'; -import { Renderer } from 'typedoc/dist/lib/output/renderer'; - -import { FrontMatterComponent } from '../../components/front-matter.component'; -import MarkdownTheme from '../../theme'; - -export default class DocusaurusTheme extends MarkdownTheme { - constructor(renderer: Renderer, basePath: string) { - super(renderer, basePath); - this.indexName = 'index'; - renderer.addComponent('frontmatter', new FrontMatterComponent(renderer)); - this.listenTo(renderer, RendererEvent.END, this.onRendererEnd, 1024); - } - - onRendererEnd(renderer: RendererEvent) { - if (!this.application.options.getValue('skipSidebar')) { - const docusarusRoot = this.findDocusaurusRoot(renderer.outputDirectory); - if (docusarusRoot === null) { - this.application.logger.warn( - `[typedoc-markdown-plugin] sidebars.json not written as could not locate docusaurus root directory. In order to to implemnent sidebars.json functionality, the output directory must be a child of a 'docs' directory.`, - ); - return; - } - this.writeSideBar(renderer, docusarusRoot); - } - } - - writeSideBar(renderer: RendererEvent, docusarusRoot: string) { - const childDirectory = renderer.outputDirectory.split(docusarusRoot + 'docs/')[1]; - const docsRoot = childDirectory ? 
childDirectory + '/' : ''; - const websitePath = docusarusRoot + 'website'; - const packageName = renderer.project.packageInfo.name; - const navObject = this.getNavObject(renderer, docsRoot); - const sidebarPath = websitePath + '/sidebars.json'; - let contents: any; - if (!fs.existsSync(sidebarPath)) { - contents = '{}'; - if (!fs.existsSync(websitePath)) { - fs.mkdirSync(websitePath); - } - } else { - contents = fs.readFileSync(sidebarPath); - } - const jsonContent = JSON.parse(contents.toString()); - const update = { - ...jsonContent, - [packageName]: navObject, - }; - try { - fs.writeFileSync(sidebarPath, JSON.stringify(update, null, 2)); - this.application.logger.write(`[typedoc-plugin-markdown] sidebars.json updated at ${sidebarPath}`); - } catch (e) { - this.application.logger.write(`[typedoc-plugin-markdown] failed to update sidebars.json at ${sidebarPath}`); - } - } - - getNavObject(renderer: RendererEvent, docsRoot: string) { - const projectUrls = [docsRoot + this.indexName.replace('.md', '')]; - if (renderer.project.url === 'globals.md') { - projectUrls.push(docsRoot + 'globals'); - } - const navObject = { - ['Introduction']: projectUrls, - }; - - this.getNavigation(renderer.project).children.forEach(rootNavigation => { - navObject[rootNavigation.title] = rootNavigation.children.map(item => { - return docsRoot + item.url.replace('.md', ''); - }); - }); - - return navObject; - } - - findDocusaurusRoot(outputDirectory: string) { - const docsName = 'docs'; - function splitPath(dir: string) { - const parts = dir.split(/(\/|\\)/); - if (!parts.length) { - return parts; - } - return !parts[0].length ? parts.slice(1) : parts; - } - function testDir(parts) { - if (parts.length === 0) { - return null; - } - const p = parts.join(''); - const itdoes = fs.existsSync(path.join(p, docsName)); - return itdoes ? 
p : testDir(parts.slice(0, -1)); - } - return testDir(splitPath(outputDirectory)); - } -} diff --git a/docs-gen/src/subthemes/docusaurus2/partials/header.hbs b/docs-gen/src/subthemes/docusaurus2/partials/header.hbs deleted file mode 100644 index 54846b2f6472a..0000000000000 --- a/docs-gen/src/subthemes/docusaurus2/partials/header.hbs +++ /dev/null @@ -1,3 +0,0 @@ -{{#ifBreadcrumbs}} -{{{breadcrumbs}}} -{{/ifBreadcrumbs}} \ No newline at end of file diff --git a/docs-gen/src/subthemes/docusaurus2/theme.ts b/docs-gen/src/subthemes/docusaurus2/theme.ts deleted file mode 100644 index 7213e7d3b48e1..0000000000000 --- a/docs-gen/src/subthemes/docusaurus2/theme.ts +++ /dev/null @@ -1,99 +0,0 @@ -import * as fs from 'fs-extra'; -import * as path from 'path'; -import { RendererEvent } from 'typedoc/dist/lib/output/events'; -import { Renderer } from 'typedoc/dist/lib/output/renderer'; - -import { FrontMatterComponent } from '../../components/front-matter.component'; -import MarkdownTheme from '../../theme'; - -export default class Docusaurus2Theme extends MarkdownTheme { - sidebarName: string; - constructor(renderer: Renderer, basePath: string) { - super(renderer, basePath); - this.indexName = 'index'; - this.sidebarName = 'sidebars.js'; - renderer.addComponent('frontmatter', new FrontMatterComponent(renderer)); - this.listenTo(renderer, RendererEvent.END, this.onRendererEnd, 1024); - } - - onRendererEnd(renderer: RendererEvent) { - if (!this.application.options.getValue('skipSidebar')) { - const docusarusRoot = this.findDocusaurus2Root(renderer.outputDirectory); - if (docusarusRoot === null) { - this.application.logger.warn( - `[typedoc-markdown-plugin] ${this.sidebarName} not written as could not locate docusaurus root directory. 
In order to to implemnent ${this.sidebarName} functionality, the output directory must be a child of a 'docs' directory.`, - ); - return; - } - this.writeSideBar(renderer, docusarusRoot); - } - } - - writeSideBar(renderer: RendererEvent, docusarusRoot: string) { - const childDirectory = renderer.outputDirectory.split(docusarusRoot + 'docs/')[1]; - const docsRoot = childDirectory ? childDirectory + '/' : ''; - const websitePath = docusarusRoot; - const navObject = this.getNavObject(renderer, docsRoot); - const sidebarPath = websitePath + this.sidebarName; - let jsonContent: any; - if (!fs.existsSync(sidebarPath)) { - if (!fs.existsSync(websitePath)) { - fs.mkdirSync(websitePath); - } - jsonContent = JSON.parse('{}'); - } else { - jsonContent = require(sidebarPath); - } - let firstKey = Object.keys(jsonContent)[0]; - if (!firstKey) { - firstKey = 'docs'; - } - jsonContent[firstKey] = Object.assign({}, jsonContent[firstKey], navObject); - try { - fs.writeFileSync(sidebarPath, 'module.exports = ' + JSON.stringify(jsonContent, null, 2) + ';'); - this.application.logger.write(`[typedoc-plugin-markdown] ${this.sidebarName} updated at ${sidebarPath}`); - } catch (e) { - this.application.logger.write(`[typedoc-plugin-markdown] failed to update ${this.sidebarName} at ${sidebarPath}`); - } - } - - getNavObject(renderer: RendererEvent, docsRoot: string) { - const navObject = {}; - let url = ''; - let navKey = ''; - this.getNavigation(renderer.project).children.forEach(rootNavigation => { - rootNavigation.children.map(item => { - url = item.url.replace('.md', ''); - navKey = url.substring(0, url.indexOf('/')); - if (navKey !== undefined && navKey.length) { - navKey = navKey[0].toUpperCase() + navKey.slice(1); - } - if (navObject[navKey] === undefined) { - navObject[navKey] = []; - } - navObject[navKey].push(docsRoot + url); - }); - }); - return navObject; - } - - findDocusaurus2Root(outputDirectory: string) { - const docsName = 'docs'; - function splitPath(dir: string) { - 
const parts = dir.split(/(\/|\\)/); - if (!parts.length) { - return parts; - } - return !parts[0].length ? parts.slice(1) : parts; - } - function testDir(parts) { - if (parts.length === 0) { - return null; - } - const p = parts.join(''); - const itdoes = fs.existsSync(path.join(p, docsName)); - return itdoes ? p : testDir(parts.slice(0, -1)); - } - return testDir(splitPath(outputDirectory)); - } -} diff --git a/docs-gen/src/subthemes/gitbook/partials/header.hbs b/docs-gen/src/subthemes/gitbook/partials/header.hbs deleted file mode 100644 index ee818e8652f0b..0000000000000 --- a/docs-gen/src/subthemes/gitbook/partials/header.hbs +++ /dev/null @@ -1 +0,0 @@ -{{heading 1}} {{{reflectionTitle}}} \ No newline at end of file diff --git a/docs-gen/src/subthemes/gitbook/theme.ts b/docs-gen/src/subthemes/gitbook/theme.ts deleted file mode 100644 index f64ad6a351e62..0000000000000 --- a/docs-gen/src/subthemes/gitbook/theme.ts +++ /dev/null @@ -1,41 +0,0 @@ -import * as fs from 'fs-extra'; -import { RendererEvent } from 'typedoc/dist/lib/output/events'; -import { Renderer } from 'typedoc/dist/lib/output/renderer'; - -import MarkdownTheme from '../../theme'; - -export default class GitbookTheme extends MarkdownTheme { - constructor(renderer: Renderer, basePath: string) { - super(renderer, basePath); - this.listenTo(renderer, RendererEvent.END, this.writeSummary, 1024); - } - - writeSummary(renderer: RendererEvent) { - const outputDirectory = renderer.outputDirectory; - const summaryMarkdown = this.getSummaryMarkdown(renderer); - try { - fs.writeFileSync(`${outputDirectory}/SUMMARY.md`, summaryMarkdown); - this.application.logger.write(`[typedoc-plugin-markdown] SUMMARY.md written to ${outputDirectory}`); - } catch (e) { - this.application.logger.write(`[typedoc-plugin-markdown] failed to write SUMMARY at ${outputDirectory}`); - } - } - - getSummaryMarkdown(renderer: RendererEvent) { - const md = []; - md.push(`* [Globals](globals.md)`); - 
this.getNavigation(renderer.project).children.forEach(rootNavigation => { - if (rootNavigation.children) { - md.push(`* [${rootNavigation.title}](${rootNavigation.url})`); - rootNavigation.children.forEach(item => { - md.push(` * [${item.title}](${item.url})`); - }); - } - }); - return md.join('\n'); - } - - allowedDirectoryListings() { - return ['README.md', 'globals.md', 'classes', 'enums', 'interfaces', 'modules', 'media', '.DS_Store', 'SUMMARY.md']; - } -} diff --git a/docs-gen/src/subthemes/vuepress/partials/header.hbs b/docs-gen/src/subthemes/vuepress/partials/header.hbs deleted file mode 100644 index ee818e8652f0b..0000000000000 --- a/docs-gen/src/subthemes/vuepress/partials/header.hbs +++ /dev/null @@ -1 +0,0 @@ -{{heading 1}} {{{reflectionTitle}}} \ No newline at end of file diff --git a/docs-gen/src/subthemes/vuepress/partials/main.hbs b/docs-gen/src/subthemes/vuepress/partials/main.hbs deleted file mode 100644 index b13ea23b23e09..0000000000000 --- a/docs-gen/src/subthemes/vuepress/partials/main.hbs +++ /dev/null @@ -1 +0,0 @@ -{{> members}} \ No newline at end of file diff --git a/docs-gen/src/subthemes/vuepress/theme.ts b/docs-gen/src/subthemes/vuepress/theme.ts deleted file mode 100644 index 4d96cd1aadffb..0000000000000 --- a/docs-gen/src/subthemes/vuepress/theme.ts +++ /dev/null @@ -1,118 +0,0 @@ -import * as fs from 'fs-extra'; -import * as path from 'path'; -import { RendererEvent } from 'typedoc/dist/lib/output/events'; -import { Renderer } from 'typedoc/dist/lib/output/renderer'; - -import MarkdownTheme from '../../theme'; - -/** - * Creates `api-sidebar.json` in `.vuepress` directory. 
- * May be used in `.vuepress/config.json` as follows: - * @example - * const apiSideBar = require("./api-sidebar.json"); - * const apiSideBarRelative = require('./api-sidebar-relative.json'); - * - * // Without groups - * module.exports = { - * themeConfig: { - * sidebar: ["some-content", ...apiSideBar] - * } - * }; - * - * // With groups - * module.exports = { - * themeConfig: { - * sidebar: ["some-content", { title: "API", children: apiSideBar }] - * } - * }; - * - * // Multiple Sidebar - * module.exports = { - * themeConfig: { - * sidebar: { - * '/guide/': ['some-content'], - * '/api/': apiSideBarRelative, - * '/': ['other'], - * }, - * }, - * }; - */ -export default class VuePressTheme extends MarkdownTheme { - constructor(renderer: Renderer, basePath: string) { - super(renderer, basePath); - this.listenTo(renderer, RendererEvent.END, this.onRendererEnd, 1024); - } - - onRendererEnd(renderer: RendererEvent) { - const root = this.findRoot(renderer.outputDirectory); - if (root === null) { - this.application.logger.warn( - `[typedoc-markdown-plugin] sidebars.json not written as could not locate VuePress root directory. In order to to implemnent sidebars.json functionality, the output directory must be a child of a 'docs' directory.`, - ); - return; - } - this.writeSideBar(renderer, root); - } - - writeSideBar(renderer: RendererEvent, root: string) { - const childDirectory = renderer.outputDirectory.split(root + 'docs/')[1]; - const docsRoot = childDirectory ? 
childDirectory + '/' : ''; - const vuePressRoot = root + 'docs/.vuepress'; - const navObject = this.getNavObject(renderer, docsRoot); - const sidebarPath = vuePressRoot + '/api-sidebar.json'; - const relativeNavObject = this.getNavObject(renderer); - const relativeSidebarPath = vuePressRoot + '/api-sidebar-relative.json'; - - if (!fs.existsSync(vuePressRoot)) { - fs.mkdirSync(vuePressRoot); - } - - try { - fs.writeFileSync(sidebarPath, JSON.stringify(navObject, null, 2)); - fs.writeFileSync(relativeSidebarPath, JSON.stringify(relativeNavObject, null, 2)); - this.application.logger.write(`[typedoc-plugin-markdown] sidebars.json updated at ${sidebarPath}`); - } catch (e) { - this.application.logger.write(`[typedoc-plugin-markdown] failed to update sidebars.json at ${sidebarPath}`); - } - } - - getNavObject(renderer: RendererEvent, docsRoot: string = '') { - const projectUrls = [docsRoot + this.indexName.replace('.md', '')]; - if (renderer.project.url === 'globals.md') { - projectUrls.push(docsRoot + 'globals'); - } - - // const packageName = MarkdownPlugin.project.packageInfo.name; - const navObject = []; // [{ title: packageName, children: projectUrls }] - - this.getNavigation(renderer.project).children.forEach(rootNavigation => { - navObject.push({ - title: rootNavigation.title, - children: rootNavigation.children.map(item => { - return docsRoot + item.url.replace('.md', ''); - }), - }); - }); - return navObject; - } - - findRoot(outputDirectory: string) { - const docsName = 'docs'; - function splitPath(dir: string) { - const parts = dir.split(/(\/|\\)/); - if (!parts.length) { - return parts; - } - return !parts[0].length ? parts.slice(1) : parts; - } - function testDir(parts) { - if (parts.length === 0) { - return null; - } - const p = parts.join(''); - const itdoes = fs.existsSync(path.join(p, docsName)); - return itdoes ? 
p : testDir(parts.slice(0, -1)); - } - return testDir(splitPath(outputDirectory)); - } -} diff --git a/docs-gen/src/theme.spec.ts b/docs-gen/src/theme.spec.ts deleted file mode 100644 index a557555f6a285..0000000000000 --- a/docs-gen/src/theme.spec.ts +++ /dev/null @@ -1,160 +0,0 @@ -import * as fs from 'fs-extra'; -import * as path from 'path'; -import { Application, UrlMapping } from 'typedoc'; - -describe(`MarkdownTheme`, () => { - function getExpectedUrls(urlMappings: UrlMapping[]) { - const expectedUrls = []; - urlMappings.forEach(urlMapping => { - expectedUrls.push(urlMapping.url); - urlMapping.model.children.forEach(reflection => { - expectedUrls.push(reflection.url); - }); - }); - return expectedUrls; - } - - let app; - let project; - let theme; - const out = path.join(__dirname, 'tmp'); - beforeAll(() => { - app = new Application(); - app.bootstrap({ - module: 'CommonJS', - target: 'ES5', - readme: 'none', - theme: 'markdown', - logger: 'none', - plugin: path.join(__dirname, '../dist/index'), - }); - project = app.convert(app.expandInputFiles(['./test/stubs/'])); - app.generateDocs(project, out); - theme = app.renderer.theme; - }); - - afterAll(() => { - fs.removeSync(out); - }); - - describe(`getUrls`, () => { - test(`should getUrls'`, () => { - const urlMappings = theme.getUrls(project); - expect(getExpectedUrls(urlMappings)).toMatchSnapshot(); - }); - - test(`should getUrls when readme is defined`, () => { - const spy = jest.spyOn(app.options, 'getValue').mockReturnValue('./README.md'); - const urlMappings = theme.getUrls(project); - expect(getExpectedUrls(urlMappings)).toMatchSnapshot(); - spy.mockRestore(); - }); - - test(`should get navigation`, () => { - expect(theme.getNavigation(project)).toMatchSnapshot(); - }); - }); - - describe(`output directory`, () => { - let directoryListingSpy; - - beforeAll(() => { - directoryListingSpy = jest.spyOn(fs, 'readdirSync'); - }); - - test(`should test output directory true with all allowed files and 
directories`, () => { - directoryListingSpy.mockReturnValue([ - '.DS_Store', - 'README.md', - 'globals.md', - 'classes', - 'enums', - 'interfaces', - 'media', - 'modules', - ]); - expect(theme.isOutputDirectory('/path')).toBeTruthy(); - }); - - test(`should test output directory true with some files directories`, () => { - directoryListingSpy.mockReturnValue(['README.md', 'classes', 'media', 'modules']); - expect(theme.isOutputDirectory('/path')).toBeTruthy(); - }); - - test(`should test output directory true with just index`, () => { - directoryListingSpy.mockReturnValue(['README.md']); - expect(theme.isOutputDirectory('/path')).toBeTruthy(); - }); - - test(`should test output directory false with unkown index`, () => { - directoryListingSpy.mockReturnValue(['Unrecognised.md', 'classes', 'enums', 'interfaces', 'media', 'modules']); - expect(theme.isOutputDirectory('/path')).toBeFalsy(); - }); - - test(`should test output directory false with hidden files`, () => { - directoryListingSpy.mockReturnValue(['.git', 'classes', 'enums', 'interfaces', 'media', 'modules']); - expect(theme.isOutputDirectory('/path')).toBeFalsy(); - }); - - test(`should test output directory false without an index`, () => { - directoryListingSpy.mockReturnValue(['globals.md', 'classes', 'enums', 'interfaces', 'media', 'modules']); - expect(theme.isOutputDirectory('/path')).toBeFalsy(); - }); - - test(`should test output directory false with unknown folder`, () => { - directoryListingSpy.mockReturnValue(['README.md', 'folder']); - expect(theme.isOutputDirectory('/path')).toBeFalsy(); - }); - }); - - describe(`output directory`, () => { - let directoryListingSpy; - beforeAll(() => { - directoryListingSpy = jest.spyOn(fs, 'readdirSync'); - }); - - test(`should test output directory true with all allowed files and directories`, () => { - directoryListingSpy.mockReturnValue([ - '.DS_Store', - 'README.md', - 'globals.md', - 'classes', - 'enums', - 'interfaces', - 'media', - 'modules', - ]); - 
expect(theme.isOutputDirectory('/path')).toBeTruthy(); - }); - - test(`should test output directory true with some files directories`, () => { - directoryListingSpy.mockReturnValue(['README.md', 'classes', 'media', 'modules']); - expect(theme.isOutputDirectory('/path')).toBeTruthy(); - }); - - test(`should test output directory true with just index`, () => { - directoryListingSpy.mockReturnValue(['README.md']); - expect(theme.isOutputDirectory('/path')).toBeTruthy(); - }); - - test(`should test output directory false with unkown index`, () => { - directoryListingSpy.mockReturnValue(['Unrecognised.md', 'classes', 'enums', 'interfaces', 'media', 'modules']); - expect(theme.isOutputDirectory('/path')).toBeFalsy(); - }); - - test(`should test output directory false with hidden files`, () => { - directoryListingSpy.mockReturnValue(['.git', 'classes', 'enums', 'interfaces', 'media', 'modules']); - expect(theme.isOutputDirectory('/path')).toBeFalsy(); - }); - - test(`should test output directory false without an index`, () => { - directoryListingSpy.mockReturnValue(['globals.md', 'classes', 'enums', 'interfaces', 'media', 'modules']); - expect(theme.isOutputDirectory('/path')).toBeFalsy(); - }); - - test(`should test output directory false with unknown folder`, () => { - directoryListingSpy.mockReturnValue(['README.md', 'folder']); - expect(theme.isOutputDirectory('/path')).toBeFalsy(); - }); - }); -}); diff --git a/docs-gen/src/theme.ts b/docs-gen/src/theme.ts deleted file mode 100644 index dfb8bebadce4b..0000000000000 --- a/docs-gen/src/theme.ts +++ /dev/null @@ -1,378 +0,0 @@ -import * as Handlebars from 'handlebars'; -import { - ContainerReflection, - DeclarationReflection, - NavigationItem, - ProjectReflection, - Reflection, - ReflectionKind, - Renderer, - UrlMapping, -} from 'typedoc'; -import { PageEvent } from 'typedoc/dist/lib/output/events'; -import { Theme } from 'typedoc/dist/lib/output/theme'; -import { TemplateMapping } from 
'typedoc/dist/lib/output/themes/DefaultTheme'; - -import { ContextAwareHelpersComponent } from './components/helpers.component'; -import { OptionsComponent } from './components/options.component'; - -/** - * The MarkdownTheme is based on TypeDoc's DefaultTheme @see https://github.com/TypeStrong/typedoc/blob/master/src/lib/output/themes/DefaultTheme.ts. - * - html specific components are removed from the renderer - * - markdown specefic components have been added - */ - -export default class MarkdownTheme extends Theme { - /** - * @See DefaultTheme.MAPPINGS - */ - static MAPPINGS: TemplateMapping[] = [ - { - kind: [ReflectionKind.Class], - isLeaf: false, - directory: 'classes', - template: 'reflection.hbs', - }, - { - kind: [ReflectionKind.Interface], - isLeaf: false, - directory: 'interfaces', - template: 'reflection.hbs', - }, - { - kind: [ReflectionKind.Enum], - isLeaf: false, - directory: 'enums', - template: 'reflection.hbs', - }, - { - kind: [ReflectionKind.Namespace, ReflectionKind.Module], - isLeaf: false, - directory: 'modules', - template: 'reflection.hbs' - }, - ]; - - /** - * @See DefaultTheme.URL_PREFIX - */ - static URL_PREFIX: RegExp = /^(http|ftp)s?:\/\//; - - // creates an isolated Handlebars environment to store context aware helpers - static handlebars = Handlebars.create(); - - // is documentation generated as a single output file - static isSingleFile = false; - - // the root of generated docs - indexName = ''; - - // the file extension of the generated docs - fileExt = '.md'; - - constructor(renderer: Renderer, basePath: string) { - super(renderer, basePath); - this.listenTo(renderer, PageEvent.END, this.onPageEnd, 1024); - - // cleanup html specific components - renderer.removeComponent('assets'); - renderer.removeComponent('javascript-index'); - renderer.removeComponent('toc'); - renderer.removeComponent('pretty-print'); - - // add markdown related componenets - renderer.addComponent('helpers', new ContextAwareHelpersComponent(renderer)); - 
renderer.addComponent('options', new OptionsComponent(renderer)); - - this.indexName = this.application.options.getValue('name'); - - if (!this.indexName) { - throw new Error('`--name` must be provided') - } - } - - /** - * Test if directory is output directory - * @param outputDirectory - */ - isOutputDirectory(outputDirectory: string) { - return true; - } - - /** - * This method is essentially a copy of the TypeDocs DefaultTheme.getUrls with extensions swapped out to .md - * Map the models of the given project to the desired output files. - * - * @param project The project whose urls should be generated. - * @returns A list of [[UrlMapping]] instances defining which models - * should be rendered to which files. - */ - getUrls(project: ProjectReflection): UrlMapping[] { - const urls: UrlMapping[] = []; - const entryPoint = this.getEntryPoint(project); - const inlineGroupTitles = ['Functions', 'Variables', 'Object literals']; - - if (project.groups) { - MarkdownTheme.isSingleFile = - project.groups && project.groups.every((group) => inlineGroupTitles.includes(group.title)); - } - - - entryPoint.url = this.indexName + this.fileExt; - urls.push( - new UrlMapping( - this.indexName + this.fileExt, - { ...entryPoint, displayReadme: MarkdownTheme.isSingleFile }, - 'reflection.hbs', - ), - ); - - - if (entryPoint.children) { - entryPoint.children.forEach((child: Reflection) => { - if (child instanceof DeclarationReflection) { - this.buildUrls(child, urls); - } - }); - } - - return urls; - } - - /** - * This is mostly a copy of the TypeDoc DefaultTheme.buildUrls method with .html ext switched to .md - * Builds the url for the the given reflection and all of its children. - * - * @param reflection The reflection the url should be created for. - * @param urls The array the url should be appended to. - * @returns The altered urls array. 
- */ - - buildUrls(reflection: DeclarationReflection, urls: UrlMapping[]): UrlMapping[] { - return []; - const mapping = MarkdownTheme.getMapping(reflection); - if (mapping) { - if (!reflection.url || !MarkdownTheme.URL_PREFIX.test(reflection.url)) { - const url = this.toUrl(mapping, reflection); - urls.push(new UrlMapping(url, reflection, mapping.template)); - reflection.url = url; - reflection.hasOwnDocument = true; - } - for (const child of reflection.children || []) { - if (mapping.isLeaf) { - this.applyAnchorUrl(child, reflection); - } else { - this.buildUrls(child, urls); - } - } - } else if (reflection.parent) { - this.applyAnchorUrl(reflection, reflection.parent); - } - return urls; - } - - /** - * Returns the full url of a given mapping and reflection - * @param mapping - * @param reflection - */ - toUrl(mapping: TemplateMapping, reflection: DeclarationReflection) { - return mapping.directory + '/' + this.getUrl(reflection) + this.fileExt; - } - - /** - * @see DefaultTheme.getUrl - * Return a url for the given reflection. - * - * @param reflection The reflection the url should be generated for. - * @param relative The parent reflection the url generation should stop on. - * @param separator The separator used to generate the url. - * @returns The generated url. - */ - getUrl(reflection: Reflection, relative?: Reflection, separator: string = '.'): string { - let url = reflection.getAlias(); - - if (reflection.parent && reflection.parent !== relative && !(reflection.parent instanceof ProjectReflection)) { - url = this.getUrl(reflection.parent, relative, separator) + separator + url; - } - - return url; - } - - /** - * Similar to DefaultTheme.applyAnchorUrl method with added but the anchors are computed from the reflection structure - * Generate an anchor url for the given reflection and all of its children. - * - * @param reflection The reflection an anchor url should be created for. - * @param container The nearest reflection having an own document. 
- */ - applyAnchorUrl(reflection: Reflection, container: Reflection) { - if (!reflection.url || !MarkdownTheme.URL_PREFIX.test(reflection.url)) { - const anchor = this.toAnchorRef(reflection); - reflection.url = container.url + '#' + anchor; - reflection.anchor = anchor; - reflection.hasOwnDocument = false; - } - reflection.traverse((child) => { - if (child instanceof DeclarationReflection) { - this.applyAnchorUrl(child, container); - } - }); - } - - /** - * Converts a reflection to anchor ref - * @param reflection - */ - toAnchorRef(reflection: Reflection) { - function parseAnchorRef(ref: string) { - return ref.replace(/["\$]/g, '').replace(/ /g, '-'); - } - let anchorPrefix = ''; - reflection.flags.forEach((flag) => (anchorPrefix += `${flag}-`)); - const prefixRef = parseAnchorRef(anchorPrefix); - const reflectionRef = parseAnchorRef(reflection.name); - const anchorRef = prefixRef + reflectionRef; - return anchorRef.toLowerCase(); - } - - /** - * Copy of default theme DefaultTheme.getEntryPoint - * @param project - */ - getEntryPoint(project: ProjectReflection): ContainerReflection { - const entryPoint = this.owner.entryPoint; - - // if (project.children?.[0].comment.tags?.[0] !== undefined) { - // project.children[0].comment.tags[0].text = 'hello , World'; - - // return project.children[0].children[0]; - // } - - if (entryPoint) { - const reflection = project.getChildByName(entryPoint); - if (reflection) { - if (reflection instanceof ContainerReflection) { - return reflection; - } else { - this.application.logger.warn('The given entry point `%s` is not a container.', entryPoint); - } - } else { - this.application.logger.warn('The entry point `%s` could not be found.', entryPoint); - } - } - return project; - } - - getNavigation(project: ProjectReflection) { - function createNavigationGroup(name: string, url = null) { - const navigationGroup = new NavigationItem(name, url); - navigationGroup.children = []; - delete navigationGroup.cssClasses; - delete 
navigationGroup.reflection; - return navigationGroup; - } - - function getNavigationGroup(reflection: DeclarationReflection) { - if (reflection.kind === ReflectionKind.Namespace) { - return namespacesNavigation; - } - // if (reflection.kind === ReflectionKind.Module) { - // return modulesNavigation; - // } - if (reflection.kind === ReflectionKind.Class) { - return classesNavigation; - } - if (reflection.kind === ReflectionKind.Enum) { - return enumsNavigation; - } - if (reflection.kind === ReflectionKind.Interface) { - return interfacesNavigation; - } - return null; - } - - function addNavigationItem( - longTitle: boolean, - reflection: DeclarationReflection, - parentNavigationItem?: NavigationItem, - group?, - ) { - let navigationGroup: NavigationItem; - if (group) { - navigationGroup = group; - } else { - navigationGroup = getNavigationGroup(reflection); - } - let titlePrefix = ''; - if (longTitle && parentNavigationItem && parentNavigationItem.title) { - titlePrefix = parentNavigationItem.title.replace(/\"/g, '') + '.'; - } - - const title = titlePrefix + reflection.name.replace(/\"/g, ''); - const url = reflection.url; - const nav = new NavigationItem(title, url, parentNavigationItem); - nav.parent = parentNavigationItem; - - navigationGroup.children.push(nav); - if (reflection.children) { - reflection.children.forEach((reflectionChild) => { - if (reflectionChild.hasOwnDocument) { - addNavigationItem(longTitle, reflectionChild as DeclarationReflection, nav, navigationGroup); - } - }); - } - delete nav.cssClasses; - delete nav.reflection; - return nav; - } - const isModules = this.application.options.getValue('mode') === 1; - const isLongTitle = this.application.options.getValue('longTitle') as boolean; - const navigation = createNavigationGroup(project.name, this.indexName + this.fileExt); - // const modulesNavigation = createNavigationGroup('Modules'); - const namespacesNavigation = createNavigationGroup('Namespaces'); - const classesNavigation = 
createNavigationGroup('Classes'); - const enumsNavigation = createNavigationGroup('Enums'); - const interfacesNavigation = createNavigationGroup('Interfaces'); - - if (project.groups) { - if (!isModules) { - project.groups.forEach((group) => { - group.children.forEach((reflection) => { - if (reflection.hasOwnDocument) { - addNavigationItem(isLongTitle, reflection as DeclarationReflection); - } - }); - }); - } - } - - navigation.children = []; - - return navigation; - } - - private onPageEnd(page: PageEvent) { - page.contents = page.contents ? MarkdownTheme.formatContents(page.contents) : ''; - } - - /** - * @see DefaultTheme.getMapping - * Return the template mapping for the given reflection. - * - * @param reflection The reflection whose mapping should be resolved. - * @returns The found mapping or undefined if no mapping could be found. - */ - static getMapping(reflection: DeclarationReflection): TemplateMapping | undefined { - return MarkdownTheme.MAPPINGS.find((mapping) => reflection.kindOf(mapping.kind)); - } - - static formatContents(contents: string) { - return ( - contents - .replace(/[\r\n]{3,}/g, '\n\n') - .replace(/!spaces/g, '') - .replace(/^\s+|\s+$/g, '') + '\n' - ); - } -} diff --git a/docs-gen/tasks/fixtures.js b/docs-gen/tasks/fixtures.js deleted file mode 100644 index de3d1d8ceeb86..0000000000000 --- a/docs-gen/tasks/fixtures.js +++ /dev/null @@ -1,44 +0,0 @@ -const fs = require('fs'); -const path = require('path'); -const { Application } = require('typedoc'); - -const app = new Application({ - mode: 'Modules', - module: 'CommonJS', - experimentalDecorators: true, - jsx: true, - target: 'es2015', -}); - -const fixturesDir = './test/fixtures'; -const inputFiles = app.expandInputFiles(['./test/stubs']); - -if (!fs.existsSync(fixturesDir)) { - fs.mkdirSync(fixturesDir); -} - -inputFiles.forEach(file => { - const result = app.convert(app.expandInputFiles([file])); - fs.writeFileSync(`${fixturesDir}/${path.basename(file)}.json`, JSON.stringify(result, 
replacer)); - console.log(`[typedoc-plugin-markdown(task:fixtures)] writing ${path.basename(file)}.json fixture`); -}); - -function replacer(key, value) { - if ( - key === 'parent' || - key === 'reflection' || - key === 'reflections' || - key === 'symbolMapping' || - key === 'file' || - key === 'cssClasses' || - key === '_alias' || - key === '_aliases' || - key === 'directory' || - key === 'packageInfo' || - key === 'files' || - key === 'readme' - ) { - return null; - } - return value; -} diff --git a/docs-gen/tasks/generate.js b/docs-gen/tasks/generate.js deleted file mode 100644 index c4d518af3d067..0000000000000 --- a/docs-gen/tasks/generate.js +++ /dev/null @@ -1,64 +0,0 @@ -const TypeDoc = require('typedoc'); -const path = require('path'); -const fs = require('fs-extra'); - -const outputDir = '../docs/docs-new/pages/reference/frontend'; - -const app = new TypeDoc.Application(); - -app.options.addReader(new TypeDoc.TSConfigReader()); - -app.bootstrap({ - excludeExternals: true, - includeDeclarations: true, - plugin: ['./dist/index.js'], - hideSources: true, - hideIndexes: true, - name: 'docs', -}); - -const projects = [ - { - name: '@cubejs-client-core', - docsPath: '../packages/cubejs-client-core/index.d.ts', - outputDir, - }, - { - name: '@cubejs-client-react', - docsPath: '../packages/cubejs-client-react/index.d.ts', - outputDir, - }, - { - name: '@cubejs-client-ws-transport', - docsPath: '../packages/cubejs-client-ws-transport/index.d.ts', - outputDir, - }, -]; - -let failure = false; - -projects.forEach(({ name, docsPath, outputDir }) => { - const tmpDir = path.join(outputDir, 'tmp'); - const project = app.convert(app.expandInputFiles([docsPath])); - - if (project) { - app.generateDocs(project, tmpDir); - - if (fs.existsSync(tmpDir)) { - const [tmpFileName] = fs.readdirSync(tmpDir); - - const pathArr = tmpDir.split('/'); - pathArr.splice(-1, 1); - const out = path.join(...pathArr); - const currentPath = path.join(out, `${name.replace('@', '')}.mdx`); - - 
fs.copyFileSync(path.join(tmpDir, tmpFileName), currentPath); - fs.removeSync(tmpDir); - } - } else { - console.error(`Error while generating '${name}' docs`); - failure = true; - } -}); - -process.exit(failure ? 1 : 0); diff --git a/docs-gen/tasks/link-plugin.js b/docs-gen/tasks/link-plugin.js deleted file mode 100644 index e108e4d9e4771..0000000000000 --- a/docs-gen/tasks/link-plugin.js +++ /dev/null @@ -1,7 +0,0 @@ -const fs = require('fs'); -const { join } = require('path'); -// symlink to self for running local examples/tests -const pluginPath = join(__dirname, '..', 'node_modules/typedoc-plugin-markdown'); -if (!fs.existsSync(pluginPath)) { - fs.symlinkSync(join(__dirname, '..'), pluginPath); -} diff --git a/docs-gen/template/publish.js b/docs-gen/template/publish.js deleted file mode 100644 index f03d1368d6c59..0000000000000 --- a/docs-gen/template/publish.js +++ /dev/null @@ -1,145 +0,0 @@ -const fs = require('fs-extra'); -const inline = require('jsdoc/tag/inline'); -const inflection = require('inflection'); - -let typeDefs = []; -let knownClassNames = []; - -const anchorName = (link) => inflection.dasherize(inflection.underscore(link.replace(/#/g, '-'))); - -const resolveInlineLinks = (str) => inline.replaceInlineTags(str, { - link: (string, { completeTag, text }) => string.replace(completeTag, `[${text}](#${anchorName(text)})`), -}).newString; - -const renderLinks = (p) => { - if (p.type.names[0] === '*') { - return '*'; - } - if (p.type && knownClassNames.indexOf(p.type.names.join('#')) !== -1) { - return `[${p.type.names.join('#')}](#${anchorName(p.type.names.join('-'))})`; - } - if (p.type) { - return `\`${p.type.names.join('|')}\``; - } - return p; -}; - -function generateParams(doclet, field = 'params') { - const params = doclet[field].map((p) => { - const optional = p.optional ? '**Optional**' : null; - const defaultValue = p.defaultvalue ? 
`**Default:** \`${p.defaultvalue}\`` : null; - const type = p.type && p.type.parsedType && - (p.type.parsedType.name || p.type.parsedType.parsedExpression || p.type.parsedType.typeExpression); - const formattedType = type ? `: ${type}` : ''; - const options = [optional, defaultValue].filter((f) => !!f); - - if (!p.description && typeDefs.find((td) => td.name === type)) { - p.description = `See {@link ${type}}`; - } - - p.description = (p.description || '').replace(/\n/g, ' ').trim(); - - return `- \`${p.name}${formattedType}\`${options.length ? ` (${options.join(', ')})` : ''}${ - p.description ? ` - ${resolveInlineLinks(p.description)}` : '' - }`; - }); - - if (field === 'properties') { - return `**Properties:**\n\n${params.join('\n')}\n`; - } - - return `**Parameters:**\n\n${params.join('\n')}\n`; -} - -function generateTypeDefs(doclets) { - if (!doclets.length) { - return ''; - } - - const res = doclets.map((doclet) => [`## ${doclet.name}`, doclet.description, generateParams(doclet, 'properties'), '\n'] - .filter((d) => !!d) - .join('\n')); - - return res.join('\n'); -} - -const generateFunctionDocletSection = (doclet, isConstructor) => { - const title = doclet.name; - const header = `##${doclet.longname.indexOf('#') !== -1 || isConstructor ? '#' : ''} ${title}${ - isConstructor ? ' Constructor' : '' - }\n`; - const args = - (doclet.params && - doclet.params - .filter((p) => p.name.indexOf('.') === -1) - .map((p) => (p.optional ? `[${p.name}]` : p.name)) - .join(', ')) || - ''; - const signature = `\`${isConstructor ? 'new ' : ''}${doclet.meta.code.name || doclet.name}(${args})\`\n`; - const params = doclet.params ? generateParams(doclet) : ''; - const returns = doclet.returns ? - `**Returns:** ${doclet.returns.map( - (p) => `${p.type ? renderLinks(p) : ''}${p.description ? 
` ${resolveInlineLinks(p.description)}` : ''}` - )}` : - ''; - return [header, signature, doclet.description && `${resolveInlineLinks(doclet.description)}\n`, params, returns, '\n'] - .filter((f) => !!f) - .join('\n'); -}; - -const generateClassSection = (doclet) => { - const header = `## ${doclet.name}\n`; - let classSection = [header, (doclet.classdesc || doclet.description).trim(), '\n'].join('\n'); - if (doclet.params && doclet.params.length) { - classSection = classSection.concat(generateFunctionDocletSection(doclet, true)); - } - return classSection; -}; - -const tagValue = (doclet, tagOriginalTitle) => { - const tag = doclet.tags && doclet.tags.find((t) => t.originalTitle === tagOriginalTitle); - return tag && tag.value; -}; - -const generateModuleSection = (doclet) => `# ${doclet.name} - -${doclet.description}\n\n`; - -const generateMarkDown = (doclets, parent) => { - if (!parent) { - const rootModule = doclets.find((d) => d.kind === 'module' && d.description); - return generateModuleSection(rootModule).concat(generateMarkDown(doclets, rootModule)); - } - const children = doclets.filter((d) => d.memberof === parent.longname); - const order = (doclet) => parseInt(tagValue(doclet, 'order'), 10) || 0; - children.sort((a, b) => order(a) - order(b)); - return children - .map((child) => { - if (child.kind === 'class') { - return generateClassSection(child) - .concat(generateMarkDown(doclets, child)) - .concat(generateTypeDefs(typeDefs.filter((td) => td.memberof === child.name))); - } else if (child.kind === 'function' || child.kind === 'member') { - return generateFunctionDocletSection(child); - } - return null; - }) - .filter((markdown) => !!markdown) - .join(''); -}; - -const classNamesFrom = (doclets) => doclets.filter((d) => d.kind === 'class').map((d) => d.name); - -exports.publish = (data, { destination }) => { - knownClassNames = classNamesFrom(data().get()); - typeDefs = data() - .get() - .filter((d) => d.kind === 'typedef'); - - const markDown = 
generateMarkDown( - data() - .get() - .filter((d) => !d.undocumented && d.kind !== 'typedef') - ); - fs.writeFile(destination, markDown); -}; diff --git a/docs-gen/test/options.json b/docs-gen/test/options.json deleted file mode 100644 index 8aa2a00cbf7aa..0000000000000 --- a/docs-gen/test/options.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "inputFiles": "./test/stubs/src", - "target": "ES5", - "name": "My API", - "readme": "none", - "includes": "./test/stubs/inc/", - "media": "./test/stubs/media/" -} diff --git a/docs-gen/test/stubs/README.md b/docs-gen/test/stubs/README.md deleted file mode 100644 index a380dbf8c1e03..0000000000000 --- a/docs-gen/test/stubs/README.md +++ /dev/null @@ -1 +0,0 @@ -MY README diff --git a/docs-gen/test/stubs/inc/README.md b/docs-gen/test/stubs/inc/README.md deleted file mode 100644 index e845566c06f9b..0000000000000 --- a/docs-gen/test/stubs/inc/README.md +++ /dev/null @@ -1 +0,0 @@ -README diff --git a/docs-gen/test/stubs/inc/class-example.hbs b/docs-gen/test/stubs/inc/class-example.hbs deleted file mode 100755 index ad06e87b1cfe5..0000000000000 --- a/docs-gen/test/stubs/inc/class-example.hbs +++ /dev/null @@ -1 +0,0 @@ -This is a simple example on a handlebars file. \ No newline at end of file diff --git a/docs-gen/test/stubs/inc/class-example.md b/docs-gen/test/stubs/inc/class-example.md deleted file mode 100755 index def04801d72c9..0000000000000 --- a/docs-gen/test/stubs/inc/class-example.md +++ /dev/null @@ -1,5 +0,0 @@ -This is a simple example on how to use include. 
- -![My image alt text](media://logo.png) - -![My not found image](media://VOID.png) diff --git a/docs-gen/test/stubs/media/logo.png b/docs-gen/test/stubs/media/logo.png deleted file mode 100755 index 120c55ad67864..0000000000000 Binary files a/docs-gen/test/stubs/media/logo.png and /dev/null differ diff --git a/docs-gen/test/stubs/src/categories.ts b/docs-gen/test/stubs/src/categories.ts deleted file mode 100644 index 33778a7dbf0db..0000000000000 --- a/docs-gen/test/stubs/src/categories.ts +++ /dev/null @@ -1,72 +0,0 @@ -/** - * @category CategoryA - */ -let categoryVariableA1 = true; - -/** - * @category CategoryA - */ -let categoryVariableA2 = true; - -/** - * @category CategoryA - */ -function categoryFunctionA1() { - return true; -} - -/** - * @category CategoryA - */ -function categoryFunctionA2() { - return true; -} - -/** - * @category CategoryA - */ -function categoryFunctionA3() { - return true; -} - -/** - * @category CategoryA - */ -interface CategoryInterfaceA1 { - value: string; -} - -/** - * @category CategoryA - */ -interface CategoryInterfaceA2 { - value: string; -} - -/** - * @category CategoryB - */ -let categoryVariableB1 = true; - -/** - * @category CategoryB - */ -function categoryFunctionB2() { - return true; -} - -let otherVariableA = true; - -function otherFunctionA() { - return true; -} - -let otherVariableB = true; - -function otherFunctionB() { - return true; -} - -interface OtherInterfaceA { - value: string; -} diff --git a/docs-gen/test/stubs/src/classes.ts b/docs-gen/test/stubs/src/classes.ts deleted file mode 100644 index 1b9b293842880..0000000000000 --- a/docs-gen/test/stubs/src/classes.ts +++ /dev/null @@ -1,291 +0,0 @@ -/** - * This is a simple interface. - */ -export interface INameInterface { - /** - * This is a interface member of INameInterface. - * - * It should be inherited by all subinterfaces. - */ - name: string; - - /** - * This is a interface function of INameInterface. 
- * - * It should be inherited by all subinterfaces. - */ - getName(): string; -} - -/** - * This is a simple interface. - */ -export interface IPrintInterface { - /** - * This is a interface function of IPrintInterface - * - * It should be inherited by all subinterfaces. - */ - print(value: string): void; -} - -/** - * This is a interface inheriting from two other interfaces. - */ -export interface IPrintNameInterface extends INameInterface, IPrintInterface { - /** - * This is a interface function of IPrintNameInterface - */ - printName(): void; -} - -/** - * This is a simple base class. - * - * [[include:class-example.md]] - */ -export abstract class BaseClass implements INameInterface { - /** - * This is a simple public member. - */ - public name: string; - - /** - * This is a simple protected member. - */ - protected kind: number; - - /** - * This is a static member. - * - * Static members should not be inherited. - */ - static instance: BaseClass; - static instances: BaseClass[]; - - /** - * This is an instance member of an internal class. - */ - private internalClass: InternalClass; - - constructor(name: string); - constructor(source: BaseClass); - constructor() { - if (arguments.length > 0) { - if (typeof arguments[0] == 'string') { - this.name = arguments[0]; - } else if (arguments[0] instanceof BaseClass) { - this.name = arguments[0].name; - } - } - - this.checkName(); - } - - public abstract abstractMethod(): void; - - /** - * This is a simple member function. - * - * It should be inherited by all subclasses. This class has a static - * member with the same name, both should be documented. - * - * @returns Return the name. - */ - public getName(): string { - return this.name; - } - - /** - * This is a simple static member function. - * - * Static functions should not be inherited. This class has a - * member with the same name, both should be documented. - * - * @returns Return the name. 
- */ - static getName(): string { - return 'A name'; - } - - /** - * This is a simple member function. - * - * It should be inherited by all subclasses. - * - * @param name The new name. - */ - public setName(name: string) { - this.name = name; - this.checkName(); - } - - /** - * This is a simple fat arrow function. - * - * @param param1 The first parameter needed by this function. - * @param param2 The second parameter needed by this function. - * @see https://github.com/sebastian-lenz/typedoc/issues/37 - */ - public arrowFunction = (param2: string, param1: number): void => {}; - - /** - * This is a private function. - */ - private checkName() { - return true; - } - - /** - * This is a static function. - * - * Static functions should not be inherited. - * - * @returns An instance of BaseClass. - */ - static getInstance(): BaseClass { - return BaseClass.instance; - } - - /** - * @see https://github.com/sebastian-lenz/typedoc/issues/42 - */ - public static caTest( - originalValues: BaseClass, - newRecord: any, - fieldNames: string[], - mandatoryFields: string[], - ): string { - var returnval = ''; - var updates: string[] = []; - var allFields: string[] = fieldNames; - for (var j = 0; j < allFields.length; j++) { - var field = allFields[j]; - var oldValue = originalValues[field]; - var newValue = newRecord[field]; - } - return returnval; - } -} - -/** - * This is an internal class, it is not exported. - */ -class InternalClass { - constructor(options: { name: string }) {} -} - -/** - * This is a class that extends another class. - * - * This class has no own constructor, so its constructor should be inherited - * from BaseClass. - */ -export class SubClassA extends BaseClass implements IPrintNameInterface { - public name: string; - - /** - * This is a simple interface function. - */ - public print(value: string): void {} - - /** - * @inheritdoc - */ - public printName(): void { - this.print(this.getName()); - } - - /** - * Returns the name. See [[BaseClass.name]]. 
- * - * @returns The return value. - */ - public get nameProperty(): string { - return this.name; - } - - /** - * Sets the name. See [[BaseClass.name]]. - * - * @param value The new name. - * @returns The return value. - */ - public set nameProperty(value: string) { - this.name = value; - } - - /** - * Returns the name. See [[BaseClass.name]]. - * - * @returns The return value. - */ - public get readOnlyNameProperty(): string { - return this.name; - } - - /** - * Sets the name. See [[BaseClass.name]]. - * - * @param value The new name. - * @returns The return value. - */ - public set writeOnlyNameProperty(value: string) { - this.name = value; - } - - public abstractMethod(): void {} -} - -/** - * This is a class that extends another class. - * - * The constructor of the original class should be overwritten. - */ -export class SubClassB extends BaseClass { - public name: string; - - constructor(name: string) { - super(name); - } - - abstractMethod(): void {} - - doSomething(value: [string, SubClassA, SubClassB]) {} -} - -/** - * This is a generic class. - * - * @param T This a type parameter. - */ -export class GenericClass { - public value: T; - - /** - * Constructor short text. - * - * @param p1 Constructor param - * @param p2 Private string property - * @param p3 Public number property - * @param p4 Public implicit any property - * @param p5 Readonly property - */ - constructor(p1, protected p2: T, public p3: number, private p4: number, readonly p5: string) {} - - /** - * @param value [[getValue]] is the counterpart. - */ - public setValue(value: T) { - this.value = value; - } - - public getValue(): T { - return this.value; - } -} - -/** - * This a non generic class derived from a [[GenericClass|generic class]]. 
- */ -export class NonGenericClass extends GenericClass {} diff --git a/docs-gen/test/stubs/src/comments.ts b/docs-gen/test/stubs/src/comments.ts deleted file mode 100755 index 26e77eb3d13e3..0000000000000 --- a/docs-gen/test/stubs/src/comments.ts +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Additionally you can link to other classes, members or functions using double square brackets. - * - * - Link to an external reflection: [[BaseClass]] - * - Link to an internal reflection: [[commentsInReturn]] - * - Link to an undefined reflection: [[VOID]] - */ - -export let commentsWithSymbolLinks = true; - -/** - * This is an example of include - * - * [[include:class-example.md]] [[include:VOID]] - * - * This is an example of handlebars include - * - * [[include:class-example.hbs]] - */ -export let commentsWithIncludes = true; - -/** - * @name Tag description on same line - * @description - * Tag description on new line - * - Tag description on another line - * - * @deprecated - * Another tag description - */ -export let commentsWithTags = true; - -/** - * Some comments with fence blocks - * ```typescript - * someFunction() - * ``` - * ```js - * anotherFunction() - * ``` - */ -export let commentsWithFencedBlock = true; - -/** - * Comments with a return definition - * @returns Return comments - */ -export function commentsInReturn() { - return; -} - -/** - * See {@linkcode INameInterface} and [INameInterface's name property]{@link INameInterface.name}. - * Also, check out {@link https://www.google.com|Google} and - * {@link https://github.com GitHub}. - * - * Taken from http://usejsdoc.org/tags-inline-link.html. - */ -export function functionWithDocLink() { - return; -} diff --git a/docs-gen/test/stubs/src/destrucuting.ts b/docs-gen/test/stubs/src/destrucuting.ts deleted file mode 100644 index 22d6a99831d88..0000000000000 --- a/docs-gen/test/stubs/src/destrucuting.ts +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Destructuring objects. 
- */ -const { destructObjectA, destructObjectB, destructObjectC } = { - destructObjectA: 0, - destructObjectB: 'string', - destructObjectC: 0, -}; - -/** - * Destructuring arrays. - */ -const [destructArrayA, destructArrayB, destructArrayC = 10] = [0, 'string', 0]; - -/** - * Array Destructuring with rest - */ -const [destructArrayWithRestA, destructArrayWithRestB, ...destructArrayWithRest] = [1, 2, 3, 4]; - -/** - * Array Destructuring with ignores - */ -const [destructArrayWithIgnoresA, , ...destructArrayWithIgnoresRest] = [1, 2, 3, 4]; - -/** - * Destructuring function parameters. - */ -function drawText({ text = '', location: [x, y] = [0, 0], bold = false }) {} - -interface I { - name: string; -} diff --git a/docs-gen/test/stubs/src/functions.ts b/docs-gen/test/stubs/src/functions.ts deleted file mode 100644 index 438c96cb393d9..0000000000000 --- a/docs-gen/test/stubs/src/functions.ts +++ /dev/null @@ -1,148 +0,0 @@ -export function buildName(firstName: string, lastName?: string) { - if (lastName) { - return firstName + ' ' + lastName; - } else { - return firstName; - } -} - -/** - * This is an internal function. - */ -export function internalFunction(): void {} - -/** - * This is a simple exported function. - */ -export function exportedFunction(): void {} - -/** - * This is a function with multiple arguments and a return value. - * - * @param paramZ - This is a string parameter. - * @param paramG - This is a parameter flagged with any. - * This sentence is placed in the next line. - * - * @param paramA - * This is a **parameter** pointing to an interface. - * - * ~~~ - * const value:BaseClass = new BaseClass('test'); - * functionWithArguments('arg', 0, value); - * ~~~ - * - * @returns This is the return value of the function. - */ -export function functionWithParameters(paramZ: string, paramG: any, paramA: Object): number { - return 0; -} - -/** - * This is a function that is assigned to a variable. - * - * @param someParam This is some numeric parameter. 
- * @return This is the return value of the function. - */ -export const variableFunction = function(someParam: number): number { - return 0; -}; - -/** - * This is a function with a parameter that is optional. - * - * @param requiredParam A normal parameter. - * @param optionalParam An optional parameter. - */ -export function functionWithOptionalValue(requiredParam: string, optionalParam?: string) {} - -/** - * This is a function with a parameter that has a default value. - * - * @param valueA A parameter with a default string value. - * @param valueB A parameter with a default numeric value. - * @param valueC A parameter with a default NaN value. - * @param valueD A parameter with a default boolean value. - * @param valueE A parameter with a default null value. - * @return This is the return value of the function. - */ -export function functionWithDefaults( - valueA: string = 'defaultValue', - valueB: number = 100, - valueC: number = Number.NaN, - valueD: boolean = true, - valueE: boolean = null, -): string { - return valueA; -} - -/** - * This is a function with rest parameter. - * - * @param rest The rest parameter. - * @return This is the return value of the function. - */ -export function functionWithRest(...rest: string[]): string { - return rest.join(', '); -} - -/** - * This is the first signature of a function with multiple signatures. - * - * @param value The name value. - */ -export function multipleSignatures(value: string): string; - -/** - * This is the second signature of a function with multiple signatures. - * - * @param value An object containing the name value. - * @param value.name A value of the object. - */ -export function multipleSignatures(value: { name: string }): string; - -/** - * This is the actual implementation, this comment will not be visible - * in the generated documentation. - * - * @return This is the return value of the function. 
- */ -export function multipleSignatures(): string { - if (arguments.length > 0) { - if (typeof arguments[0] == 'object') { - return arguments[0].name; - } else { - return arguments[0]; - } - } - - return ''; -} - -/** - * This is a function that is extended by a module. - * - * @param arg An argument. - */ -export function moduleFunction(arg: string): string { - return ''; -} - -/** - * This is the module extending the function moduleFunction(). - */ -export module moduleFunction { - /** - * This variable is appended to a function. - */ - export let functionVariable: string; - - /** - * This function is appended to another function. - */ - export function append() {} - - /** - * This function is appended to another function. - */ - export function prepend() {} -} diff --git a/docs-gen/test/stubs/src/literals.ts b/docs-gen/test/stubs/src/literals.ts deleted file mode 100644 index 0c615b0376349..0000000000000 --- a/docs-gen/test/stubs/src/literals.ts +++ /dev/null @@ -1,27 +0,0 @@ -const objectLiteral = { - valueZ: 'foo', - valueY: function() { - return 'foo'; - }, - valueX: { - valueZ: 'foo', - valueY: (z: string) => { - return { a: 'test', b: z }; - }, - valueA: [100, 200, 300], - }, - valueA: 100, - valueB: true, -}; - -let typeLiteral: { - valueZ: string; - valueY: { (): string }; - valueX: { - valueZ: string; - valueY: { (z: string): { a: string; b: string } }; - valueA: number[]; - }; - valueA?: number; - valueB?: boolean; -}; diff --git a/docs-gen/test/stubs/src/variables.ts b/docs-gen/test/stubs/src/variables.ts deleted file mode 100644 index a3e8e6b3a8d4e..0000000000000 --- a/docs-gen/test/stubs/src/variables.ts +++ /dev/null @@ -1,15 +0,0 @@ -let isDone: boolean = false; - -let decimal: number = 6; - -let color: string = 'blue'; - -let list: number[] = [1, 2, 3]; - -let x: [string, number]; - -let notSure: any = 4; - -let u: undefined; - -let n: null = null; diff --git a/docs-gen/test/stubs/tsconfig.json b/docs-gen/test/stubs/tsconfig.json deleted file 
mode 100644 index 6bc28569a0aca..0000000000000 --- a/docs-gen/test/stubs/tsconfig.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "compilerOptions": { - "experimentalDecorators": true, - "lib": ["es2016", "dom"], - "module": "commonjs", - "moduleResolution": "node", - "noImplicitAny": false, - "noUnusedLocals": false, - "noUnusedParameters": false, - "strictNullChecks": false, - "target": "es2015" - } -} diff --git a/docs-gen/tsconfig.json b/docs-gen/tsconfig.json deleted file mode 100644 index a43517fdc041b..0000000000000 --- a/docs-gen/tsconfig.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "compilerOptions": { - "declaration": true, - "experimentalDecorators": true, - "lib": ["es2016", "dom"], - "module": "commonjs", - "moduleResolution": "node", - "noImplicitAny": false, - "noUnusedLocals": false, - "outDir": "./dist", - "removeComments": true, - "sourceMap": true, - "strictNullChecks": false, - "target": "es2015", - "baseUrl": ".", - "paths": { - "react": ["node_modules/@types/react"], - "@cubejs-client/core": ["../packages/cubejs-client-core"] - }, - "skipLibCheck": true - }, - "exclude": ["./dist", "./test", "**/*.spec.ts"] -} diff --git a/docs-gen/tslint.json b/docs-gen/tslint.json deleted file mode 100644 index c50f67a4622dd..0000000000000 --- a/docs-gen/tslint.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "extends": "tslint:recommended", - "rules": { - "quotemark": [true, "single"], - "interface-over-type-literal": false, - "no-string-literal": false, - "no-console": false, - "no-var-requires": false, - "max-line-length": [true, 1140], - "no-submodule-imports": false, - "align": false, - "no-unused-expression": false, - "interface-name": false, - "object-literal-sort-keys": false, - "arrow-parens": false, - "member-ordering": false, - "member-access": false, - "object-literal-key-quotes": false, - "max-classes-per-file": false - } -} diff --git a/docs-gen/yarn.lock b/docs-gen/yarn.lock deleted file mode 100644 index 07c65b030c35b..0000000000000 --- a/docs-gen/yarn.lock +++ 
/dev/null @@ -1,4163 +0,0 @@ -# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. -# yarn lockfile v1 - - -"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.5.5": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.5.5.tgz#bc0782f6d69f7b7d49531219699b988f669a8f9d" - integrity sha512-27d4lZoomVyo51VegxI20xZPuSHusqbQag/ztrBC7wegWoQ1nLREPVSKSW8byhTlzTKyNE4ifaTA6lCp7JjpFw== - dependencies: - "@babel/highlight" "^7.0.0" - -"@babel/code-frame@^7.10.3": - version "7.10.3" - resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.10.3.tgz#324bcfd8d35cd3d47dae18cde63d752086435e9a" - integrity sha512-fDx9eNW0qz0WkUeqL6tXEXzVlPh6Y5aCDEZesl0xBGA8ndRukX91Uk44ZqnkECp01NAZUdCAl+aiQNGi0k88Eg== - dependencies: - "@babel/highlight" "^7.10.3" - -"@babel/core@^7.1.0", "@babel/core@^7.7.5": - version "7.10.3" - resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.10.3.tgz#73b0e8ddeec1e3fdd7a2de587a60e17c440ec77e" - integrity sha512-5YqWxYE3pyhIi84L84YcwjeEgS+fa7ZjK6IBVGTjDVfm64njkR2lfDhVR5OudLk8x2GK59YoSyVv+L/03k1q9w== - dependencies: - "@babel/code-frame" "^7.10.3" - "@babel/generator" "^7.10.3" - "@babel/helper-module-transforms" "^7.10.1" - "@babel/helpers" "^7.10.1" - "@babel/parser" "^7.10.3" - "@babel/template" "^7.10.3" - "@babel/traverse" "^7.10.3" - "@babel/types" "^7.10.3" - convert-source-map "^1.7.0" - debug "^4.1.0" - gensync "^1.0.0-beta.1" - json5 "^2.1.2" - lodash "^4.17.13" - resolve "^1.3.2" - semver "^5.4.1" - source-map "^0.5.0" - -"@babel/generator@^7.10.3": - version "7.10.3" - resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.10.3.tgz#32b9a0d963a71d7a54f5f6c15659c3dbc2a523a5" - integrity sha512-drt8MUHbEqRzNR0xnF8nMehbY11b1SDkRw03PSNH/3Rb2Z35oxkddVSi3rcaak0YJQ86PCuE7Qx1jSFhbLNBMA== - dependencies: - "@babel/types" "^7.10.3" - jsesc "^2.5.1" - lodash "^4.17.13" - source-map "^0.5.0" - -"@babel/generator@^7.6.2": - version "7.6.2" - resolved 
"https://registry.yarnpkg.com/@babel/generator/-/generator-7.6.2.tgz#dac8a3c2df118334c2a29ff3446da1636a8f8c03" - integrity sha512-j8iHaIW4gGPnViaIHI7e9t/Hl8qLjERI6DcV9kEpAIDJsAOrcnXqRS7t+QbhL76pwbtqP+QCQLL0z1CyVmtjjQ== - dependencies: - "@babel/types" "^7.6.0" - jsesc "^2.5.1" - lodash "^4.17.13" - source-map "^0.5.0" - -"@babel/helper-function-name@^7.1.0": - version "7.1.0" - resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.1.0.tgz#a0ceb01685f73355d4360c1247f582bfafc8ff53" - integrity sha512-A95XEoCpb3TO+KZzJ4S/5uW5fNe26DjBGqf1o9ucyLyCmi1dXq/B3c8iaWTfBk3VvetUxl16e8tIrd5teOCfGw== - dependencies: - "@babel/helper-get-function-arity" "^7.0.0" - "@babel/template" "^7.1.0" - "@babel/types" "^7.0.0" - -"@babel/helper-function-name@^7.10.3": - version "7.10.3" - resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.10.3.tgz#79316cd75a9fa25ba9787ff54544307ed444f197" - integrity sha512-FvSj2aiOd8zbeqijjgqdMDSyxsGHaMt5Tr0XjQsGKHD3/1FP3wksjnLAWzxw7lvXiej8W1Jt47SKTZ6upQNiRw== - dependencies: - "@babel/helper-get-function-arity" "^7.10.3" - "@babel/template" "^7.10.3" - "@babel/types" "^7.10.3" - -"@babel/helper-get-function-arity@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.0.0.tgz#83572d4320e2a4657263734113c42868b64e49c3" - integrity sha512-r2DbJeg4svYvt3HOS74U4eWKsUAMRH01Z1ds1zx8KNTPtpTL5JAsdFv8BNyOpVqdFhHkkRDIg5B4AsxmkjAlmQ== - dependencies: - "@babel/types" "^7.0.0" - -"@babel/helper-get-function-arity@^7.10.3": - version "7.10.3" - resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.10.3.tgz#3a28f7b28ccc7719eacd9223b659fdf162e4c45e" - integrity sha512-iUD/gFsR+M6uiy69JA6fzM5seno8oE85IYZdbVVEuQaZlEzMO2MXblh+KSPJgsZAUx0EEbWXU0yJaW7C9CdAVg== - dependencies: - "@babel/types" "^7.10.3" - -"@babel/helper-member-expression-to-functions@^7.10.1": - version "7.10.3" 
- resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.10.3.tgz#bc3663ac81ac57c39148fef4c69bf48a77ba8dd6" - integrity sha512-q7+37c4EPLSjNb2NmWOjNwj0+BOyYlssuQ58kHEWk1Z78K5i8vTUsteq78HMieRPQSl/NtpQyJfdjt3qZ5V2vw== - dependencies: - "@babel/types" "^7.10.3" - -"@babel/helper-module-imports@^7.10.1": - version "7.10.3" - resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.10.3.tgz#766fa1d57608e53e5676f23ae498ec7a95e1b11a" - integrity sha512-Jtqw5M9pahLSUWA+76nhK9OG8nwYXzhQzVIGFoNaHnXF/r4l7kz4Fl0UAW7B6mqC5myoJiBP5/YQlXQTMfHI9w== - dependencies: - "@babel/types" "^7.10.3" - -"@babel/helper-module-transforms@^7.10.1": - version "7.10.1" - resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.10.1.tgz#24e2f08ee6832c60b157bb0936c86bef7210c622" - integrity sha512-RLHRCAzyJe7Q7sF4oy2cB+kRnU4wDZY/H2xJFGof+M+SJEGhZsb+GFj5j1AD8NiSaVBJ+Pf0/WObiXu/zxWpFg== - dependencies: - "@babel/helper-module-imports" "^7.10.1" - "@babel/helper-replace-supers" "^7.10.1" - "@babel/helper-simple-access" "^7.10.1" - "@babel/helper-split-export-declaration" "^7.10.1" - "@babel/template" "^7.10.1" - "@babel/types" "^7.10.1" - lodash "^4.17.13" - -"@babel/helper-optimise-call-expression@^7.10.1": - version "7.10.3" - resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.10.3.tgz#f53c4b6783093195b0f69330439908841660c530" - integrity sha512-kT2R3VBH/cnSz+yChKpaKRJQJWxdGoc6SjioRId2wkeV3bK0wLLioFpJROrX0U4xr/NmxSSAWT/9Ih5snwIIzg== - dependencies: - "@babel/types" "^7.10.3" - -"@babel/helper-plugin-utils@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.0.0.tgz#bbb3fbee98661c569034237cc03967ba99b4f250" - integrity sha512-CYAOUCARwExnEixLdB6sDm2dIJ/YgEAKDM1MOeMeZu9Ld/bDgVo8aiWrXwcY7OBh+1Ea2uUcVRcxKk0GJvW7QA== - 
-"@babel/helper-plugin-utils@^7.10.1", "@babel/helper-plugin-utils@^7.8.0": - version "7.10.3" - resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.3.tgz#aac45cccf8bc1873b99a85f34bceef3beb5d3244" - integrity sha512-j/+j8NAWUTxOtx4LKHybpSClxHoq6I91DQ/mKgAXn5oNUPIUiGppjPIX3TDtJWPrdfP9Kfl7e4fgVMiQR9VE/g== - -"@babel/helper-replace-supers@^7.10.1": - version "7.10.1" - resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.10.1.tgz#ec6859d20c5d8087f6a2dc4e014db7228975f13d" - integrity sha512-SOwJzEfpuQwInzzQJGjGaiG578UYmyi2Xw668klPWV5n07B73S0a9btjLk/52Mlcxa+5AdIYqws1KyXRfMoB7A== - dependencies: - "@babel/helper-member-expression-to-functions" "^7.10.1" - "@babel/helper-optimise-call-expression" "^7.10.1" - "@babel/traverse" "^7.10.1" - "@babel/types" "^7.10.1" - -"@babel/helper-simple-access@^7.10.1": - version "7.10.1" - resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.10.1.tgz#08fb7e22ace9eb8326f7e3920a1c2052f13d851e" - integrity sha512-VSWpWzRzn9VtgMJBIWTZ+GP107kZdQ4YplJlCmIrjoLVSi/0upixezHCDG8kpPVTBJpKfxTH01wDhh+jS2zKbw== - dependencies: - "@babel/template" "^7.10.1" - "@babel/types" "^7.10.1" - -"@babel/helper-split-export-declaration@^7.10.1": - version "7.10.1" - resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.10.1.tgz#c6f4be1cbc15e3a868e4c64a17d5d31d754da35f" - integrity sha512-UQ1LVBPrYdbchNhLwj6fetj46BcFwfS4NllJo/1aJsT+1dLTEnXJL0qHqtY7gPzF8S2fXBJamf1biAXV3X077g== - dependencies: - "@babel/types" "^7.10.1" - -"@babel/helper-split-export-declaration@^7.4.4": - version "7.4.4" - resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.4.4.tgz#ff94894a340be78f53f06af038b205c49d993677" - integrity sha512-Ro/XkzLf3JFITkW6b+hNxzZ1n5OQ80NvIUdmHspih1XAhtN3vPTuUFT4eQnela+2MaZ5ulH+iyP513KJrxbN7Q== - dependencies: - 
"@babel/types" "^7.4.4" - -"@babel/helper-validator-identifier@^7.10.3": - version "7.10.3" - resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.10.3.tgz#60d9847f98c4cea1b279e005fdb7c28be5412d15" - integrity sha512-bU8JvtlYpJSBPuj1VUmKpFGaDZuLxASky3LhaKj3bmpSTY6VWooSM8msk+Z0CZoErFye2tlABF6yDkT3FOPAXw== - -"@babel/helpers@^7.10.1": - version "7.10.1" - resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.10.1.tgz#a6827b7cb975c9d9cef5fd61d919f60d8844a973" - integrity sha512-muQNHF+IdU6wGgkaJyhhEmI54MOZBKsFfsXFhboz1ybwJ1Kl7IHlbm2a++4jwrmY5UYsgitt5lfqo1wMFcHmyw== - dependencies: - "@babel/template" "^7.10.1" - "@babel/traverse" "^7.10.1" - "@babel/types" "^7.10.1" - -"@babel/highlight@^7.0.0": - version "7.5.0" - resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.5.0.tgz#56d11312bd9248fa619591d02472be6e8cb32540" - integrity sha512-7dV4eu9gBxoM0dAnj/BCFDW9LFU0zvTrkq0ugM7pnHEgguOEeOz1so2ZghEdzviYzQEED0r4EAgpsBChKy1TRQ== - dependencies: - chalk "^2.0.0" - esutils "^2.0.2" - js-tokens "^4.0.0" - -"@babel/highlight@^7.10.3": - version "7.10.3" - resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.10.3.tgz#c633bb34adf07c5c13156692f5922c81ec53f28d" - integrity sha512-Ih9B/u7AtgEnySE2L2F0Xm0GaM729XqqLfHkalTsbjXGyqmf/6M0Cu0WpvqueUlW+xk88BHw9Nkpj49naU+vWw== - dependencies: - "@babel/helper-validator-identifier" "^7.10.3" - chalk "^2.0.0" - js-tokens "^4.0.0" - -"@babel/parser@^7.1.0", "@babel/parser@^7.10.3": - version "7.10.3" - resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.10.3.tgz#7e71d892b0d6e7d04a1af4c3c79d72c1f10f5315" - integrity sha512-oJtNJCMFdIMwXGmx+KxuaD7i3b8uS7TTFYW/FNG2BT8m+fmGHoiPYoH0Pe3gya07WuFmM5FCDIr1x0irkD/hyA== - -"@babel/parser@^7.6.0", "@babel/parser@^7.6.2": - version "7.6.2" - resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.6.2.tgz#205e9c95e16ba3b8b96090677a67c9d6075b70a1" - integrity 
sha512-mdFqWrSPCmikBoaBYMuBulzTIKuXVPtEISFbRRVNwMWpCms/hmE2kRq0bblUHaNRKrjRlmVbx1sDHmjmRgD2Xg== - -"@babel/plugin-syntax-async-generators@^7.8.4": - version "7.8.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz#a983fb1aeb2ec3f6ed042a210f640e90e786fe0d" - integrity sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-bigint@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz#4c9a6f669f5d0cdf1b90a1671e9a146be5300cea" - integrity sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-class-properties@^7.8.3": - version "7.10.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.10.1.tgz#d5bc0645913df5b17ad7eda0fa2308330bde34c5" - integrity sha512-Gf2Yx/iRs1JREDtVZ56OrjjgFHCaldpTnuy9BHla10qyVT3YkIIGEtoDWhyop0ksu1GvNjHIoYRBqm3zoR1jyQ== - dependencies: - "@babel/helper-plugin-utils" "^7.10.1" - -"@babel/plugin-syntax-import-meta@^7.8.3": - version "7.10.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.1.tgz#3e59120ed8b3c2ccc5abb1cfc7aaa3ea01cd36b6" - integrity sha512-ypC4jwfIVF72og0dgvEcFRdOM2V9Qm1tu7RGmdZOlhsccyK0wisXmMObGuWEOd5jQ+K9wcIgSNftCpk2vkjUfQ== - dependencies: - "@babel/helper-plugin-utils" "^7.10.1" - -"@babel/plugin-syntax-json-strings@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz#01ca21b668cd8218c9e640cb6dd88c5412b2c96a" - integrity sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA== - dependencies: - 
"@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-logical-assignment-operators@^7.8.3": - version "7.10.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.1.tgz#fffee77b4934ce77f3b427649ecdddbec1958550" - integrity sha512-XyHIFa9kdrgJS91CUH+ccPVTnJShr8nLGc5bG2IhGXv5p1Rd+8BleGE5yzIg2Nc1QZAdHDa0Qp4m6066OL96Iw== - dependencies: - "@babel/helper-plugin-utils" "^7.10.1" - -"@babel/plugin-syntax-nullish-coalescing-operator@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz#167ed70368886081f74b5c36c65a88c03b66d1a9" - integrity sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-numeric-separator@^7.8.3": - version "7.10.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.1.tgz#25761ee7410bc8cf97327ba741ee94e4a61b7d99" - integrity sha512-uTd0OsHrpe3tH5gRPTxG8Voh99/WCU78vIm5NMRYPAqC8lR4vajt6KkCAknCHrx24vkPdd/05yfdGSB4EIY2mg== - dependencies: - "@babel/helper-plugin-utils" "^7.10.1" - -"@babel/plugin-syntax-object-rest-spread@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz#60e225edcbd98a640332a2e72dd3e66f1af55871" - integrity sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-optional-catch-binding@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz#6111a265bcfb020eb9efd0fdfd7d26402b9ed6c1" - integrity 
sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/plugin-syntax-optional-chaining@^7.8.3": - version "7.8.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz#4f69c2ab95167e0180cd5336613f8c5788f7d48a" - integrity sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg== - dependencies: - "@babel/helper-plugin-utils" "^7.8.0" - -"@babel/template@^7.1.0": - version "7.6.0" - resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.6.0.tgz#7f0159c7f5012230dad64cca42ec9bdb5c9536e6" - integrity sha512-5AEH2EXD8euCk446b7edmgFdub/qfH1SN6Nii3+fyXP807QRx9Q73A2N5hNwRRslC2H9sNzaFhsPubkS4L8oNQ== - dependencies: - "@babel/code-frame" "^7.0.0" - "@babel/parser" "^7.6.0" - "@babel/types" "^7.6.0" - -"@babel/template@^7.10.1", "@babel/template@^7.10.3", "@babel/template@^7.3.3": - version "7.10.3" - resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.10.3.tgz#4d13bc8e30bf95b0ce9d175d30306f42a2c9a7b8" - integrity sha512-5BjI4gdtD+9fHZUsaxPHPNpwa+xRkDO7c7JbhYn2afvrkDu5SfAAbi9AIMXw2xEhO/BR35TqiW97IqNvCo/GqA== - dependencies: - "@babel/code-frame" "^7.10.3" - "@babel/parser" "^7.10.3" - "@babel/types" "^7.10.3" - -"@babel/traverse@^7.1.0": - version "7.6.2" - resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.6.2.tgz#b0e2bfd401d339ce0e6c05690206d1e11502ce2c" - integrity sha512-8fRE76xNwNttVEF2TwxJDGBLWthUkHWSldmfuBzVRmEDWOtu4XdINTgN7TDWzuLg4bbeIMLvfMFD9we5YcWkRQ== - dependencies: - "@babel/code-frame" "^7.5.5" - "@babel/generator" "^7.6.2" - "@babel/helper-function-name" "^7.1.0" - "@babel/helper-split-export-declaration" "^7.4.4" - "@babel/parser" "^7.6.2" - "@babel/types" "^7.6.0" - debug "^4.1.0" - globals "^11.1.0" - lodash "^4.17.13" - -"@babel/traverse@^7.10.1", "@babel/traverse@^7.10.3": - version "7.10.3" - 
resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.10.3.tgz#0b01731794aa7b77b214bcd96661f18281155d7e" - integrity sha512-qO6623eBFhuPm0TmmrUFMT1FulCmsSeJuVGhiLodk2raUDFhhTECLd9E9jC4LBIWziqt4wgF6KuXE4d+Jz9yug== - dependencies: - "@babel/code-frame" "^7.10.3" - "@babel/generator" "^7.10.3" - "@babel/helper-function-name" "^7.10.3" - "@babel/helper-split-export-declaration" "^7.10.1" - "@babel/parser" "^7.10.3" - "@babel/types" "^7.10.3" - debug "^4.1.0" - globals "^11.1.0" - lodash "^4.17.13" - -"@babel/types@^7.0.0", "@babel/types@^7.3.0", "@babel/types@^7.4.4", "@babel/types@^7.6.0": - version "7.6.1" - resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.6.1.tgz#53abf3308add3ac2a2884d539151c57c4b3ac648" - integrity sha512-X7gdiuaCmA0uRjCmRtYJNAVCc/q+5xSgsfKJHqMN4iNLILX39677fJE1O40arPMh0TTtS9ItH67yre6c7k6t0g== - dependencies: - esutils "^2.0.2" - lodash "^4.17.13" - to-fast-properties "^2.0.0" - -"@babel/types@^7.10.1", "@babel/types@^7.10.3", "@babel/types@^7.3.3": - version "7.10.3" - resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.10.3.tgz#6535e3b79fea86a6b09e012ea8528f935099de8e" - integrity sha512-nZxaJhBXBQ8HVoIcGsf9qWep3Oh3jCENK54V4mRF7qaJabVsAYdbTtmSD8WmAp1R6ytPiu5apMwSXyxB1WlaBA== - dependencies: - "@babel/helper-validator-identifier" "^7.10.3" - lodash "^4.17.13" - to-fast-properties "^2.0.0" - -"@bcoe/v8-coverage@^0.2.3": - version "0.2.3" - resolved "https://registry.yarnpkg.com/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz#75a2e8b51cb758a7553d6804a5932d7aace75c39" - integrity sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw== - -"@cnakazawa/watch@^1.0.3": - version "1.0.4" - resolved "https://registry.yarnpkg.com/@cnakazawa/watch/-/watch-1.0.4.tgz#f864ae85004d0fcab6f50be9141c4da368d1656a" - integrity sha512-v9kIhKwjeZThiWrLmj0y17CWoyddASLj9O2yvbZkbvw/N3rWOYy9zkV66ursAoVr0mV15bL8g0c4QZUE6cdDoQ== - dependencies: - exec-sh "^0.3.2" - minimist "^1.2.0" - 
-"@istanbuljs/load-nyc-config@^1.0.0": - version "1.1.0" - resolved "https://registry.yarnpkg.com/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz#fd3db1d59ecf7cf121e80650bb86712f9b55eced" - integrity sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ== - dependencies: - camelcase "^5.3.1" - find-up "^4.1.0" - get-package-type "^0.1.0" - js-yaml "^3.13.1" - resolve-from "^5.0.0" - -"@istanbuljs/schema@^0.1.2": - version "0.1.2" - resolved "https://registry.yarnpkg.com/@istanbuljs/schema/-/schema-0.1.2.tgz#26520bf09abe4a5644cd5414e37125a8954241dd" - integrity sha512-tsAQNx32a8CoFhjhijUIhI4kccIAgmGhy8LZMZgGfmXcpMbPRUqn5LWmgRttILi6yeGmBJd2xsPkFMs0PzgPCw== - -"@jest/console@^26.1.0": - version "26.1.0" - resolved "https://registry.yarnpkg.com/@jest/console/-/console-26.1.0.tgz#f67c89e4f4d04dbcf7b052aed5ab9c74f915b954" - integrity sha512-+0lpTHMd/8pJp+Nd4lyip+/Iyf2dZJvcCqrlkeZQoQid+JlThA4M9vxHtheyrQ99jJTMQam+es4BcvZ5W5cC3A== - dependencies: - "@jest/types" "^26.1.0" - chalk "^4.0.0" - jest-message-util "^26.1.0" - jest-util "^26.1.0" - slash "^3.0.0" - -"@jest/core@^26.1.0": - version "26.1.0" - resolved "https://registry.yarnpkg.com/@jest/core/-/core-26.1.0.tgz#4580555b522de412a7998b3938c851e4f9da1c18" - integrity sha512-zyizYmDJOOVke4OO/De//aiv8b07OwZzL2cfsvWF3q9YssfpcKfcnZAwDY8f+A76xXSMMYe8i/f/LPocLlByfw== - dependencies: - "@jest/console" "^26.1.0" - "@jest/reporters" "^26.1.0" - "@jest/test-result" "^26.1.0" - "@jest/transform" "^26.1.0" - "@jest/types" "^26.1.0" - ansi-escapes "^4.2.1" - chalk "^4.0.0" - exit "^0.1.2" - graceful-fs "^4.2.4" - jest-changed-files "^26.1.0" - jest-config "^26.1.0" - jest-haste-map "^26.1.0" - jest-message-util "^26.1.0" - jest-regex-util "^26.0.0" - jest-resolve "^26.1.0" - jest-resolve-dependencies "^26.1.0" - jest-runner "^26.1.0" - jest-runtime "^26.1.0" - jest-snapshot "^26.1.0" - jest-util "^26.1.0" - jest-validate "^26.1.0" - jest-watcher "^26.1.0" - micromatch "^4.0.2" - 
p-each-series "^2.1.0" - rimraf "^3.0.0" - slash "^3.0.0" - strip-ansi "^6.0.0" - -"@jest/environment@^26.1.0": - version "26.1.0" - resolved "https://registry.yarnpkg.com/@jest/environment/-/environment-26.1.0.tgz#378853bcdd1c2443b4555ab908cfbabb851e96da" - integrity sha512-86+DNcGongbX7ai/KE/S3/NcUVZfrwvFzOOWX/W+OOTvTds7j07LtC+MgGydH5c8Ri3uIrvdmVgd1xFD5zt/xA== - dependencies: - "@jest/fake-timers" "^26.1.0" - "@jest/types" "^26.1.0" - jest-mock "^26.1.0" - -"@jest/fake-timers@^26.1.0": - version "26.1.0" - resolved "https://registry.yarnpkg.com/@jest/fake-timers/-/fake-timers-26.1.0.tgz#9a76b7a94c351cdbc0ad53e5a748789f819a65fe" - integrity sha512-Y5F3kBVWxhau3TJ825iuWy++BAuQzK/xEa+wD9vDH3RytW9f2DbMVodfUQC54rZDX3POqdxCgcKdgcOL0rYUpA== - dependencies: - "@jest/types" "^26.1.0" - "@sinonjs/fake-timers" "^6.0.1" - jest-message-util "^26.1.0" - jest-mock "^26.1.0" - jest-util "^26.1.0" - -"@jest/globals@^26.1.0": - version "26.1.0" - resolved "https://registry.yarnpkg.com/@jest/globals/-/globals-26.1.0.tgz#6cc5d7cbb79b76b120f2403d7d755693cf063ab1" - integrity sha512-MKiHPNaT+ZoG85oMaYUmGHEqu98y3WO2yeIDJrs2sJqHhYOy3Z6F7F/luzFomRQ8SQ1wEkmahFAz2291Iv8EAw== - dependencies: - "@jest/environment" "^26.1.0" - "@jest/types" "^26.1.0" - expect "^26.1.0" - -"@jest/reporters@^26.1.0": - version "26.1.0" - resolved "https://registry.yarnpkg.com/@jest/reporters/-/reporters-26.1.0.tgz#08952e90c90282e14ff49e927bdf1873617dae78" - integrity sha512-SVAysur9FOIojJbF4wLP0TybmqwDkdnFxHSPzHMMIYyBtldCW9gG+Q5xWjpMFyErDiwlRuPyMSJSU64A67Pazg== - dependencies: - "@bcoe/v8-coverage" "^0.2.3" - "@jest/console" "^26.1.0" - "@jest/test-result" "^26.1.0" - "@jest/transform" "^26.1.0" - "@jest/types" "^26.1.0" - chalk "^4.0.0" - collect-v8-coverage "^1.0.0" - exit "^0.1.2" - glob "^7.1.2" - graceful-fs "^4.2.4" - istanbul-lib-coverage "^3.0.0" - istanbul-lib-instrument "^4.0.3" - istanbul-lib-report "^3.0.0" - istanbul-lib-source-maps "^4.0.0" - istanbul-reports "^3.0.2" - jest-haste-map "^26.1.0" - 
jest-resolve "^26.1.0" - jest-util "^26.1.0" - jest-worker "^26.1.0" - slash "^3.0.0" - source-map "^0.6.0" - string-length "^4.0.1" - terminal-link "^2.0.0" - v8-to-istanbul "^4.1.3" - optionalDependencies: - node-notifier "^7.0.0" - -"@jest/source-map@^26.1.0": - version "26.1.0" - resolved "https://registry.yarnpkg.com/@jest/source-map/-/source-map-26.1.0.tgz#a6a020d00e7d9478f4b690167c5e8b77e63adb26" - integrity sha512-XYRPYx4eEVX15cMT9mstnO7hkHP3krNtKfxUYd8L7gbtia8JvZZ6bMzSwa6IQJENbudTwKMw5R1BePRD+bkEmA== - dependencies: - callsites "^3.0.0" - graceful-fs "^4.2.4" - source-map "^0.6.0" - -"@jest/test-result@^26.1.0": - version "26.1.0" - resolved "https://registry.yarnpkg.com/@jest/test-result/-/test-result-26.1.0.tgz#a93fa15b21ad3c7ceb21c2b4c35be2e407d8e971" - integrity sha512-Xz44mhXph93EYMA8aYDz+75mFbarTV/d/x0yMdI3tfSRs/vh4CqSxgzVmCps1fPkHDCtn0tU8IH9iCKgGeGpfw== - dependencies: - "@jest/console" "^26.1.0" - "@jest/types" "^26.1.0" - "@types/istanbul-lib-coverage" "^2.0.0" - collect-v8-coverage "^1.0.0" - -"@jest/test-sequencer@^26.1.0": - version "26.1.0" - resolved "https://registry.yarnpkg.com/@jest/test-sequencer/-/test-sequencer-26.1.0.tgz#41a6fc8b850c3f33f48288ea9ea517c047e7f14e" - integrity sha512-Z/hcK+rTq56E6sBwMoQhSRDVjqrGtj1y14e2bIgcowARaIE1SgOanwx6gvY4Q9gTKMoZQXbXvptji+q5GYxa6Q== - dependencies: - "@jest/test-result" "^26.1.0" - graceful-fs "^4.2.4" - jest-haste-map "^26.1.0" - jest-runner "^26.1.0" - jest-runtime "^26.1.0" - -"@jest/transform@^26.1.0": - version "26.1.0" - resolved "https://registry.yarnpkg.com/@jest/transform/-/transform-26.1.0.tgz#697f48898c2a2787c9b4cb71d09d7e617464e509" - integrity sha512-ICPm6sUXmZJieq45ix28k0s+d/z2E8CHDsq+WwtWI6kW8m7I8kPqarSEcUN86entHQ570ZBRci5OWaKL0wlAWw== - dependencies: - "@babel/core" "^7.1.0" - "@jest/types" "^26.1.0" - babel-plugin-istanbul "^6.0.0" - chalk "^4.0.0" - convert-source-map "^1.4.0" - fast-json-stable-stringify "^2.0.0" - graceful-fs "^4.2.4" - jest-haste-map "^26.1.0" - jest-regex-util 
"^26.0.0" - jest-util "^26.1.0" - micromatch "^4.0.2" - pirates "^4.0.1" - slash "^3.0.0" - source-map "^0.6.1" - write-file-atomic "^3.0.0" - -"@jest/types@^25.5.0": - version "25.5.0" - resolved "https://registry.yarnpkg.com/@jest/types/-/types-25.5.0.tgz#4d6a4793f7b9599fc3680877b856a97dbccf2a9d" - integrity sha512-OXD0RgQ86Tu3MazKo8bnrkDRaDXXMGUqd+kTtLtK1Zb7CRzQcaSRPPPV37SvYTdevXEBVxe0HXylEjs8ibkmCw== - dependencies: - "@types/istanbul-lib-coverage" "^2.0.0" - "@types/istanbul-reports" "^1.1.1" - "@types/yargs" "^15.0.0" - chalk "^3.0.0" - -"@jest/types@^26.1.0": - version "26.1.0" - resolved "https://registry.yarnpkg.com/@jest/types/-/types-26.1.0.tgz#f8afaaaeeb23b5cad49dd1f7779689941dcb6057" - integrity sha512-GXigDDsp6ZlNMhXQDeuy/iYCDsRIHJabWtDzvnn36+aqFfG14JmFV0e/iXxY4SP9vbXSiPNOWdehU5MeqrYHBQ== - dependencies: - "@types/istanbul-lib-coverage" "^2.0.0" - "@types/istanbul-reports" "^1.1.1" - "@types/yargs" "^15.0.0" - chalk "^4.0.0" - -"@sinonjs/commons@^1.7.0": - version "1.8.0" - resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-1.8.0.tgz#c8d68821a854c555bba172f3b06959a0039b236d" - integrity sha512-wEj54PfsZ5jGSwMX68G8ZXFawcSglQSXqCftWX3ec8MDUzQdHgcKvw97awHbY0efQEL5iKUOAmmVtoYgmrSG4Q== - dependencies: - type-detect "4.0.8" - -"@sinonjs/fake-timers@^6.0.1": - version "6.0.1" - resolved "https://registry.yarnpkg.com/@sinonjs/fake-timers/-/fake-timers-6.0.1.tgz#293674fccb3262ac782c7aadfdeca86b10c75c40" - integrity sha512-MZPUxrmFubI36XS1DI3qmI0YdN1gks62JtFZvxR67ljjSNCeK6U08Zx4msEWOXuofgqUt6zPHSi1H9fbjR/NRA== - dependencies: - "@sinonjs/commons" "^1.7.0" - -"@types/babel__core@^7.0.0", "@types/babel__core@^7.1.7": - version "7.1.9" - resolved "https://registry.yarnpkg.com/@types/babel__core/-/babel__core-7.1.9.tgz#77e59d438522a6fb898fa43dc3455c6e72f3963d" - integrity sha512-sY2RsIJ5rpER1u3/aQ8OFSI7qGIy8o1NEEbgb2UaJcvOtXOMpd39ko723NBpjQFg9SIX7TXtjejZVGeIMLhoOw== - dependencies: - "@babel/parser" "^7.1.0" - "@babel/types" "^7.0.0" - 
"@types/babel__generator" "*" - "@types/babel__template" "*" - "@types/babel__traverse" "*" - -"@types/babel__generator@*": - version "7.6.1" - resolved "https://registry.yarnpkg.com/@types/babel__generator/-/babel__generator-7.6.1.tgz#4901767b397e8711aeb99df8d396d7ba7b7f0e04" - integrity sha512-bBKm+2VPJcMRVwNhxKu8W+5/zT7pwNEqeokFOmbvVSqGzFneNxYcEBro9Ac7/N9tlsaPYnZLK8J1LWKkMsLAew== - dependencies: - "@babel/types" "^7.0.0" - -"@types/babel__template@*": - version "7.0.2" - resolved "https://registry.yarnpkg.com/@types/babel__template/-/babel__template-7.0.2.tgz#4ff63d6b52eddac1de7b975a5223ed32ecea9307" - integrity sha512-/K6zCpeW7Imzgab2bLkLEbz0+1JlFSrUMdw7KoIIu+IUdu51GWaBZpd3y1VXGVXzynvGa4DaIaxNZHiON3GXUg== - dependencies: - "@babel/parser" "^7.1.0" - "@babel/types" "^7.0.0" - -"@types/babel__traverse@*", "@types/babel__traverse@^7.0.6": - version "7.0.12" - resolved "https://registry.yarnpkg.com/@types/babel__traverse/-/babel__traverse-7.0.12.tgz#22f49a028e69465390f87bb103ebd61bd086b8f5" - integrity sha512-t4CoEokHTfcyfb4hUaF9oOHu9RmmNWnm1CP0YmMqOOfClKascOmvlEM736vlqeScuGvBDsHkf8R2INd4DWreQA== - dependencies: - "@babel/types" "^7.3.0" - -"@types/color-name@^1.1.1": - version "1.1.1" - resolved "https://registry.yarnpkg.com/@types/color-name/-/color-name-1.1.1.tgz#1c1261bbeaa10a8055bbc5d8ab84b7b2afc846a0" - integrity sha512-rr+OQyAjxze7GgWrSaJwydHStIhHq2lvY3BOC2Mj7KnzI7XK0Uw1TOOdI9lDoajEbSWLiYgoo4f1R51erQfhPQ== - -"@types/fs-extra@^9.0.1": - version "9.0.1" - resolved "https://registry.yarnpkg.com/@types/fs-extra/-/fs-extra-9.0.1.tgz#91c8fc4c51f6d5dbe44c2ca9ab09310bd00c7918" - integrity sha512-B42Sxuaz09MhC3DDeW5kubRcQ5by4iuVQ0cRRWM2lggLzAa/KVom0Aft/208NgMvNQQZ86s5rVcqDdn/SH0/mg== - dependencies: - "@types/node" "*" - -"@types/graceful-fs@^4.1.2": - version "4.1.3" - resolved "https://registry.yarnpkg.com/@types/graceful-fs/-/graceful-fs-4.1.3.tgz#039af35fe26bec35003e8d86d2ee9c586354348f" - integrity 
sha512-AiHRaEB50LQg0pZmm659vNBb9f4SJ0qrAnteuzhSeAUcJKxoYgEnprg/83kppCnc2zvtCKbdZry1a5pVY3lOTQ== - dependencies: - "@types/node" "*" - -"@types/istanbul-lib-coverage@*", "@types/istanbul-lib-coverage@^2.0.0", "@types/istanbul-lib-coverage@^2.0.1": - version "2.0.3" - resolved "https://registry.yarnpkg.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.3.tgz#4ba8ddb720221f432e443bd5f9117fd22cfd4762" - integrity sha512-sz7iLqvVUg1gIedBOvlkxPlc8/uVzyS5OwGz1cKjXzkl3FpL3al0crU8YGU1WoHkxn0Wxbw5tyi6hvzJKNzFsw== - -"@types/istanbul-lib-report@*": - version "3.0.0" - resolved "https://registry.yarnpkg.com/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz#c14c24f18ea8190c118ee7562b7ff99a36552686" - integrity sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg== - dependencies: - "@types/istanbul-lib-coverage" "*" - -"@types/istanbul-reports@^1.1.1": - version "1.1.2" - resolved "https://registry.yarnpkg.com/@types/istanbul-reports/-/istanbul-reports-1.1.2.tgz#e875cc689e47bce549ec81f3df5e6f6f11cfaeb2" - integrity sha512-P/W9yOX/3oPZSpaYOCQzGqgCQRXn0FFO/V8bWrCQs+wLmvVVxk6CRBXALEvNs9OHIatlnlFokfhuDo2ug01ciw== - dependencies: - "@types/istanbul-lib-coverage" "*" - "@types/istanbul-lib-report" "*" - -"@types/jest@^25.2.3": - version "25.2.3" - resolved "https://registry.yarnpkg.com/@types/jest/-/jest-25.2.3.tgz#33d27e4c4716caae4eced355097a47ad363fdcaf" - integrity sha512-JXc1nK/tXHiDhV55dvfzqtmP4S3sy3T3ouV2tkViZgxY/zeUkcpQcQPGRlgF4KmWzWW5oiWYSZwtCB+2RsE4Fw== - dependencies: - jest-diff "^25.2.1" - pretty-format "^25.2.1" - -"@types/node@*", "@types/node@^14.0.9": - version "14.0.14" - resolved "https://registry.yarnpkg.com/@types/node/-/node-14.0.14.tgz#24a0b5959f16ac141aeb0c5b3cd7a15b7c64cbce" - integrity sha512-syUgf67ZQpaJj01/tRTknkMNoBBLWJOBODF0Zm4NrXmiSuxjymFrxnTu1QVYRubhVkRcZLYZG8STTwJRdVm/WQ== - -"@types/normalize-package-data@^2.4.0": - version "2.4.0" - resolved 
"https://registry.yarnpkg.com/@types/normalize-package-data/-/normalize-package-data-2.4.0.tgz#e486d0d97396d79beedd0a6e33f4534ff6b4973e" - integrity sha512-f5j5b/Gf71L+dbqxIpQ4Z2WlmI/mPJ0fOkGGmFgtb6sAu97EPczzbS3/tJKxmcYDj55OX6ssqwDAWOHIYDRDGA== - -"@types/prettier@^2.0.0": - version "2.0.1" - resolved "https://registry.yarnpkg.com/@types/prettier/-/prettier-2.0.1.tgz#b6e98083f13faa1e5231bfa3bdb1b0feff536b6d" - integrity sha512-boy4xPNEtiw6N3abRhBi/e7hNvy3Tt8E9ZRAQrwAGzoCGZS/1wjo9KY7JHhnfnEsG5wSjDbymCozUM9a3ea7OQ== - -"@types/prop-types@*": - version "15.7.3" - resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.3.tgz#2ab0d5da2e5815f94b0b9d4b95d1e5f243ab2ca7" - integrity sha512-KfRL3PuHmqQLOG+2tGpRO26Ctg+Cq1E01D2DMriKEATHgWLfeNDmq9e29Q9WIky0dQ3NPkd1mzYH8Lm936Z9qw== - -"@types/react@^16.9.41": - version "16.9.49" - resolved "https://registry.yarnpkg.com/@types/react/-/react-16.9.49.tgz#09db021cf8089aba0cdb12a49f8021a69cce4872" - integrity sha512-DtLFjSj0OYAdVLBbyjhuV9CdGVHCkHn2R+xr3XkBvK2rS1Y1tkc14XSGjYgm5Fjjr90AxH9tiSzc1pCFMGO06g== - dependencies: - "@types/prop-types" "*" - csstype "^3.0.2" - -"@types/stack-utils@^1.0.1": - version "1.0.1" - resolved "https://registry.yarnpkg.com/@types/stack-utils/-/stack-utils-1.0.1.tgz#0a851d3bd96498fa25c33ab7278ed3bd65f06c3e" - integrity sha512-l42BggppR6zLmpfU6fq9HEa2oGPEI8yrSPL3GITjfRInppYFahObbIQOQK3UGxEnyQpltZLaPe75046NOZQikw== - -"@types/yargs-parser@*": - version "15.0.0" - resolved "https://registry.yarnpkg.com/@types/yargs-parser/-/yargs-parser-15.0.0.tgz#cb3f9f741869e20cce330ffbeb9271590483882d" - integrity sha512-FA/BWv8t8ZWJ+gEOnLLd8ygxH/2UFbAvgEonyfN6yWGLKc7zVjbpl2Y4CTjid9h2RfgPP6SEt6uHwEOply00yw== - -"@types/yargs@^15.0.0": - version "15.0.5" - resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-15.0.5.tgz#947e9a6561483bdee9adffc983e91a6902af8b79" - integrity sha512-Dk/IDOPtOgubt/IaevIUbTgV7doaKkoorvOyYM2CMwuDyP89bekI7H4xLIwunNYiK9jhCkmc6pUrJk3cj2AB9w== - dependencies: - 
"@types/yargs-parser" "*" - -abab@^2.0.3: - version "2.0.3" - resolved "https://registry.yarnpkg.com/abab/-/abab-2.0.3.tgz#623e2075e02eb2d3f2475e49f99c91846467907a" - integrity sha512-tsFzPpcttalNjFBCFMqsKYQcWxxen1pgJR56by//QwvJc4/OUS3kPOOttx2tSIfjsylB0pYu7f5D3K1RCxUnUg== - -acorn-globals@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/acorn-globals/-/acorn-globals-6.0.0.tgz#46cdd39f0f8ff08a876619b55f5ac8a6dc770b45" - integrity sha512-ZQl7LOWaF5ePqqcX4hLuv/bLXYQNfNWw2c0/yX/TsPRKamzHcTGQnlCjHT3TsmkOUVEPS3crCxiPfdzE/Trlhg== - dependencies: - acorn "^7.1.1" - acorn-walk "^7.1.1" - -acorn-walk@^7.1.1: - version "7.2.0" - resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-7.2.0.tgz#0de889a601203909b0fbe07b8938dc21d2e967bc" - integrity sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA== - -acorn@^7.1.1: - version "7.3.1" - resolved "https://registry.yarnpkg.com/acorn/-/acorn-7.3.1.tgz#85010754db53c3fbaf3b9ea3e083aa5c5d147ffd" - integrity sha512-tLc0wSnatxAQHVHUapaHdz72pi9KUyHjq5KyHjGg9Y8Ifdc79pTh2XvI6I1/chZbnM7QtNKzh66ooDogPZSleA== - -ajv@^6.5.5: - version "6.12.2" - resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.12.2.tgz#c629c5eced17baf314437918d2da88c99d5958cd" - integrity sha512-k+V+hzjm5q/Mr8ef/1Y9goCmlsK4I6Sm74teeyGvFk1XrOsbsKLjEdrvny42CZ+a8sXbk8KWpY/bDwS+FLL2UQ== - dependencies: - fast-deep-equal "^3.1.1" - fast-json-stable-stringify "^2.0.0" - json-schema-traverse "^0.4.1" - uri-js "^4.2.2" - -ansi-escapes@^4.2.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-4.3.1.tgz#a5c47cc43181f1f38ffd7076837700d395522a61" - integrity sha512-JWF7ocqNrp8u9oqpgV+wH5ftbt+cfvv+PTjOvKLT3AdYly/LmORARfEVT1iyjwN+4MqE5UmVKoAdIBqeoCHgLA== - dependencies: - type-fest "^0.11.0" - -ansi-regex@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.0.tgz#388539f55179bf39339c81af30a654d69f87cb75" - integrity 
sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg== - -ansi-styles@^3.2.1: - version "3.2.1" - resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" - integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== - dependencies: - color-convert "^1.9.0" - -ansi-styles@^4.0.0, ansi-styles@^4.1.0: - version "4.2.1" - resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.2.1.tgz#90ae75c424d008d2624c5bf29ead3177ebfcf359" - integrity sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA== - dependencies: - "@types/color-name" "^1.1.1" - color-convert "^2.0.1" - -anymatch@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-2.0.0.tgz#bcb24b4f37934d9aa7ac17b4adaf89e7c76ef2eb" - integrity sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw== - dependencies: - micromatch "^3.1.4" - normalize-path "^2.1.1" - -anymatch@^3.0.3: - version "3.1.1" - resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.1.tgz#c55ecf02185e2469259399310c173ce31233b142" - integrity sha512-mM8522psRCqzV+6LhomX5wgp25YVibjh8Wj23I5RPkPppSVSjyKD2A2mBJmWGa+KN7f2D6LNh9jkBCeyLktzjg== - dependencies: - normalize-path "^3.0.0" - picomatch "^2.0.4" - -argparse@^1.0.7: - version "1.0.10" - resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" - integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg== - dependencies: - sprintf-js "~1.0.2" - -arr-diff@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-4.0.0.tgz#d6461074febfec71e7e15235761a329a5dc7c520" - integrity sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA= - -arr-flatten@^1.1.0: - version "1.1.0" - resolved 
"https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.1.0.tgz#36048bbff4e7b47e136644316c99669ea5ae91f1" - integrity sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg== - -arr-union@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/arr-union/-/arr-union-3.1.0.tgz#e39b09aea9def866a8f206e288af63919bae39c4" - integrity sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ= - -array-unique@^0.3.2: - version "0.3.2" - resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.3.2.tgz#a894b75d4bc4f6cd679ef3244a9fd8f46ae2d428" - integrity sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg= - -asn1@~0.2.3: - version "0.2.4" - resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.4.tgz#8d2475dfab553bb33e77b54e59e880bb8ce23136" - integrity sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg== - dependencies: - safer-buffer "~2.1.0" - -assert-plus@1.0.0, assert-plus@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" - integrity sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU= - -assign-symbols@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/assign-symbols/-/assign-symbols-1.0.0.tgz#59667f41fadd4f20ccbc2bb96b8d4f7f78ec0367" - integrity sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c= - -asynckit@^0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" - integrity sha1-x57Zf380y48robyXkLzDZkdLS3k= - -at-least-node@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/at-least-node/-/at-least-node-1.0.0.tgz#602cd4b46e844ad4effc92a8011a3c46e0238dc2" - integrity sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg== - -atob@^2.1.2: - version "2.1.2" - resolved "https://registry.yarnpkg.com/atob/-/atob-2.1.2.tgz#6d9517eb9e030d2436666651e86bd9f6f13533c9" - integrity 
sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg== - -aws-sign2@~0.7.0: - version "0.7.0" - resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" - integrity sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg= - -aws4@^1.8.0: - version "1.10.0" - resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.10.0.tgz#a17b3a8ea811060e74d47d306122400ad4497ae2" - integrity sha512-3YDiu347mtVtjpyV3u5kVqQLP242c06zwDOgpeRnybmXlYYsLbtTrUBUm8i8srONt+FWobl5aibnU1030PeeuA== - -babel-jest@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/babel-jest/-/babel-jest-26.1.0.tgz#b20751185fc7569a0f135730584044d1cb934328" - integrity sha512-Nkqgtfe7j6PxLO6TnCQQlkMm8wdTdnIF8xrdpooHCuD5hXRzVEPbPneTJKknH5Dsv3L8ip9unHDAp48YQ54Dkg== - dependencies: - "@jest/transform" "^26.1.0" - "@jest/types" "^26.1.0" - "@types/babel__core" "^7.1.7" - babel-plugin-istanbul "^6.0.0" - babel-preset-jest "^26.1.0" - chalk "^4.0.0" - graceful-fs "^4.2.4" - slash "^3.0.0" - -babel-plugin-istanbul@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/babel-plugin-istanbul/-/babel-plugin-istanbul-6.0.0.tgz#e159ccdc9af95e0b570c75b4573b7c34d671d765" - integrity sha512-AF55rZXpe7trmEylbaE1Gv54wn6rwU03aptvRoVIGP8YykoSxqdVLV1TfwflBCE/QtHmqtP8SWlTENqbK8GCSQ== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@istanbuljs/load-nyc-config" "^1.0.0" - "@istanbuljs/schema" "^0.1.2" - istanbul-lib-instrument "^4.0.0" - test-exclude "^6.0.0" - -babel-plugin-jest-hoist@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-26.1.0.tgz#c6a774da08247a28285620a64dfadbd05dd5233a" - integrity sha512-qhqLVkkSlqmC83bdMhM8WW4Z9tB+JkjqAqlbbohS9sJLT5Ha2vfzuKqg5yenXrAjOPG2YC0WiXdH3a9PvB+YYw== - dependencies: - "@babel/template" "^7.3.3" - "@babel/types" "^7.3.3" - "@types/babel__core" "^7.0.0" - "@types/babel__traverse" "^7.0.6" - 
-babel-preset-current-node-syntax@^0.1.2: - version "0.1.3" - resolved "https://registry.yarnpkg.com/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-0.1.3.tgz#b4b547acddbf963cba555ba9f9cbbb70bfd044da" - integrity sha512-uyexu1sVwcdFnyq9o8UQYsXwXflIh8LvrF5+cKrYam93ned1CStffB3+BEcsxGSgagoA3GEyjDqO4a/58hyPYQ== - dependencies: - "@babel/plugin-syntax-async-generators" "^7.8.4" - "@babel/plugin-syntax-bigint" "^7.8.3" - "@babel/plugin-syntax-class-properties" "^7.8.3" - "@babel/plugin-syntax-import-meta" "^7.8.3" - "@babel/plugin-syntax-json-strings" "^7.8.3" - "@babel/plugin-syntax-logical-assignment-operators" "^7.8.3" - "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3" - "@babel/plugin-syntax-numeric-separator" "^7.8.3" - "@babel/plugin-syntax-object-rest-spread" "^7.8.3" - "@babel/plugin-syntax-optional-catch-binding" "^7.8.3" - "@babel/plugin-syntax-optional-chaining" "^7.8.3" - -babel-preset-jest@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/babel-preset-jest/-/babel-preset-jest-26.1.0.tgz#612f714e5b457394acfd863793c564cbcdb7d1c1" - integrity sha512-na9qCqFksknlEj5iSdw1ehMVR06LCCTkZLGKeEtxDDdhg8xpUF09m29Kvh1pRbZ07h7AQ5ttLYUwpXL4tO6w7w== - dependencies: - babel-plugin-jest-hoist "^26.1.0" - babel-preset-current-node-syntax "^0.1.2" - -balanced-match@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" - integrity sha1-ibTRmasr7kneFk6gK4nORi1xt2c= - -base@^0.11.1: - version "0.11.2" - resolved "https://registry.yarnpkg.com/base/-/base-0.11.2.tgz#7bde5ced145b6d551a90db87f83c558b4eb48a8f" - integrity sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg== - dependencies: - cache-base "^1.0.1" - class-utils "^0.3.5" - component-emitter "^1.2.1" - define-property "^1.0.0" - isobject "^3.0.1" - mixin-deep "^1.2.0" - pascalcase "^0.1.1" - -bcrypt-pbkdf@^1.0.0: - version "1.0.2" - 
resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz#a4301d389b6a43f9b67ff3ca11a3f6637e360e9e" - integrity sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4= - dependencies: - tweetnacl "^0.14.3" - -brace-expansion@^1.1.7: - version "1.1.11" - resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" - integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== - dependencies: - balanced-match "^1.0.0" - concat-map "0.0.1" - -braces@^2.3.1: - version "2.3.2" - resolved "https://registry.yarnpkg.com/braces/-/braces-2.3.2.tgz#5979fd3f14cd531565e5fa2df1abfff1dfaee729" - integrity sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w== - dependencies: - arr-flatten "^1.1.0" - array-unique "^0.3.2" - extend-shallow "^2.0.1" - fill-range "^4.0.0" - isobject "^3.0.1" - repeat-element "^1.1.2" - snapdragon "^0.8.1" - snapdragon-node "^2.0.1" - split-string "^3.0.2" - to-regex "^3.0.1" - -braces@^3.0.1: - version "3.0.2" - resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" - integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== - dependencies: - fill-range "^7.0.1" - -browser-process-hrtime@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/browser-process-hrtime/-/browser-process-hrtime-1.0.0.tgz#3c9b4b7d782c8121e56f10106d84c0d0ffc94626" - integrity sha512-9o5UecI3GhkpM6DrXr69PblIuWxPKk9Y0jHBRhdocZ2y7YECBFCsHm79Pr3OyR2AvjhDkabFJaDJMYRazHgsow== - -bs-logger@0.x: - version "0.2.6" - resolved "https://registry.yarnpkg.com/bs-logger/-/bs-logger-0.2.6.tgz#eb7d365307a72cf974cc6cda76b68354ad336bd8" - integrity sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog== - dependencies: - fast-json-stable-stringify "2.x" - -bser@2.1.1: - version "2.1.1" - resolved 
"https://registry.yarnpkg.com/bser/-/bser-2.1.1.tgz#e6787da20ece9d07998533cfd9de6f5c38f4bc05" - integrity sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ== - dependencies: - node-int64 "^0.4.0" - -buffer-from@1.x, buffer-from@^1.0.0: - version "1.1.1" - resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.1.tgz#32713bc028f75c02fdb710d7c7bcec1f2c6070ef" - integrity sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A== - -builtin-modules@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f" - integrity sha1-Jw8HbFpywC9bZaR9+Uxf46J4iS8= - -cache-base@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/cache-base/-/cache-base-1.0.1.tgz#0a7f46416831c8b662ee36fe4e7c59d76f666ab2" - integrity sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ== - dependencies: - collection-visit "^1.0.0" - component-emitter "^1.2.1" - get-value "^2.0.6" - has-value "^1.0.0" - isobject "^3.0.1" - set-value "^2.0.0" - to-object-path "^0.3.0" - union-value "^1.0.0" - unset-value "^1.0.0" - -callsites@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" - integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== - -camelcase@^5.0.0, camelcase@^5.3.1: - version "5.3.1" - resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320" - integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg== - -camelcase@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.0.0.tgz#5259f7c30e35e278f1bdc2a4d91230b37cad981e" - integrity 
sha512-8KMDF1Vz2gzOq54ONPJS65IvTUaB1cHJ2DMM7MbPmLZljDH1qpzzLsWdiN9pHh6qvkRVDTi/07+eNGch/oLU4w== - -capture-exit@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/capture-exit/-/capture-exit-2.0.0.tgz#fb953bfaebeb781f62898239dabb426d08a509a4" - integrity sha512-PiT/hQmTonHhl/HFGN+Lx3JJUznrVYJ3+AQsnthneZbvW7x+f08Tk7yLJTLEOUvBTbduLeeBkxEaYXUOUrRq6g== - dependencies: - rsvp "^4.8.4" - -caseless@~0.12.0: - version "0.12.0" - resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" - integrity sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw= - -chalk@^2.0.0, chalk@^2.3.0: - version "2.4.2" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" - integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== - dependencies: - ansi-styles "^3.2.1" - escape-string-regexp "^1.0.5" - supports-color "^5.3.0" - -chalk@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-3.0.0.tgz#3f73c2bf526591f574cc492c51e2456349f844e4" - integrity sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg== - dependencies: - ansi-styles "^4.1.0" - supports-color "^7.1.0" - -chalk@^4.0.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.0.tgz#4e14870a618d9e2edd97dd8345fd9d9dc315646a" - integrity sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A== - dependencies: - ansi-styles "^4.1.0" - supports-color "^7.1.0" - -char-regex@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/char-regex/-/char-regex-1.0.2.tgz#d744358226217f981ed58f479b1d6bcc29545dcf" - integrity sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw== - -ci-info@^2.0.0: - version "2.0.0" - resolved 
"https://registry.yarnpkg.com/ci-info/-/ci-info-2.0.0.tgz#67a9e964be31a51e15e5010d58e6f12834002f46" - integrity sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ== - -class-utils@^0.3.5: - version "0.3.6" - resolved "https://registry.yarnpkg.com/class-utils/-/class-utils-0.3.6.tgz#f93369ae8b9a7ce02fd41faad0ca83033190c463" - integrity sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg== - dependencies: - arr-union "^3.1.0" - define-property "^0.2.5" - isobject "^3.0.0" - static-extend "^0.1.1" - -cliui@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/cliui/-/cliui-6.0.0.tgz#511d702c0c4e41ca156d7d0e96021f23e13225b1" - integrity sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ== - dependencies: - string-width "^4.2.0" - strip-ansi "^6.0.0" - wrap-ansi "^6.2.0" - -co@^4.6.0: - version "4.6.0" - resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" - integrity sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ= - -collect-v8-coverage@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/collect-v8-coverage/-/collect-v8-coverage-1.0.1.tgz#cc2c8e94fc18bbdffe64d6534570c8a673b27f59" - integrity sha512-iBPtljfCNcTKNAto0KEtDfZ3qzjJvqE3aTGZsbhjSBlorqpXJlaWWtPO35D+ZImoC3KWejX64o+yPGxhWSTzfg== - -collection-visit@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/collection-visit/-/collection-visit-1.0.0.tgz#4bc0373c164bc3291b4d368c829cf1a80a59dca0" - integrity sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA= - dependencies: - map-visit "^1.0.0" - object-visit "^1.0.0" - -color-convert@^1.9.0: - version "1.9.3" - resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" - integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== - dependencies: - color-name "1.1.3" - 
-color-convert@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3" - integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== - dependencies: - color-name "~1.1.4" - -color-name@1.1.3: - version "1.1.3" - resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" - integrity sha1-p9BVi9icQveV3UIyj3QIMcpTvCU= - -color-name@~1.1.4: - version "1.1.4" - resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" - integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== - -combined-stream@^1.0.6, combined-stream@~1.0.6: - version "1.0.8" - resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" - integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg== - dependencies: - delayed-stream "~1.0.0" - -commander@^2.12.1: - version "2.20.3" - resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" - integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== - -component-emitter@^1.2.1: - version "1.3.0" - resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.3.0.tgz#16e4070fba8ae29b679f2215853ee181ab2eabc0" - integrity sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg== - -concat-map@0.0.1: - version "0.0.1" - resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" - integrity sha1-2Klr13/Wjfd5OnMDajug1UBdR3s= - -convert-source-map@^1.4.0, convert-source-map@^1.6.0, convert-source-map@^1.7.0: - version "1.7.0" - resolved 
"https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.7.0.tgz#17a2cb882d7f77d3490585e2ce6c524424a3a442" - integrity sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA== - dependencies: - safe-buffer "~5.1.1" - -copy-descriptor@^0.1.0: - version "0.1.1" - resolved "https://registry.yarnpkg.com/copy-descriptor/-/copy-descriptor-0.1.1.tgz#676f6eb3c39997c2ee1ac3a924fd6124748f578d" - integrity sha1-Z29us8OZl8LuGsOpJP1hJHSPV40= - -copyfiles@^2.3.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/copyfiles/-/copyfiles-2.3.0.tgz#1c26ebbe3d46bba2d309a3fd8e3aaccf53af8c76" - integrity sha512-73v7KFuDFJ/ofkQjZBMjMBFWGgkS76DzXvBMUh7djsMOE5EELWtAO/hRB6Wr5Vj5Zg+YozvoHemv0vnXpqxmOQ== - dependencies: - glob "^7.0.5" - minimatch "^3.0.3" - mkdirp "^1.0.4" - noms "0.0.0" - through2 "^2.0.1" - yargs "^15.3.1" - -core-util-is@1.0.2, core-util-is@~1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" - integrity sha1-tf1UIgqivFq1eqtxQMlAdUUDwac= - -cross-spawn@^6.0.0: - version "6.0.5" - resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-6.0.5.tgz#4a5ec7c64dfae22c3a14124dbacdee846d80cbc4" - integrity sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ== - dependencies: - nice-try "^1.0.4" - path-key "^2.0.1" - semver "^5.5.0" - shebang-command "^1.2.0" - which "^1.2.9" - -cross-spawn@^7.0.0, cross-spawn@^7.0.3: - version "7.0.3" - resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" - integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== - dependencies: - path-key "^3.1.0" - shebang-command "^2.0.0" - which "^2.0.1" - -cssom@^0.4.4: - version "0.4.4" - resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.4.4.tgz#5a66cf93d2d0b661d80bf6a44fb65f5c2e4e0a10" - 
integrity sha512-p3pvU7r1MyyqbTk+WbNJIgJjG2VmTIaB10rI93LzVPrmDJKkzKYMtxxyAvQXR/NS6otuzveI7+7BBq3SjBS2mw== - -cssom@~0.3.6: - version "0.3.8" - resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.3.8.tgz#9f1276f5b2b463f2114d3f2c75250af8c1a36f4a" - integrity sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg== - -cssstyle@^2.2.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/cssstyle/-/cssstyle-2.3.0.tgz#ff665a0ddbdc31864b09647f34163443d90b0852" - integrity sha512-AZL67abkUzIuvcHqk7c09cezpGNcxUxU4Ioi/05xHk4DQeTkWmGYftIE6ctU6AEt+Gn4n1lDStOtj7FKycP71A== - dependencies: - cssom "~0.3.6" - -csstype@^3.0.2: - version "3.0.3" - resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.0.3.tgz#2b410bbeba38ba9633353aff34b05d9755d065f8" - integrity sha512-jPl+wbWPOWJ7SXsWyqGRk3lGecbar0Cb0OvZF/r/ZU011R4YqiRehgkQ9p4eQfo9DSDLqLL3wHwfxeJiuIsNag== - -dashdash@^1.12.0: - version "1.14.1" - resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" - integrity sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA= - dependencies: - assert-plus "^1.0.0" - -data-urls@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/data-urls/-/data-urls-2.0.0.tgz#156485a72963a970f5d5821aaf642bef2bf2db9b" - integrity sha512-X5eWTSXO/BJmpdIKCRuKUgSCgAN0OwliVK3yPKbwIWU1Tdw5BRajxlzMidvh+gwko9AfQ9zIj52pzF91Q3YAvQ== - dependencies: - abab "^2.0.3" - whatwg-mimetype "^2.3.0" - whatwg-url "^8.0.0" - -debug@^2.2.0, debug@^2.3.3: - version "2.6.9" - resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" - integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA== - dependencies: - ms "2.0.0" - -debug@^4.1.0, debug@^4.1.1: - version "4.1.1" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.1.1.tgz#3b72260255109c6b589cee050f1d516139664791" - integrity 
sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw== - dependencies: - ms "^2.1.1" - -decamelize@^1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" - integrity sha1-9lNNFRSCabIDUue+4m9QH5oZEpA= - -decimal.js@^10.2.0: - version "10.2.0" - resolved "https://registry.yarnpkg.com/decimal.js/-/decimal.js-10.2.0.tgz#39466113a9e036111d02f82489b5fd6b0b5ed231" - integrity sha512-vDPw+rDgn3bZe1+F/pyEwb1oMG2XTlRVgAa6B4KccTEpYgF8w6eQllVbQcfIJnZyvzFtFpxnpGtx8dd7DJp/Rw== - -decode-uri-component@^0.2.0: - version "0.2.0" - resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.0.tgz#eb3913333458775cb84cd1a1fae062106bb87545" - integrity sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU= - -deep-is@~0.1.3: - version "0.1.3" - resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34" - integrity sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ= - -deepmerge@^4.2.2: - version "4.2.2" - resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-4.2.2.tgz#44d2ea3679b8f4d4ffba33f03d865fc1e7bf4955" - integrity sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg== - -define-property@^0.2.5: - version "0.2.5" - resolved "https://registry.yarnpkg.com/define-property/-/define-property-0.2.5.tgz#c35b1ef918ec3c990f9a5bc57be04aacec5c8116" - integrity sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY= - dependencies: - is-descriptor "^0.1.0" - -define-property@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/define-property/-/define-property-1.0.0.tgz#769ebaaf3f4a63aad3af9e8d304c9bbe79bfb0e6" - integrity sha1-dp66rz9KY6rTr56NMEybvnm/sOY= - dependencies: - is-descriptor "^1.0.0" - -define-property@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/define-property/-/define-property-2.0.2.tgz#d459689e8d654ba77e02a817f8710d702cb16e9d" - integrity 
sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ== - dependencies: - is-descriptor "^1.0.2" - isobject "^3.0.1" - -delayed-stream@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" - integrity sha1-3zrhmayt+31ECqrgsp4icrJOxhk= - -detect-newline@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/detect-newline/-/detect-newline-3.1.0.tgz#576f5dfc63ae1a192ff192d8ad3af6308991b651" - integrity sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA== - -diff-sequences@^25.2.6: - version "25.2.6" - resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-25.2.6.tgz#5f467c00edd35352b7bca46d7927d60e687a76dd" - integrity sha512-Hq8o7+6GaZeoFjtpgvRBUknSXNeJiCx7V9Fr94ZMljNiCr9n9L8H8aJqgWOQiDDGdyn29fRNcDdRVJ5fdyihfg== - -diff-sequences@^26.0.0: - version "26.0.0" - resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-26.0.0.tgz#0760059a5c287637b842bd7085311db7060e88a6" - integrity sha512-JC/eHYEC3aSS0vZGjuoc4vHA0yAQTzhQQldXMeMF+JlxLGJlCO38Gma82NV9gk1jGFz8mDzUMeaKXvjRRdJ2dg== - -diff@^4.0.1: - version "4.0.2" - resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" - integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== - -domexception@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/domexception/-/domexception-2.0.1.tgz#fb44aefba793e1574b0af6aed2801d057529f304" - integrity sha512-yxJ2mFy/sibVQlu5qHjOkf9J3K6zgmCxgJ94u2EdvDOV09H+32LtRswEcUsmUWN72pVLOEnTSRaIVVzVQgS0dg== - dependencies: - webidl-conversions "^5.0.0" - -duplexer@~0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/duplexer/-/duplexer-0.1.1.tgz#ace6ff808c1ce66b57d1ebf97977acb02334cfc1" - integrity sha1-rOb/gIwc5mtX0ev5eXessCM0z8E= - -ecc-jsbn@~0.1.1: - version 
"0.1.2" - resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" - integrity sha1-OoOpBOVDUyh4dMVkt1SThoSamMk= - dependencies: - jsbn "~0.1.0" - safer-buffer "^2.1.0" - -emoji-regex@^8.0.0: - version "8.0.0" - resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" - integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== - -end-of-stream@^1.1.0: - version "1.4.4" - resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.4.tgz#5ae64a5f45057baf3626ec14da0ca5e4b2431eb0" - integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q== - dependencies: - once "^1.4.0" - -error-ex@^1.3.1: - version "1.3.2" - resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" - integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g== - dependencies: - is-arrayish "^0.2.1" - -escape-string-regexp@^1.0.5: - version "1.0.5" - resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" - integrity sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ= - -escape-string-regexp@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz#a30304e99daa32e23b2fd20f51babd07cffca344" - integrity sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w== - -escodegen@^1.14.1: - version "1.14.3" - resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-1.14.3.tgz#4e7b81fba61581dc97582ed78cab7f0e8d63f503" - integrity sha512-qFcX0XJkdg+PB3xjZZG/wKSuT1PnQWx57+TVSjIMmILd2yC/6ByYElPwJnslDsuWuSAp4AwJGumarAAmJch5Kw== - dependencies: - esprima "^4.0.1" - estraverse "^4.2.0" - esutils "^2.0.2" - optionator "^0.8.1" - 
optionalDependencies: - source-map "~0.6.1" - -esprima@^4.0.0, esprima@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" - integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== - -estraverse@^4.2.0: - version "4.3.0" - resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.3.0.tgz#398ad3f3c5a24948be7725e83d11a7de28cdbd1d" - integrity sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw== - -esutils@^2.0.2: - version "2.0.3" - resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64" - integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g== - -event-stream@=3.3.4: - version "3.3.4" - resolved "https://registry.yarnpkg.com/event-stream/-/event-stream-3.3.4.tgz#4ab4c9a0f5a54db9338b4c34d86bfce8f4b35571" - integrity sha1-SrTJoPWlTbkzi0w02Gv86PSzVXE= - dependencies: - duplexer "~0.1.1" - from "~0" - map-stream "~0.1.0" - pause-stream "0.0.11" - split "0.3" - stream-combiner "~0.0.4" - through "~2.3.1" - -exec-sh@^0.3.2: - version "0.3.4" - resolved "https://registry.yarnpkg.com/exec-sh/-/exec-sh-0.3.4.tgz#3a018ceb526cc6f6df2bb504b2bfe8e3a4934ec5" - integrity sha512-sEFIkc61v75sWeOe72qyrqg2Qg0OuLESziUDk/O/z2qgS15y2gWVFrI6f2Qn/qw/0/NCfCEsmNA4zOjkwEZT1A== - -execa@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/execa/-/execa-1.0.0.tgz#c6236a5bb4df6d6f15e88e7f017798216749ddd8" - integrity sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA== - dependencies: - cross-spawn "^6.0.0" - get-stream "^4.0.0" - is-stream "^1.1.0" - npm-run-path "^2.0.0" - p-finally "^1.0.0" - signal-exit "^3.0.0" - strip-eof "^1.0.0" - -execa@^4.0.0: - version "4.0.2" - resolved 
"https://registry.yarnpkg.com/execa/-/execa-4.0.2.tgz#ad87fb7b2d9d564f70d2b62d511bee41d5cbb240" - integrity sha512-QI2zLa6CjGWdiQsmSkZoGtDx2N+cQIGb3yNolGTdjSQzydzLgYYf8LRuagp7S7fPimjcrzUDSUFd/MgzELMi4Q== - dependencies: - cross-spawn "^7.0.0" - get-stream "^5.0.0" - human-signals "^1.1.1" - is-stream "^2.0.0" - merge-stream "^2.0.0" - npm-run-path "^4.0.0" - onetime "^5.1.0" - signal-exit "^3.0.2" - strip-final-newline "^2.0.0" - -exit@^0.1.2: - version "0.1.2" - resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c" - integrity sha1-BjJjj42HfMghB9MKD/8aF8uhzQw= - -expand-brackets@^2.1.4: - version "2.1.4" - resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-2.1.4.tgz#b77735e315ce30f6b6eff0f83b04151a22449622" - integrity sha1-t3c14xXOMPa27/D4OwQVGiJEliI= - dependencies: - debug "^2.3.3" - define-property "^0.2.5" - extend-shallow "^2.0.1" - posix-character-classes "^0.1.0" - regex-not "^1.0.0" - snapdragon "^0.8.1" - to-regex "^3.0.1" - -expect@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/expect/-/expect-26.1.0.tgz#8c62e31d0f8d5a8ebb186ee81473d15dd2fbf7c8" - integrity sha512-QbH4LZXDsno9AACrN9eM0zfnby9G+OsdNgZUohjg/P0mLy1O+/bzTAJGT6VSIjVCe8yKM6SzEl/ckEOFBT7Vnw== - dependencies: - "@jest/types" "^26.1.0" - ansi-styles "^4.0.0" - jest-get-type "^26.0.0" - jest-matcher-utils "^26.1.0" - jest-message-util "^26.1.0" - jest-regex-util "^26.0.0" - -extend-shallow@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-2.0.1.tgz#51af7d614ad9a9f610ea1bafbb989d6b1c56890f" - integrity sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8= - dependencies: - is-extendable "^0.1.0" - -extend-shallow@^3.0.0, extend-shallow@^3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-3.0.2.tgz#26a71aaf073b39fb2127172746131c2704028db8" - integrity sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg= - dependencies: - assign-symbols "^1.0.0" - 
is-extendable "^1.0.1" - -extend@~3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" - integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g== - -extglob@^2.0.4: - version "2.0.4" - resolved "https://registry.yarnpkg.com/extglob/-/extglob-2.0.4.tgz#ad00fe4dc612a9232e8718711dc5cb5ab0285543" - integrity sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw== - dependencies: - array-unique "^0.3.2" - define-property "^1.0.0" - expand-brackets "^2.1.4" - extend-shallow "^2.0.1" - fragment-cache "^0.2.1" - regex-not "^1.0.0" - snapdragon "^0.8.1" - to-regex "^3.0.1" - -extsprintf@1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05" - integrity sha1-lpGEQOMEGnpBT4xS48V06zw+HgU= - -extsprintf@^1.2.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.0.tgz#e2689f8f356fad62cca65a3a91c5df5f9551692f" - integrity sha1-4mifjzVvrWLMplo6kcXfX5VRaS8= - -fast-deep-equal@^3.1.1: - version "3.1.3" - resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" - integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== - -fast-json-stable-stringify@2.x, fast-json-stable-stringify@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" - integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== - -fast-levenshtein@~2.0.6: - version "2.0.6" - resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" - integrity sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc= - 
-fb-watchman@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-2.0.1.tgz#fc84fb39d2709cf3ff6d743706157bb5708a8a85" - integrity sha512-DkPJKQeY6kKwmuMretBhr7G6Vodr7bFwDYTXIkfG1gjvNpaxBTQV3PbXg6bR1c1UP4jPOX0jHUbbHANL9vRjVg== - dependencies: - bser "2.1.1" - -fill-range@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-4.0.0.tgz#d544811d428f98eb06a63dc402d2403c328c38f7" - integrity sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc= - dependencies: - extend-shallow "^2.0.1" - is-number "^3.0.0" - repeat-string "^1.6.1" - to-regex-range "^2.1.0" - -fill-range@^7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" - integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== - dependencies: - to-regex-range "^5.0.1" - -find-up@^4.0.0, find-up@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/find-up/-/find-up-4.1.0.tgz#97afe7d6cdc0bc5928584b7c8d7b16e8a9aa5d19" - integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw== - dependencies: - locate-path "^5.0.0" - path-exists "^4.0.0" - -for-in@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80" - integrity sha1-gQaNKVqBQuwKxybG4iAMMPttXoA= - -forever-agent@~0.6.1: - version "0.6.1" - resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" - integrity sha1-+8cfDEGt6zf5bFd60e1C2P2sypE= - -form-data@~2.3.2: - version "2.3.3" - resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.3.3.tgz#dcce52c05f644f298c6a7ab936bd724ceffbf3a6" - integrity sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ== - dependencies: - asynckit "^0.4.0" - combined-stream "^1.0.6" - mime-types 
"^2.1.12" - -fragment-cache@^0.2.1: - version "0.2.1" - resolved "https://registry.yarnpkg.com/fragment-cache/-/fragment-cache-0.2.1.tgz#4290fad27f13e89be7f33799c6bc5a0abfff0d19" - integrity sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk= - dependencies: - map-cache "^0.2.2" - -from@~0: - version "0.1.7" - resolved "https://registry.yarnpkg.com/from/-/from-0.1.7.tgz#83c60afc58b9c56997007ed1a768b3ab303a44fe" - integrity sha1-g8YK/Fi5xWmXAH7Rp2izqzA6RP4= - -fs-extra@^9.0.0: - version "9.0.1" - resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.0.1.tgz#910da0062437ba4c39fedd863f1675ccfefcb9fc" - integrity sha512-h2iAoN838FqAFJY2/qVpzFXy+EBxfVE220PalAqQLDVsFOHLJrZvut5puAbCdNv6WJk+B8ihI+k0c7JK5erwqQ== - dependencies: - at-least-node "^1.0.0" - graceful-fs "^4.2.0" - jsonfile "^6.0.1" - universalify "^1.0.0" - -fs-extra@^9.0.1: - version "9.1.0" - resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.1.0.tgz#5954460c764a8da2094ba3554bf839e6b9a7c86d" - integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ== - dependencies: - at-least-node "^1.0.0" - graceful-fs "^4.2.0" - jsonfile "^6.0.1" - universalify "^2.0.0" - -fs.realpath@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" - integrity sha1-FQStJSMVjKpA20onh8sBQRmU6k8= - -fsevents@^2.1.2: - version "2.1.3" - resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.1.3.tgz#fb738703ae8d2f9fe900c33836ddebee8b97f23e" - integrity sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ== - -function-bind@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" - integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== - -gensync@^1.0.0-beta.1: - version "1.0.0-beta.1" - resolved 
"https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.1.tgz#58f4361ff987e5ff6e1e7a210827aa371eaac269" - integrity sha512-r8EC6NO1sngH/zdD9fiRDLdcgnbayXah+mLgManTaIZJqEC1MZstmnox8KpnI2/fxQwrp5OpCOYWLp4rBl4Jcg== - -get-caller-file@^2.0.1: - version "2.0.5" - resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" - integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== - -get-package-type@^0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/get-package-type/-/get-package-type-0.1.0.tgz#8de2d803cff44df3bc6c456e6668b36c3926e11a" - integrity sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q== - -get-stream@^4.0.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-4.1.0.tgz#c1b255575f3dc21d59bfc79cd3d2b46b1c3a54b5" - integrity sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w== - dependencies: - pump "^3.0.0" - -get-stream@^5.0.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-5.1.0.tgz#01203cdc92597f9b909067c3e656cc1f4d3c4dc9" - integrity sha512-EXr1FOzrzTfGeL0gQdeFEvOMm2mzMOglyiOXSTpPC+iAjAKftbr3jpCMWynogwYnM+eSj9sHGc6wjIcDvYiygw== - dependencies: - pump "^3.0.0" - -get-value@^2.0.3, get-value@^2.0.6: - version "2.0.6" - resolved "https://registry.yarnpkg.com/get-value/-/get-value-2.0.6.tgz#dc15ca1c672387ca76bd37ac0a395ba2042a2c28" - integrity sha1-3BXKHGcjh8p2vTesCjlbogQqLCg= - -getpass@^0.1.1: - version "0.1.7" - resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa" - integrity sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo= - dependencies: - assert-plus "^1.0.0" - -glob@^7.0.0: - version "7.2.3" - resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.3.tgz#b8df0fb802bbfa8e89bd1d938b4e16578ed44f2b" - integrity 
sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q== - dependencies: - fs.realpath "^1.0.0" - inflight "^1.0.4" - inherits "2" - minimatch "^3.1.1" - once "^1.3.0" - path-is-absolute "^1.0.0" - -glob@^7.0.5, glob@^7.1.1, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4: - version "7.1.6" - resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.6.tgz#141f33b81a7c2492e125594307480c46679278a6" - integrity sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA== - dependencies: - fs.realpath "^1.0.0" - inflight "^1.0.4" - inherits "2" - minimatch "^3.0.4" - once "^1.3.0" - path-is-absolute "^1.0.0" - -globals@^11.1.0: - version "11.12.0" - resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" - integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== - -graceful-fs@^4.1.6, graceful-fs@^4.2.0: - version "4.2.2" - resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.2.tgz#6f0952605d0140c1cfdb138ed005775b92d67b02" - integrity sha512-IItsdsea19BoLC7ELy13q1iJFNmd7ofZH5+X/pJr90/nRoPEX0DJo1dHDbgtYWOhJhcCgMDTOw84RZ72q6lB+Q== - -graceful-fs@^4.2.4: - version "4.2.4" - resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.4.tgz#2256bde14d3632958c465ebc96dc467ca07a29fb" - integrity sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw== - -growly@^1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/growly/-/growly-1.3.0.tgz#f10748cbe76af964b7c96c93c6bcc28af120c081" - integrity sha1-8QdIy+dq+WS3yWyTxrzCivEgwIE= - -handlebars@^4.7.6: - version "4.7.7" - resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.7.7.tgz#9ce33416aad02dbd6c8fafa8240d5d98004945a1" - integrity sha512-aAcXm5OAfE/8IXkcZvCepKU3VzW1/39Fb5ZuqMtgI/hT8X2YgoMvBY5dLhq/cpOvw7Lk1nK/UF71aLG/ZnVYRA== - dependencies: - minimist "^1.2.5" - neo-async 
"^2.6.0" - source-map "^0.6.1" - wordwrap "^1.0.0" - optionalDependencies: - uglify-js "^3.1.4" - -har-schema@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92" - integrity sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI= - -har-validator@~5.1.3: - version "5.1.3" - resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.1.3.tgz#1ef89ebd3e4996557675eed9893110dc350fa080" - integrity sha512-sNvOCzEQNr/qrvJgc3UG/kD4QtlHycrzwS+6mfTrrSq97BvaYcPZZI1ZSqGSPR73Cxn4LKTD4PttRwfU7jWq5g== - dependencies: - ajv "^6.5.5" - har-schema "^2.0.0" - -has-flag@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" - integrity sha1-tdRU3CGZriJWmfNGfloH87lVuv0= - -has-flag@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" - integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== - -has-value@^0.3.1: - version "0.3.1" - resolved "https://registry.yarnpkg.com/has-value/-/has-value-0.3.1.tgz#7b1f58bada62ca827ec0a2078025654845995e1f" - integrity sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8= - dependencies: - get-value "^2.0.3" - has-values "^0.1.4" - isobject "^2.0.0" - -has-value@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/has-value/-/has-value-1.0.0.tgz#18b281da585b1c5c51def24c930ed29a0be6b177" - integrity sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc= - dependencies: - get-value "^2.0.6" - has-values "^1.0.0" - isobject "^3.0.0" - -has-values@^0.1.4: - version "0.1.4" - resolved "https://registry.yarnpkg.com/has-values/-/has-values-0.1.4.tgz#6d61de95d91dfca9b9a02089ad384bff8f62b771" - integrity sha1-bWHeldkd/Km5oCCJrThL/49it3E= - -has-values@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/has-values/-/has-values-1.0.0.tgz#95b0b63fec2146619a6fe57fe75628d5a39efe4f" 
- integrity sha1-lbC2P+whRmGab+V/51Yo1aOe/k8= - dependencies: - is-number "^3.0.0" - kind-of "^4.0.0" - -has@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796" - integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw== - dependencies: - function-bind "^1.1.1" - -highlight.js@^10.2.0: - version "10.7.3" - resolved "https://registry.yarnpkg.com/highlight.js/-/highlight.js-10.7.3.tgz#697272e3991356e40c3cac566a74eef681756531" - integrity sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A== - -hosted-git-info@^2.1.4: - version "2.8.8" - resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.8.8.tgz#7539bd4bc1e0e0a895815a2e0262420b12858488" - integrity sha512-f/wzC2QaWBs7t9IYqB4T3sR1xviIViXJRJTWBlx2Gf3g0Xi5vI7Yy4koXQ1c9OYDGHN9sBy1DQ2AB8fqZBWhUg== - -html-encoding-sniffer@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-2.0.1.tgz#42a6dc4fd33f00281176e8b23759ca4e4fa185f3" - integrity sha512-D5JbOMBIR/TVZkubHT+OyT2705QvogUW4IBn6nHd756OwieSF9aDYFj4dv6HHEVGYbHaLETa3WggZYWWMyy3ZQ== - dependencies: - whatwg-encoding "^1.0.5" - -html-escaper@^2.0.0: - version "2.0.2" - resolved "https://registry.yarnpkg.com/html-escaper/-/html-escaper-2.0.2.tgz#dfd60027da36a36dfcbe236262c00a5822681453" - integrity sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg== - -http-signature@~1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.2.0.tgz#9aecd925114772f3d95b65a60abb8f7c18fbace1" - integrity sha1-muzZJRFHcvPZW2WmCruPfBj7rOE= - dependencies: - assert-plus "^1.0.0" - jsprim "^1.2.2" - sshpk "^1.7.0" - -human-signals@^1.1.1: - version "1.1.1" - resolved 
"https://registry.yarnpkg.com/human-signals/-/human-signals-1.1.1.tgz#c5b1cd14f50aeae09ab6c59fe63ba3395fe4dfa3" - integrity sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw== - -iconv-lite@0.4.24: - version "0.4.24" - resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b" - integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== - dependencies: - safer-buffer ">= 2.1.2 < 3" - -import-local@^3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/import-local/-/import-local-3.0.2.tgz#a8cfd0431d1de4a2199703d003e3e62364fa6db6" - integrity sha512-vjL3+w0oulAVZ0hBHnxa/Nm5TAurf9YLQJDhqRZyqb+VKGOB6LU8t9H1Nr5CIo16vh9XfJTOoHwU0B71S557gA== - dependencies: - pkg-dir "^4.2.0" - resolve-cwd "^3.0.0" - -imurmurhash@^0.1.4: - version "0.1.4" - resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" - integrity sha1-khi5srkoojixPcT7a21XbyMUU+o= - -inflection@^1.12.0: - version "1.12.0" - resolved "https://registry.yarnpkg.com/inflection/-/inflection-1.12.0.tgz#a200935656d6f5f6bc4dc7502e1aecb703228416" - integrity sha1-ogCTVlbW9fa8TcdQLhrstwMihBY= - -inflight@^1.0.4: - version "1.0.6" - resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" - integrity sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk= - dependencies: - once "^1.3.0" - wrappy "1" - -inherits@2, inherits@^2.0.1, inherits@~2.0.1, inherits@~2.0.3: - version "2.0.4" - resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" - integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== - -interpret@^1.0.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/interpret/-/interpret-1.4.0.tgz#665ab8bc4da27a774a40584e812e3e0fa45b1a1e" - integrity 
sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA== - -ip-regex@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-2.1.0.tgz#fa78bf5d2e6913c911ce9f819ee5146bb6d844e9" - integrity sha1-+ni/XS5pE8kRzp+BnuUUa7bYROk= - -is-accessor-descriptor@^0.1.6: - version "0.1.6" - resolved "https://registry.yarnpkg.com/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz#a9e12cb3ae8d876727eeef3843f8a0897b5c98d6" - integrity sha1-qeEss66Nh2cn7u84Q/igiXtcmNY= - dependencies: - kind-of "^3.0.2" - -is-accessor-descriptor@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz#169c2f6d3df1f992618072365c9b0ea1f6878656" - integrity sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ== - dependencies: - kind-of "^6.0.0" - -is-arrayish@^0.2.1: - version "0.2.1" - resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" - integrity sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0= - -is-buffer@^1.1.5: - version "1.1.6" - resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be" - integrity sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w== - -is-ci@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/is-ci/-/is-ci-2.0.0.tgz#6bc6334181810e04b5c22b3d589fdca55026404c" - integrity sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w== - dependencies: - ci-info "^2.0.0" - -is-core-module@^2.8.1: - version "2.9.0" - resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.9.0.tgz#e1c34429cd51c6dd9e09e0799e396e27b19a9c69" - integrity sha512-+5FPy5PnwmO3lvfMb0AsoPaBG+5KHUI0wYFXOtYPnVVVspTFUuMZNfNaNVRt3FZadstu2c8x23vykRW/NBoU6A== - dependencies: - has "^1.0.3" - -is-data-descriptor@^0.1.4: - 
version "0.1.4" - resolved "https://registry.yarnpkg.com/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz#0b5ee648388e2c860282e793f1856fec3f301b56" - integrity sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y= - dependencies: - kind-of "^3.0.2" - -is-data-descriptor@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz#d84876321d0e7add03990406abbbbd36ba9268c7" - integrity sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ== - dependencies: - kind-of "^6.0.0" - -is-descriptor@^0.1.0: - version "0.1.6" - resolved "https://registry.yarnpkg.com/is-descriptor/-/is-descriptor-0.1.6.tgz#366d8240dde487ca51823b1ab9f07a10a78251ca" - integrity sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg== - dependencies: - is-accessor-descriptor "^0.1.6" - is-data-descriptor "^0.1.4" - kind-of "^5.0.0" - -is-descriptor@^1.0.0, is-descriptor@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/is-descriptor/-/is-descriptor-1.0.2.tgz#3b159746a66604b04f8c81524ba365c5f14d86ec" - integrity sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg== - dependencies: - is-accessor-descriptor "^1.0.0" - is-data-descriptor "^1.0.0" - kind-of "^6.0.2" - -is-docker@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/is-docker/-/is-docker-2.0.0.tgz#2cb0df0e75e2d064fe1864c37cdeacb7b2dcf25b" - integrity sha512-pJEdRugimx4fBMra5z2/5iRdZ63OhYV0vr0Dwm5+xtW4D1FvRkB8hamMIhnWfyJeDdyr/aa7BDyNbtG38VxgoQ== - -is-extendable@^0.1.0, is-extendable@^0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89" - integrity sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik= - -is-extendable@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-1.0.1.tgz#a7470f9e426733d81bd81e1155264e3a3507cab4" - integrity 
sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA== - dependencies: - is-plain-object "^2.0.4" - -is-fullwidth-code-point@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d" - integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg== - -is-generator-fn@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/is-generator-fn/-/is-generator-fn-2.1.0.tgz#7d140adc389aaf3011a8f2a2a4cfa6faadffb118" - integrity sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ== - -is-number@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/is-number/-/is-number-3.0.0.tgz#24fd6201a4782cf50561c810276afc7d12d71195" - integrity sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU= - dependencies: - kind-of "^3.0.2" - -is-number@^7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b" - integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng== - -is-plain-object@^2.0.3, is-plain-object@^2.0.4: - version "2.0.4" - resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677" - integrity sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og== - dependencies: - isobject "^3.0.1" - -is-potential-custom-element-name@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.0.tgz#0c52e54bcca391bb2c494b21e8626d7336c6e397" - integrity sha1-DFLlS8yjkbssSUsh6GJtczbG45c= - -is-stream@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44" - integrity 
sha1-EtSj3U5o4Lec6428hBc66A2RykQ= - -is-stream@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.0.tgz#bde9c32680d6fae04129d6ac9d921ce7815f78e3" - integrity sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw== - -is-typedarray@^1.0.0, is-typedarray@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" - integrity sha1-5HnICFjfDBsR3dppQPlgEfzaSpo= - -is-windows@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/is-windows/-/is-windows-1.0.2.tgz#d1850eb9791ecd18e6182ce12a30f396634bb19d" - integrity sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA== - -is-wsl@^2.1.1: - version "2.2.0" - resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-2.2.0.tgz#74a4c76e77ca9fd3f932f290c17ea326cd157271" - integrity sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww== - dependencies: - is-docker "^2.0.0" - -isarray@0.0.1: - version "0.0.1" - resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf" - integrity sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8= - -isarray@1.0.0, isarray@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" - integrity sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE= - -isexe@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" - integrity sha1-6PvzdNxVb/iUehDcsFctYz8s+hA= - -isobject@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89" - integrity sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk= - dependencies: - isarray "1.0.0" - -isobject@^3.0.0, isobject@^3.0.1: - version "3.0.1" - resolved 
"https://registry.yarnpkg.com/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df" - integrity sha1-TkMekrEalzFjaqH5yNHMvP2reN8= - -isstream@~0.1.2: - version "0.1.2" - resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" - integrity sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo= - -istanbul-lib-coverage@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-3.0.0.tgz#f5944a37c70b550b02a78a5c3b2055b280cec8ec" - integrity sha512-UiUIqxMgRDET6eR+o5HbfRYP1l0hqkWOs7vNxC/mggutCMUIhWMm8gAHb8tHlyfD3/l6rlgNA5cKdDzEAf6hEg== - -istanbul-lib-instrument@^4.0.0, istanbul-lib-instrument@^4.0.3: - version "4.0.3" - resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-4.0.3.tgz#873c6fff897450118222774696a3f28902d77c1d" - integrity sha512-BXgQl9kf4WTCPCCpmFGoJkz/+uhvm7h7PFKUYxh7qarQd3ER33vHG//qaE8eN25l07YqZPpHXU9I09l/RD5aGQ== - dependencies: - "@babel/core" "^7.7.5" - "@istanbuljs/schema" "^0.1.2" - istanbul-lib-coverage "^3.0.0" - semver "^6.3.0" - -istanbul-lib-report@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz#7518fe52ea44de372f460a76b5ecda9ffb73d8a6" - integrity sha512-wcdi+uAKzfiGT2abPpKZ0hSU1rGQjUQnLvtY5MpQ7QCTahD3VODhcu4wcfY1YtkGaDD5yuydOLINXsfbus9ROw== - dependencies: - istanbul-lib-coverage "^3.0.0" - make-dir "^3.0.0" - supports-color "^7.1.0" - -istanbul-lib-source-maps@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.0.tgz#75743ce6d96bb86dc7ee4352cf6366a23f0b1ad9" - integrity sha512-c16LpFRkR8vQXyHZ5nLpY35JZtzj1PQY1iZmesUbf1FZHbIupcWfjgOXBY9YHkLEQ6puz1u4Dgj6qmU/DisrZg== - dependencies: - debug "^4.1.1" - istanbul-lib-coverage "^3.0.0" - source-map "^0.6.1" - -istanbul-reports@^3.0.2: - version "3.0.2" - resolved 
"https://registry.yarnpkg.com/istanbul-reports/-/istanbul-reports-3.0.2.tgz#d593210e5000683750cb09fc0644e4b6e27fd53b" - integrity sha512-9tZvz7AiR3PEDNGiV9vIouQ/EAcqMXFmkcA1CDFTwOB98OZVDL0PH9glHotf5Ugp6GCOTypfzGWI/OqjWNCRUw== - dependencies: - html-escaper "^2.0.0" - istanbul-lib-report "^3.0.0" - -jest-changed-files@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-26.1.0.tgz#de66b0f30453bca2aff98e9400f75905da495305" - integrity sha512-HS5MIJp3B8t0NRKGMCZkcDUZo36mVRvrDETl81aqljT1S9tqiHRSpyoOvWg9ZilzZG9TDisDNaN1IXm54fLRZw== - dependencies: - "@jest/types" "^26.1.0" - execa "^4.0.0" - throat "^5.0.0" - -jest-cli@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-cli/-/jest-cli-26.1.0.tgz#eb9ec8a18cf3b6aa556d9deaa9e24be12b43ad87" - integrity sha512-Imumvjgi3rU7stq6SJ1JUEMaV5aAgJYXIs0jPqdUnF47N/Tk83EXfmtvNKQ+SnFVI6t6mDOvfM3aA9Sg6kQPSw== - dependencies: - "@jest/core" "^26.1.0" - "@jest/test-result" "^26.1.0" - "@jest/types" "^26.1.0" - chalk "^4.0.0" - exit "^0.1.2" - graceful-fs "^4.2.4" - import-local "^3.0.2" - is-ci "^2.0.0" - jest-config "^26.1.0" - jest-util "^26.1.0" - jest-validate "^26.1.0" - prompts "^2.0.1" - yargs "^15.3.1" - -jest-config@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-config/-/jest-config-26.1.0.tgz#9074f7539acc185e0113ad6d22ed589c16a37a73" - integrity sha512-ONTGeoMbAwGCdq4WuKkMcdMoyfs5CLzHEkzFOlVvcDXufZSaIWh/OXMLa2fwKXiOaFcqEw8qFr4VOKJQfn4CVw== - dependencies: - "@babel/core" "^7.1.0" - "@jest/test-sequencer" "^26.1.0" - "@jest/types" "^26.1.0" - babel-jest "^26.1.0" - chalk "^4.0.0" - deepmerge "^4.2.2" - glob "^7.1.1" - graceful-fs "^4.2.4" - jest-environment-jsdom "^26.1.0" - jest-environment-node "^26.1.0" - jest-get-type "^26.0.0" - jest-jasmine2 "^26.1.0" - jest-regex-util "^26.0.0" - jest-resolve "^26.1.0" - jest-util "^26.1.0" - jest-validate "^26.1.0" - micromatch "^4.0.2" - pretty-format "^26.1.0" - -jest-diff@^25.2.1: - 
version "25.5.0" - resolved "https://registry.yarnpkg.com/jest-diff/-/jest-diff-25.5.0.tgz#1dd26ed64f96667c068cef026b677dfa01afcfa9" - integrity sha512-z1kygetuPiREYdNIumRpAHY6RXiGmp70YHptjdaxTWGmA085W3iCnXNx0DhflK3vwrKmrRWyY1wUpkPMVxMK7A== - dependencies: - chalk "^3.0.0" - diff-sequences "^25.2.6" - jest-get-type "^25.2.6" - pretty-format "^25.5.0" - -jest-diff@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-diff/-/jest-diff-26.1.0.tgz#00a549bdc936c9691eb4dc25d1fbd78bf456abb2" - integrity sha512-GZpIcom339y0OXznsEKjtkfKxNdg7bVbEofK8Q6MnevTIiR1jNhDWKhRX6X0SDXJlwn3dy59nZ1z55fLkAqPWg== - dependencies: - chalk "^4.0.0" - diff-sequences "^26.0.0" - jest-get-type "^26.0.0" - pretty-format "^26.1.0" - -jest-docblock@^26.0.0: - version "26.0.0" - resolved "https://registry.yarnpkg.com/jest-docblock/-/jest-docblock-26.0.0.tgz#3e2fa20899fc928cb13bd0ff68bd3711a36889b5" - integrity sha512-RDZ4Iz3QbtRWycd8bUEPxQsTlYazfYn/h5R65Fc6gOfwozFhoImx+affzky/FFBuqISPTqjXomoIGJVKBWoo0w== - dependencies: - detect-newline "^3.0.0" - -jest-each@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-each/-/jest-each-26.1.0.tgz#e35449875009a22d74d1bda183b306db20f286f7" - integrity sha512-lYiSo4Igr81q6QRsVQq9LIkJW0hZcKxkIkHzNeTMPENYYDw/W/Raq28iJ0sLlNFYz2qxxeLnc5K2gQoFYlu2bA== - dependencies: - "@jest/types" "^26.1.0" - chalk "^4.0.0" - jest-get-type "^26.0.0" - jest-util "^26.1.0" - pretty-format "^26.1.0" - -jest-environment-jsdom@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-environment-jsdom/-/jest-environment-jsdom-26.1.0.tgz#9dc7313ffe1b59761dad1fedb76e2503e5d37c5b" - integrity sha512-dWfiJ+spunVAwzXbdVqPH1LbuJW/kDL+FyqgA5YzquisHqTi0g9hquKif9xKm7c1bKBj6wbmJuDkeMCnxZEpUw== - dependencies: - "@jest/environment" "^26.1.0" - "@jest/fake-timers" "^26.1.0" - "@jest/types" "^26.1.0" - jest-mock "^26.1.0" - jest-util "^26.1.0" - jsdom "^16.2.2" - -jest-environment-node@^26.1.0: - version "26.1.0" - resolved 
"https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-26.1.0.tgz#8bb387b3eefb132eab7826f9a808e4e05618960b" - integrity sha512-DNm5x1aQH0iRAe9UYAkZenuzuJ69VKzDCAYISFHQ5i9e+2Tbeu2ONGY7YStubCLH8a1wdKBgqScYw85+ySxqxg== - dependencies: - "@jest/environment" "^26.1.0" - "@jest/fake-timers" "^26.1.0" - "@jest/types" "^26.1.0" - jest-mock "^26.1.0" - jest-util "^26.1.0" - -jest-get-type@^25.2.6: - version "25.2.6" - resolved "https://registry.yarnpkg.com/jest-get-type/-/jest-get-type-25.2.6.tgz#0b0a32fab8908b44d508be81681487dbabb8d877" - integrity sha512-DxjtyzOHjObRM+sM1knti6or+eOgcGU4xVSb2HNP1TqO4ahsT+rqZg+nyqHWJSvWgKC5cG3QjGFBqxLghiF/Ig== - -jest-get-type@^26.0.0: - version "26.0.0" - resolved "https://registry.yarnpkg.com/jest-get-type/-/jest-get-type-26.0.0.tgz#381e986a718998dbfafcd5ec05934be538db4039" - integrity sha512-zRc1OAPnnws1EVfykXOj19zo2EMw5Hi6HLbFCSjpuJiXtOWAYIjNsHVSbpQ8bDX7L5BGYGI8m+HmKdjHYFF0kg== - -jest-haste-map@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-haste-map/-/jest-haste-map-26.1.0.tgz#ef31209be73f09b0d9445e7d213e1b53d0d1476a" - integrity sha512-WeBS54xCIz9twzkEdm6+vJBXgRBQfdbbXD0dk8lJh7gLihopABlJmIQFdWSDDtuDe4PRiObsjZSUjbJ1uhWEpA== - dependencies: - "@jest/types" "^26.1.0" - "@types/graceful-fs" "^4.1.2" - anymatch "^3.0.3" - fb-watchman "^2.0.0" - graceful-fs "^4.2.4" - jest-serializer "^26.1.0" - jest-util "^26.1.0" - jest-worker "^26.1.0" - micromatch "^4.0.2" - sane "^4.0.3" - walker "^1.0.7" - which "^2.0.2" - optionalDependencies: - fsevents "^2.1.2" - -jest-jasmine2@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-jasmine2/-/jest-jasmine2-26.1.0.tgz#4dfe349b2b2d3c6b3a27c024fd4cb57ac0ed4b6f" - integrity sha512-1IPtoDKOAG+MeBrKvvuxxGPJb35MTTRSDglNdWWCndCB3TIVzbLThRBkwH9P081vXLgiJHZY8Bz3yzFS803xqQ== - dependencies: - "@babel/traverse" "^7.1.0" - "@jest/environment" "^26.1.0" - "@jest/source-map" "^26.1.0" - "@jest/test-result" "^26.1.0" - "@jest/types" "^26.1.0" - 
chalk "^4.0.0" - co "^4.6.0" - expect "^26.1.0" - is-generator-fn "^2.0.0" - jest-each "^26.1.0" - jest-matcher-utils "^26.1.0" - jest-message-util "^26.1.0" - jest-runtime "^26.1.0" - jest-snapshot "^26.1.0" - jest-util "^26.1.0" - pretty-format "^26.1.0" - throat "^5.0.0" - -jest-leak-detector@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-leak-detector/-/jest-leak-detector-26.1.0.tgz#039c3a07ebcd8adfa984b6ac015752c35792e0a6" - integrity sha512-dsMnKF+4BVOZwvQDlgn3MG+Ns4JuLv8jNvXH56bgqrrboyCbI1rQg6EI5rs+8IYagVcfVP2yZFKfWNZy0rK0Hw== - dependencies: - jest-get-type "^26.0.0" - pretty-format "^26.1.0" - -jest-matcher-utils@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-matcher-utils/-/jest-matcher-utils-26.1.0.tgz#cf75a41bd413dda784f022de5a65a2a5c73a5c92" - integrity sha512-PW9JtItbYvES/xLn5mYxjMd+Rk+/kIt88EfH3N7w9KeOrHWaHrdYPnVHndGbsFGRJ2d5gKtwggCvkqbFDoouQA== - dependencies: - chalk "^4.0.0" - jest-diff "^26.1.0" - jest-get-type "^26.0.0" - pretty-format "^26.1.0" - -jest-message-util@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-message-util/-/jest-message-util-26.1.0.tgz#52573fbb8f5cea443c4d1747804d7a238a3e233c" - integrity sha512-dY0+UlldiAJwNDJ08SF0HdF32g9PkbF2NRK/+2iMPU40O6q+iSn1lgog/u0UH8ksWoPv0+gNq8cjhYO2MFtT0g== - dependencies: - "@babel/code-frame" "^7.0.0" - "@jest/types" "^26.1.0" - "@types/stack-utils" "^1.0.1" - chalk "^4.0.0" - graceful-fs "^4.2.4" - micromatch "^4.0.2" - slash "^3.0.0" - stack-utils "^2.0.2" - -jest-mock@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-mock/-/jest-mock-26.1.0.tgz#80d8286da1f05a345fbad1bfd6fa49a899465d3d" - integrity sha512-1Rm8EIJ3ZFA8yCIie92UbxZWj9SuVmUGcyhLHyAhY6WI3NIct38nVcfOPWhJteqSn8V8e3xOMha9Ojfazfpovw== - dependencies: - "@jest/types" "^26.1.0" - -jest-pnp-resolver@^1.2.1: - version "1.2.2" - resolved 
"https://registry.yarnpkg.com/jest-pnp-resolver/-/jest-pnp-resolver-1.2.2.tgz#b704ac0ae028a89108a4d040b3f919dfddc8e33c" - integrity sha512-olV41bKSMm8BdnuMsewT4jqlZ8+3TCARAXjZGT9jcoSnrfUnRCqnMoF9XEeoWjbzObpqF9dRhHQj0Xb9QdF6/w== - -jest-regex-util@^26.0.0: - version "26.0.0" - resolved "https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-26.0.0.tgz#d25e7184b36e39fd466c3bc41be0971e821fee28" - integrity sha512-Gv3ZIs/nA48/Zvjrl34bf+oD76JHiGDUxNOVgUjh3j890sblXryjY4rss71fPtD/njchl6PSE2hIhvyWa1eT0A== - -jest-resolve-dependencies@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-resolve-dependencies/-/jest-resolve-dependencies-26.1.0.tgz#1ce36472f864a5dadf7dc82fa158e1c77955691b" - integrity sha512-fQVEPHHQ1JjHRDxzlLU/buuQ9om+hqW6Vo928aa4b4yvq4ZHBtRSDsLdKQLuCqn5CkTVpYZ7ARh2fbA8WkRE6g== - dependencies: - "@jest/types" "^26.1.0" - jest-regex-util "^26.0.0" - jest-snapshot "^26.1.0" - -jest-resolve@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-resolve/-/jest-resolve-26.1.0.tgz#a530eaa302b1f6fa0479079d1561dd69abc00e68" - integrity sha512-KsY1JV9FeVgEmwIISbZZN83RNGJ1CC+XUCikf/ZWJBX/tO4a4NvA21YixokhdR9UnmPKKAC4LafVixJBrwlmfg== - dependencies: - "@jest/types" "^26.1.0" - chalk "^4.0.0" - graceful-fs "^4.2.4" - jest-pnp-resolver "^1.2.1" - jest-util "^26.1.0" - read-pkg-up "^7.0.1" - resolve "^1.17.0" - slash "^3.0.0" - -jest-runner@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-runner/-/jest-runner-26.1.0.tgz#457f7fc522afe46ca6db1dccf19f87f500b3288d" - integrity sha512-elvP7y0fVDREnfqit0zAxiXkDRSw6dgCkzPCf1XvIMnSDZ8yogmSKJf192dpOgnUVykmQXwYYJnCx641uLTgcw== - dependencies: - "@jest/console" "^26.1.0" - "@jest/environment" "^26.1.0" - "@jest/test-result" "^26.1.0" - "@jest/types" "^26.1.0" - chalk "^4.0.0" - exit "^0.1.2" - graceful-fs "^4.2.4" - jest-config "^26.1.0" - jest-docblock "^26.0.0" - jest-haste-map "^26.1.0" - jest-jasmine2 "^26.1.0" - jest-leak-detector "^26.1.0" - 
jest-message-util "^26.1.0" - jest-resolve "^26.1.0" - jest-runtime "^26.1.0" - jest-util "^26.1.0" - jest-worker "^26.1.0" - source-map-support "^0.5.6" - throat "^5.0.0" - -jest-runtime@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-runtime/-/jest-runtime-26.1.0.tgz#45a37af42115f123ed5c51f126c05502da2469cb" - integrity sha512-1qiYN+EZLmG1QV2wdEBRf+Ci8i3VSfIYLF02U18PiUDrMbhfpN/EAMMkJtT02jgJUoaEOpHAIXG6zS3QRMzRmA== - dependencies: - "@jest/console" "^26.1.0" - "@jest/environment" "^26.1.0" - "@jest/fake-timers" "^26.1.0" - "@jest/globals" "^26.1.0" - "@jest/source-map" "^26.1.0" - "@jest/test-result" "^26.1.0" - "@jest/transform" "^26.1.0" - "@jest/types" "^26.1.0" - "@types/yargs" "^15.0.0" - chalk "^4.0.0" - collect-v8-coverage "^1.0.0" - exit "^0.1.2" - glob "^7.1.3" - graceful-fs "^4.2.4" - jest-config "^26.1.0" - jest-haste-map "^26.1.0" - jest-message-util "^26.1.0" - jest-mock "^26.1.0" - jest-regex-util "^26.0.0" - jest-resolve "^26.1.0" - jest-snapshot "^26.1.0" - jest-util "^26.1.0" - jest-validate "^26.1.0" - slash "^3.0.0" - strip-bom "^4.0.0" - yargs "^15.3.1" - -jest-serializer@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-serializer/-/jest-serializer-26.1.0.tgz#72a394531fc9b08e173dc7d297440ac610d95022" - integrity sha512-eqZOQG/0+MHmr25b2Z86g7+Kzd5dG9dhCiUoyUNJPgiqi38DqbDEOlHcNijyfZoj74soGBohKBZuJFS18YTJ5w== - dependencies: - graceful-fs "^4.2.4" - -jest-snapshot@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-snapshot/-/jest-snapshot-26.1.0.tgz#c36ed1e0334bd7bd2fe5ad07e93a364ead7e1349" - integrity sha512-YhSbU7eMTVQO/iRbNs8j0mKRxGp4plo7sJ3GzOQ0IYjvsBiwg0T1o0zGQAYepza7lYHuPTrG5J2yDd0CE2YxSw== - dependencies: - "@babel/types" "^7.0.0" - "@jest/types" "^26.1.0" - "@types/prettier" "^2.0.0" - chalk "^4.0.0" - expect "^26.1.0" - graceful-fs "^4.2.4" - jest-diff "^26.1.0" - jest-get-type "^26.0.0" - jest-haste-map "^26.1.0" - jest-matcher-utils "^26.1.0" - jest-message-util 
"^26.1.0" - jest-resolve "^26.1.0" - natural-compare "^1.4.0" - pretty-format "^26.1.0" - semver "^7.3.2" - -jest-util@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-util/-/jest-util-26.1.0.tgz#80e85d4ba820decacf41a691c2042d5276e5d8d8" - integrity sha512-rNMOwFQevljfNGvbzNQAxdmXQ+NawW/J72dmddsK0E8vgxXCMtwQ/EH0BiWEIxh0hhMcTsxwAxINt7Lh46Uzbg== - dependencies: - "@jest/types" "^26.1.0" - chalk "^4.0.0" - graceful-fs "^4.2.4" - is-ci "^2.0.0" - micromatch "^4.0.2" - -jest-validate@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-validate/-/jest-validate-26.1.0.tgz#942c85ad3d60f78250c488a7f85d8f11a29788e7" - integrity sha512-WPApOOnXsiwhZtmkDsxnpye+XLb/tUISP+H6cHjfUIXvlG+eKwP+isnivsxlHCPaO9Q5wvbhloIBkdF3qUn+Nw== - dependencies: - "@jest/types" "^26.1.0" - camelcase "^6.0.0" - chalk "^4.0.0" - jest-get-type "^26.0.0" - leven "^3.1.0" - pretty-format "^26.1.0" - -jest-watcher@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-watcher/-/jest-watcher-26.1.0.tgz#99812a0cd931f0cb3d153180426135ab83e4d8f2" - integrity sha512-ffEOhJl2EvAIki613oPsSG11usqnGUzIiK7MMX6hE4422aXOcVEG3ySCTDFLn1+LZNXGPE8tuJxhp8OBJ1pgzQ== - dependencies: - "@jest/test-result" "^26.1.0" - "@jest/types" "^26.1.0" - ansi-escapes "^4.2.1" - chalk "^4.0.0" - jest-util "^26.1.0" - string-length "^4.0.1" - -jest-worker@^26.1.0: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-26.1.0.tgz#65d5641af74e08ccd561c240e7db61284f82f33d" - integrity sha512-Z9P5pZ6UC+kakMbNJn+tA2RdVdNX5WH1x+5UCBZ9MxIK24pjYtFt96fK+UwBTrjLYm232g1xz0L3eTh51OW+yQ== - dependencies: - merge-stream "^2.0.0" - supports-color "^7.0.0" - -jest@^26.0.1: - version "26.1.0" - resolved "https://registry.yarnpkg.com/jest/-/jest-26.1.0.tgz#2f3aa7bcffb9bfd025473f83bbbf46a3af026263" - integrity sha512-LIti8jppw5BcQvmNJe4w2g1N/3V68HUfAv9zDVm7v+VAtQulGhH0LnmmiVkbNE4M4I43Bj2fXPiBGKt26k9tHw== - dependencies: - "@jest/core" "^26.1.0" - import-local 
"^3.0.2" - jest-cli "^26.1.0" - -js-tokens@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" - integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== - -js-yaml@^3.13.1: - version "3.14.0" - resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.0.tgz#a7a34170f26a21bb162424d8adacb4113a69e482" - integrity sha512-/4IbIeHcD9VMHFqDR/gQ7EdZdLimOvW2DdcxFjdyyZ9NsbS+ccrXqVWDtab/lRl5AlUqmpBx8EhPaWR+OtY17A== - dependencies: - argparse "^1.0.7" - esprima "^4.0.0" - -jsbn@~0.1.0: - version "0.1.1" - resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" - integrity sha1-peZUwuWi3rXyAdls77yoDA7y9RM= - -jsdom@^16.2.2: - version "16.2.2" - resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-16.2.2.tgz#76f2f7541646beb46a938f5dc476b88705bedf2b" - integrity sha512-pDFQbcYtKBHxRaP55zGXCJWgFHkDAYbKcsXEK/3Icu9nKYZkutUXfLBwbD+09XDutkYSHcgfQLZ0qvpAAm9mvg== - dependencies: - abab "^2.0.3" - acorn "^7.1.1" - acorn-globals "^6.0.0" - cssom "^0.4.4" - cssstyle "^2.2.0" - data-urls "^2.0.0" - decimal.js "^10.2.0" - domexception "^2.0.1" - escodegen "^1.14.1" - html-encoding-sniffer "^2.0.1" - is-potential-custom-element-name "^1.0.0" - nwsapi "^2.2.0" - parse5 "5.1.1" - request "^2.88.2" - request-promise-native "^1.0.8" - saxes "^5.0.0" - symbol-tree "^3.2.4" - tough-cookie "^3.0.1" - w3c-hr-time "^1.0.2" - w3c-xmlserializer "^2.0.0" - webidl-conversions "^6.0.0" - whatwg-encoding "^1.0.5" - whatwg-mimetype "^2.3.0" - whatwg-url "^8.0.0" - ws "^7.2.3" - xml-name-validator "^3.0.0" - -jsesc@^2.5.1: - version "2.5.2" - resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" - integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA== - -json-parse-better-errors@^1.0.1: - version "1.0.2" - resolved 
"https://registry.yarnpkg.com/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz#bb867cfb3450e69107c131d1c514bab3dc8bcaa9" - integrity sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw== - -json-schema-traverse@^0.4.1: - version "0.4.1" - resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" - integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== - -json-schema@0.2.3: - version "0.2.3" - resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13" - integrity sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM= - -json-stringify-safe@~5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" - integrity sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus= - -json5@2.x, json5@^2.1.2: - version "2.1.3" - resolved "https://registry.yarnpkg.com/json5/-/json5-2.1.3.tgz#c9b0f7fa9233bfe5807fe66fcf3a5617ed597d43" - integrity sha512-KXPvOm8K9IJKFM0bmdn8QXh7udDh1g/giieX0NLCaMnb4hEiVFqnop2ImTXCc5e0/oHz3LTqmHGtExn5hfMkOA== - dependencies: - minimist "^1.2.5" - -jsonfile@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-6.0.1.tgz#98966cba214378c8c84b82e085907b40bf614179" - integrity sha512-jR2b5v7d2vIOust+w3wtFKZIfpC2pnRmFAhAC/BuweZFQR8qZzxH1OyrQ10HmdVYiXWkYUqPVsz91cG7EL2FBg== - dependencies: - universalify "^1.0.0" - optionalDependencies: - graceful-fs "^4.1.6" - -jsprim@^1.2.2: - version "1.4.1" - resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.1.tgz#313e66bc1e5cc06e438bc1b7499c2e5c56acb6a2" - integrity sha1-MT5mvB5cwG5Di8G3SZwuXFastqI= - dependencies: - assert-plus "1.0.0" - extsprintf "1.3.0" - json-schema "0.2.3" - verror "1.10.0" - -kind-of@^3.0.2, kind-of@^3.0.3, kind-of@^3.2.0: - version "3.2.2" - resolved 
"https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.2.tgz#31ea21a734bab9bbb0f32466d893aea51e4a3c64" - integrity sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ= - dependencies: - is-buffer "^1.1.5" - -kind-of@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-4.0.0.tgz#20813df3d712928b207378691a45066fae72dd57" - integrity sha1-IIE989cSkosgc3hpGkUGb65y3Vc= - dependencies: - is-buffer "^1.1.5" - -kind-of@^5.0.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-5.1.0.tgz#729c91e2d857b7a419a1f9aa65685c4c33f5845d" - integrity sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw== - -kind-of@^6.0.0, kind-of@^6.0.2: - version "6.0.3" - resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.3.tgz#07c05034a6c349fa06e24fa35aa76db4580ce4dd" - integrity sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw== - -kleur@^3.0.3: - version "3.0.3" - resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e" - integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== - -leven@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/leven/-/leven-3.1.0.tgz#77891de834064cccba82ae7842bb6b14a13ed7f2" - integrity sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A== - -levn@~0.3.0: - version "0.3.0" - resolved "https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee" - integrity sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4= - dependencies: - prelude-ls "~1.1.2" - type-check "~0.3.2" - -lines-and-columns@^1.1.6: - version "1.1.6" - resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.1.6.tgz#1c00c743b433cd0a4e80758f7b64a57440d9ff00" - integrity sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA= - -locate-path@^5.0.0: - version "5.0.0" - resolved 
"https://registry.yarnpkg.com/locate-path/-/locate-path-5.0.0.tgz#1afba396afd676a6d42504d0a67a3a7eb9f62aa0" - integrity sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g== - dependencies: - p-locate "^4.1.0" - -lodash.memoize@4.x: - version "4.1.2" - resolved "https://registry.yarnpkg.com/lodash.memoize/-/lodash.memoize-4.1.2.tgz#bcc6c49a42a2840ed997f323eada5ecd182e0bfe" - integrity sha1-vMbEmkKihA7Zl/Mj6tpezRguC/4= - -lodash.sortby@^4.7.0: - version "4.7.0" - resolved "https://registry.yarnpkg.com/lodash.sortby/-/lodash.sortby-4.7.0.tgz#edd14c824e2cc9c1e0b0a1b42bb5210516a42438" - integrity sha1-7dFMgk4sycHgsKG0K7UhBRakJDg= - -lodash@^4.17.13, lodash@^4.17.15: - version "4.17.15" - resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.15.tgz#b447f6670a0455bbfeedd11392eff330ea097548" - integrity sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A== - -lodash@^4.17.20: - version "4.17.21" - resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" - integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== - -lunr@^2.3.9: - version "2.3.9" - resolved "https://registry.yarnpkg.com/lunr/-/lunr-2.3.9.tgz#18b123142832337dd6e964df1a5a7707b25d35e1" - integrity sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow== - -make-dir@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-3.1.0.tgz#415e967046b3a7f1d185277d84aa58203726a13f" - integrity sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw== - dependencies: - semver "^6.0.0" - -make-error@1.x: - version "1.3.6" - resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.6.tgz#2eb2e37ea9b67c4891f684a1394799af484cf7a2" - integrity 
sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw== - -makeerror@1.0.x: - version "1.0.11" - resolved "https://registry.yarnpkg.com/makeerror/-/makeerror-1.0.11.tgz#e01a5c9109f2af79660e4e8b9587790184f5a96c" - integrity sha1-4BpckQnyr3lmDk6LlYd5AYT1qWw= - dependencies: - tmpl "1.0.x" - -map-cache@^0.2.2: - version "0.2.2" - resolved "https://registry.yarnpkg.com/map-cache/-/map-cache-0.2.2.tgz#c32abd0bd6525d9b051645bb4f26ac5dc98a0dbf" - integrity sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8= - -map-stream@~0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/map-stream/-/map-stream-0.1.0.tgz#e56aa94c4c8055a16404a0674b78f215f7c8e194" - integrity sha1-5WqpTEyAVaFkBKBnS3jyFffI4ZQ= - -map-visit@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/map-visit/-/map-visit-1.0.0.tgz#ecdca8f13144e660f1b5bd41f12f3479d98dfb8f" - integrity sha1-7Nyo8TFE5mDxtb1B8S80edmN+48= - dependencies: - object-visit "^1.0.0" - -marked@^1.1.1: - version "1.2.9" - resolved "https://registry.yarnpkg.com/marked/-/marked-1.2.9.tgz#53786f8b05d4c01a2a5a76b7d1ec9943d29d72dc" - integrity sha512-H8lIX2SvyitGX+TRdtS06m1jHMijKN/XjfH6Ooii9fvxMlh8QdqBfBDkGUpMWH2kQNrtixjzYUa3SH8ROTgRRw== - -merge-stream@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" - integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w== - -micromatch@4.x, micromatch@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.2.tgz#4fcb0999bf9fbc2fcbdd212f6d629b9a56c39259" - integrity sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q== - dependencies: - braces "^3.0.1" - picomatch "^2.0.5" - -micromatch@^3.1.4: - version "3.1.10" - resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-3.1.10.tgz#70859bc95c9840952f359a068a3fc49f9ecfac23" - 
integrity sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg== - dependencies: - arr-diff "^4.0.0" - array-unique "^0.3.2" - braces "^2.3.1" - define-property "^2.0.2" - extend-shallow "^3.0.2" - extglob "^2.0.4" - fragment-cache "^0.2.1" - kind-of "^6.0.2" - nanomatch "^1.2.9" - object.pick "^1.3.0" - regex-not "^1.0.0" - snapdragon "^0.8.1" - to-regex "^3.0.2" - -mime-db@1.44.0: - version "1.44.0" - resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.44.0.tgz#fa11c5eb0aca1334b4233cb4d52f10c5a6272f92" - integrity sha512-/NOTfLrsPBVeH7YtFPgsVWveuL+4SjjYxaQ1xtM1KMFj7HdxlBlxeyNLzhyJVx7r4rZGJAZ/6lkKCitSc/Nmpg== - -mime-types@^2.1.12, mime-types@~2.1.19: - version "2.1.27" - resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.27.tgz#47949f98e279ea53119f5722e0f34e529bec009f" - integrity sha512-JIhqnCasI9yD+SsmkquHBxTSEuZdQX5BuQnS2Vc7puQQQ+8yiP5AY5uWhpdv4YL4VM5c6iliiYWPgJ/nJQLp7w== - dependencies: - mime-db "1.44.0" - -mimic-fn@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" - integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== - -minimatch@^3.0.0, minimatch@^3.1.1: - version "3.1.2" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" - integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== - dependencies: - brace-expansion "^1.1.7" - -minimatch@^3.0.3, minimatch@^3.0.4: - version "3.0.4" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" - integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== - dependencies: - brace-expansion "^1.1.7" - -minimist@^1.1.1, minimist@^1.2.0, minimist@^1.2.5: - version "1.2.5" - resolved 
"https://registry.yarnpkg.com/minimist/-/minimist-1.2.5.tgz#67d66014b66a6a8aaa0c083c5fd58df4e4e97602" - integrity sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw== - -mixin-deep@^1.2.0: - version "1.3.2" - resolved "https://registry.yarnpkg.com/mixin-deep/-/mixin-deep-1.3.2.tgz#1120b43dc359a785dce65b55b82e257ccf479566" - integrity sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA== - dependencies: - for-in "^1.0.2" - is-extendable "^1.0.1" - -mkdirp@1.x, mkdirp@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-1.0.4.tgz#3eb5ed62622756d79a5f0e2a221dfebad75c2f7e" - integrity sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw== - -mkdirp@^0.5.3: - version "0.5.5" - resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.5.tgz#d91cefd62d1436ca0f41620e251288d420099def" - integrity sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ== - dependencies: - minimist "^1.2.5" - -ms@2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" - integrity sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g= - -ms@^2.1.1: - version "2.1.2" - resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" - integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== - -nanomatch@^1.2.9: - version "1.2.13" - resolved "https://registry.yarnpkg.com/nanomatch/-/nanomatch-1.2.13.tgz#b87a8aa4fc0de8fe6be88895b38983ff265bd119" - integrity sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA== - dependencies: - arr-diff "^4.0.0" - array-unique "^0.3.2" - define-property "^2.0.2" - extend-shallow "^3.0.2" - fragment-cache "^0.2.1" - is-windows "^1.0.2" - kind-of "^6.0.2" - object.pick "^1.3.0" - regex-not "^1.0.0" - 
snapdragon "^0.8.1" - to-regex "^3.0.1" - -natural-compare@^1.4.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" - integrity sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc= - -neo-async@^2.6.0: - version "2.6.1" - resolved "https://registry.yarnpkg.com/neo-async/-/neo-async-2.6.1.tgz#ac27ada66167fa8849a6addd837f6b189ad2081c" - integrity sha512-iyam8fBuCUpWeKPGpaNMetEocMt364qkCsfL9JuhjXX6dRnguRVOfk2GZaDpPjcOKiiXCPINZC1GczQ7iTq3Zw== - -nice-try@^1.0.4: - version "1.0.5" - resolved "https://registry.yarnpkg.com/nice-try/-/nice-try-1.0.5.tgz#a3378a7696ce7d223e88fc9b764bd7ef1089e366" - integrity sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ== - -node-cleanup@^2.1.2: - version "2.1.2" - resolved "https://registry.yarnpkg.com/node-cleanup/-/node-cleanup-2.1.2.tgz#7ac19abd297e09a7f72a71545d951b517e4dde2c" - integrity sha1-esGavSl+Caf3KnFUXZUbUX5N3iw= - -node-int64@^0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b" - integrity sha1-h6kGXNs1XTGC2PlM4RGIuCXGijs= - -node-modules-regexp@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/node-modules-regexp/-/node-modules-regexp-1.0.0.tgz#8d9dbe28964a4ac5712e9131642107c71e90ec40" - integrity sha1-jZ2+KJZKSsVxLpExZCEHxx6Q7EA= - -node-notifier@^7.0.0: - version "7.0.1" - resolved "https://registry.yarnpkg.com/node-notifier/-/node-notifier-7.0.1.tgz#a355e33e6bebacef9bf8562689aed0f4230ca6f9" - integrity sha512-VkzhierE7DBmQEElhTGJIoiZa1oqRijOtgOlsXg32KrJRXsPy0NXFBqWGW/wTswnJlDCs5viRYaqWguqzsKcmg== - dependencies: - growly "^1.3.0" - is-wsl "^2.1.1" - semver "^7.2.1" - shellwords "^0.1.1" - uuid "^7.0.3" - which "^2.0.2" - -noms@0.0.0: - version "0.0.0" - resolved "https://registry.yarnpkg.com/noms/-/noms-0.0.0.tgz#da8ebd9f3af9d6760919b27d9cdc8092a7332859" - integrity 
sha1-2o69nzr51nYJGbJ9nNyAkqczKFk= - dependencies: - inherits "^2.0.1" - readable-stream "~1.0.31" - -normalize-package-data@^2.5.0: - version "2.5.0" - resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.5.0.tgz#e66db1838b200c1dfc233225d12cb36520e234a8" - integrity sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA== - dependencies: - hosted-git-info "^2.1.4" - resolve "^1.10.0" - semver "2 || 3 || 4 || 5" - validate-npm-package-license "^3.0.1" - -normalize-path@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9" - integrity sha1-GrKLVW4Zg2Oowab35vogE3/mrtk= - dependencies: - remove-trailing-separator "^1.0.1" - -normalize-path@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" - integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== - -npm-run-path@^2.0.0: - version "2.0.2" - resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-2.0.2.tgz#35a9232dfa35d7067b4cb2ddf2357b1871536c5f" - integrity sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8= - dependencies: - path-key "^2.0.0" - -npm-run-path@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-4.0.1.tgz#b7ecd1e5ed53da8e37a55e1c2269e0b97ed748ea" - integrity sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw== - dependencies: - path-key "^3.0.0" - -nwsapi@^2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/nwsapi/-/nwsapi-2.2.0.tgz#204879a9e3d068ff2a55139c2c772780681a38b7" - integrity sha512-h2AatdwYH+JHiZpv7pt/gSX1XoRGb7L/qSIeuqA6GwYoF9w1vP1cw42TO0aI2pNyshRK5893hNSl+1//vHK7hQ== - -oauth-sign@~0.9.0: - version "0.9.0" - resolved 
"https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.9.0.tgz#47a7b016baa68b5fa0ecf3dee08a85c679ac6455" - integrity sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ== - -object-copy@^0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/object-copy/-/object-copy-0.1.0.tgz#7e7d858b781bd7c991a41ba975ed3812754e998c" - integrity sha1-fn2Fi3gb18mRpBupde04EnVOmYw= - dependencies: - copy-descriptor "^0.1.0" - define-property "^0.2.5" - kind-of "^3.0.3" - -object-visit@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/object-visit/-/object-visit-1.0.1.tgz#f79c4493af0c5377b59fe39d395e41042dd045bb" - integrity sha1-95xEk68MU3e1n+OdOV5BBC3QRbs= - dependencies: - isobject "^3.0.0" - -object.pick@^1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/object.pick/-/object.pick-1.3.0.tgz#87a10ac4c1694bd2e1cbf53591a66141fb5dd747" - integrity sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c= - dependencies: - isobject "^3.0.1" - -once@^1.3.0, once@^1.3.1, once@^1.4.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" - integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E= - dependencies: - wrappy "1" - -onetime@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/onetime/-/onetime-5.1.0.tgz#fff0f3c91617fe62bb50189636e99ac8a6df7be5" - integrity sha512-5NcSkPHhwTVFIQN+TUqXoS5+dlElHXdpAWu9I0HP20YOtIi+aZ0Ct82jdlILDxjLEAWwvm+qj1m6aEtsDVmm6Q== - dependencies: - mimic-fn "^2.1.0" - -optionator@^0.8.1: - version "0.8.3" - resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.3.tgz#84fa1d036fe9d3c7e21d99884b601167ec8fb495" - integrity sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA== - dependencies: - deep-is "~0.1.3" - fast-levenshtein "~2.0.6" - levn "~0.3.0" - prelude-ls "~1.1.2" - type-check "~0.3.2" - word-wrap "~1.2.3" - -p-each-series@^2.1.0: - version "2.1.0" - resolved 
"https://registry.yarnpkg.com/p-each-series/-/p-each-series-2.1.0.tgz#961c8dd3f195ea96c747e636b262b800a6b1af48" - integrity sha512-ZuRs1miPT4HrjFa+9fRfOFXxGJfORgelKV9f9nNOWw2gl6gVsRaVDOQP0+MI0G0wGKns1Yacsu0GjOFbTK0JFQ== - -p-finally@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" - integrity sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4= - -p-limit@^2.2.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1" - integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w== - dependencies: - p-try "^2.0.0" - -p-locate@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-4.1.0.tgz#a3428bb7088b3a60292f66919278b7c297ad4f07" - integrity sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A== - dependencies: - p-limit "^2.2.0" - -p-try@^2.0.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" - integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== - -parse-json@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-5.0.0.tgz#73e5114c986d143efa3712d4ea24db9a4266f60f" - integrity sha512-OOY5b7PAEFV0E2Fir1KOkxchnZNCdowAJgQ5NuxjpBKTRP3pQhwkrkxqQjeoKJ+fO7bCpmIZaogI4eZGDMEGOw== - dependencies: - "@babel/code-frame" "^7.0.0" - error-ex "^1.3.1" - json-parse-better-errors "^1.0.1" - lines-and-columns "^1.1.6" - -parse5@5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/parse5/-/parse5-5.1.1.tgz#f68e4e5ba1852ac2cadc00f4555fff6c2abb6178" - integrity sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug== - -pascalcase@^0.1.1: - version "0.1.1" - resolved 
"https://registry.yarnpkg.com/pascalcase/-/pascalcase-0.1.1.tgz#b363e55e8006ca6fe21784d2db22bd15d7917f14" - integrity sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ= - -path-exists@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" - integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== - -path-is-absolute@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" - integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18= - -path-key@^2.0.0, path-key@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/path-key/-/path-key-2.0.1.tgz#411cadb574c5a140d3a4b1910d40d80cc9f40b40" - integrity sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A= - -path-key@^3.0.0, path-key@^3.1.0: - version "3.1.1" - resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" - integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== - -path-parse@^1.0.6: - version "1.0.6" - resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.6.tgz#d62dbb5679405d72c4737ec58600e9ddcf06d24c" - integrity sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw== - -path-parse@^1.0.7: - version "1.0.7" - resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" - integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== - -pause-stream@0.0.11: - version "0.0.11" - resolved "https://registry.yarnpkg.com/pause-stream/-/pause-stream-0.0.11.tgz#fe5a34b0cbce12b5aa6a2b403ee2e73b602f1445" - integrity sha1-/lo0sMvOErWqaitAPuLnO2AvFEU= - dependencies: - through "~2.3" - -performance-now@^2.1.0: - version "2.1.0" - resolved 
"https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" - integrity sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns= - -picomatch@^2.0.4, picomatch@^2.0.5: - version "2.2.2" - resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.2.2.tgz#21f333e9b6b8eaff02468f5146ea406d345f4dad" - integrity sha512-q0M/9eZHzmr0AulXyPwNfZjtwZ/RBZlbN3K3CErVrk50T2ASYI7Bye0EvekFY3IP1Nt2DHu0re+V2ZHIpMkuWg== - -pirates@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/pirates/-/pirates-4.0.1.tgz#643a92caf894566f91b2b986d2c66950a8e2fb87" - integrity sha512-WuNqLTbMI3tmfef2TKxlQmAiLHKtFhlsCZnPIpuv2Ow0RDVO8lfy1Opf4NUzlMXLjPl+Men7AuVdX6TA+s+uGA== - dependencies: - node-modules-regexp "^1.0.0" - -pkg-dir@^4.2.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-4.2.0.tgz#f099133df7ede422e81d1d8448270eeb3e4261f3" - integrity sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ== - dependencies: - find-up "^4.0.0" - -posix-character-classes@^0.1.0: - version "0.1.1" - resolved "https://registry.yarnpkg.com/posix-character-classes/-/posix-character-classes-0.1.1.tgz#01eac0fe3b5af71a2a6c02feabb8c1fef7e00eab" - integrity sha1-AerA/jta9xoqbAL+q7jB/vfgDqs= - -prelude-ls@~1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54" - integrity sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ= - -pretty-format@^25.2.1, pretty-format@^25.5.0: - version "25.5.0" - resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-25.5.0.tgz#7873c1d774f682c34b8d48b6743a2bf2ac55791a" - integrity sha512-kbo/kq2LQ/A/is0PQwsEHM7Ca6//bGPPvU6UnsdDRSKTWxT/ru/xb88v4BJf6a69H+uTytOEsTusT9ksd/1iWQ== - dependencies: - "@jest/types" "^25.5.0" - ansi-regex "^5.0.0" - ansi-styles "^4.0.0" - react-is "^16.12.0" - -pretty-format@^26.1.0: - version "26.1.0" - resolved 
"https://registry.yarnpkg.com/pretty-format/-/pretty-format-26.1.0.tgz#272b9cd1f1a924ab5d443dc224899d7a65cb96ec" - integrity sha512-GmeO1PEYdM+non4BKCj+XsPJjFOJIPnsLewqhDVoqY1xo0yNmDas7tC2XwpMrRAHR3MaE2hPo37deX5OisJ2Wg== - dependencies: - "@jest/types" "^26.1.0" - ansi-regex "^5.0.0" - ansi-styles "^4.0.0" - react-is "^16.12.0" - -process-nextick-args@~2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.1.tgz#7820d9b16120cc55ca9ae7792680ae7dba6d7fe2" - integrity sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag== - -progress@^2.0.3: - version "2.0.3" - resolved "https://registry.yarnpkg.com/progress/-/progress-2.0.3.tgz#7e8cf8d8f5b8f239c1bc68beb4eb78567d572ef8" - integrity sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA== - -prompts@^2.0.1: - version "2.3.2" - resolved "https://registry.yarnpkg.com/prompts/-/prompts-2.3.2.tgz#480572d89ecf39566d2bd3fe2c9fccb7c4c0b068" - integrity sha512-Q06uKs2CkNYVID0VqwfAl9mipo99zkBv/n2JtWY89Yxa3ZabWSrs0e2KTudKVa3peLUvYXMefDqIleLPVUBZMA== - dependencies: - kleur "^3.0.3" - sisteransi "^1.0.4" - -ps-tree@^1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/ps-tree/-/ps-tree-1.2.0.tgz#5e7425b89508736cdd4f2224d028f7bb3f722ebd" - integrity sha512-0VnamPPYHl4uaU/nSFeZZpR21QAWRz+sRv4iW9+v/GS/J5U5iZB5BNN6J0RMoOvdx2gWM2+ZFMIm58q24e4UYA== - dependencies: - event-stream "=3.3.4" - -psl@^1.1.28: - version "1.8.0" - resolved "https://registry.yarnpkg.com/psl/-/psl-1.8.0.tgz#9326f8bcfb013adcc005fdff056acce020e51c24" - integrity sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ== - -pump@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/pump/-/pump-3.0.0.tgz#b4a2116815bde2f4e1ea602354e8c75565107a64" - integrity sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww== - dependencies: 
- end-of-stream "^1.1.0" - once "^1.3.1" - -punycode@^2.1.0, punycode@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" - integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A== - -qs@~6.5.2: - version "6.5.2" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.2.tgz#cb3ae806e8740444584ef154ce8ee98d403f3e36" - integrity sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA== - -react-is@^16.12.0: - version "16.13.1" - resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" - integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== - -read-pkg-up@^7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-7.0.1.tgz#f3a6135758459733ae2b95638056e1854e7ef507" - integrity sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg== - dependencies: - find-up "^4.1.0" - read-pkg "^5.2.0" - type-fest "^0.8.1" - -read-pkg@^5.2.0: - version "5.2.0" - resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-5.2.0.tgz#7bf295438ca5a33e56cd30e053b34ee7250c93cc" - integrity sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg== - dependencies: - "@types/normalize-package-data" "^2.4.0" - normalize-package-data "^2.5.0" - parse-json "^5.0.0" - type-fest "^0.6.0" - -readable-stream@~1.0.31: - version "1.0.34" - resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c" - integrity sha1-Elgg40vIQtLyqq+v5MKRbuMsFXw= - dependencies: - core-util-is "~1.0.0" - inherits "~2.0.1" - isarray "0.0.1" - string_decoder "~0.10.x" - -readable-stream@~2.3.6: - version "2.3.7" - resolved 
"https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.7.tgz#1eca1cf711aef814c04f62252a36a62f6cb23b57" - integrity sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw== - dependencies: - core-util-is "~1.0.0" - inherits "~2.0.3" - isarray "~1.0.0" - process-nextick-args "~2.0.0" - safe-buffer "~5.1.1" - string_decoder "~1.1.1" - util-deprecate "~1.0.1" - -rechoir@^0.6.2: - version "0.6.2" - resolved "https://registry.yarnpkg.com/rechoir/-/rechoir-0.6.2.tgz#85204b54dba82d5742e28c96756ef43af50e3384" - integrity sha1-hSBLVNuoLVdC4oyWdW70OvUOM4Q= - dependencies: - resolve "^1.1.6" - -regex-not@^1.0.0, regex-not@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/regex-not/-/regex-not-1.0.2.tgz#1f4ece27e00b0b65e0247a6810e6a85d83a5752c" - integrity sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A== - dependencies: - extend-shallow "^3.0.2" - safe-regex "^1.1.0" - -remove-trailing-separator@^1.0.1: - version "1.1.0" - resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz#c24bce2a283adad5bc3f58e0d48249b92379d8ef" - integrity sha1-wkvOKig62tW8P1jg1IJJuSN52O8= - -repeat-element@^1.1.2: - version "1.1.3" - resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.3.tgz#782e0d825c0c5a3bb39731f84efee6b742e6b1ce" - integrity sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g== - -repeat-string@^1.6.1: - version "1.6.1" - resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637" - integrity sha1-jcrkcOHIirwtYA//Sndihtp15jc= - -request-promise-core@1.1.3: - version "1.1.3" - resolved "https://registry.yarnpkg.com/request-promise-core/-/request-promise-core-1.1.3.tgz#e9a3c081b51380dfea677336061fea879a829ee9" - integrity 
sha512-QIs2+ArIGQVp5ZYbWD5ZLCY29D5CfWizP8eWnm8FoGD1TX61veauETVQbrV60662V0oFBkrDOuaBI8XgtuyYAQ== - dependencies: - lodash "^4.17.15" - -request-promise-native@^1.0.8: - version "1.0.8" - resolved "https://registry.yarnpkg.com/request-promise-native/-/request-promise-native-1.0.8.tgz#a455b960b826e44e2bf8999af64dff2bfe58cb36" - integrity sha512-dapwLGqkHtwL5AEbfenuzjTYg35Jd6KPytsC2/TLkVMz8rm+tNt72MGUWT1RP/aYawMpN6HqbNGBQaRcBtjQMQ== - dependencies: - request-promise-core "1.1.3" - stealthy-require "^1.1.1" - tough-cookie "^2.3.3" - -request@^2.88.2: - version "2.88.2" - resolved "https://registry.yarnpkg.com/request/-/request-2.88.2.tgz#d73c918731cb5a87da047e207234146f664d12b3" - integrity sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw== - dependencies: - aws-sign2 "~0.7.0" - aws4 "^1.8.0" - caseless "~0.12.0" - combined-stream "~1.0.6" - extend "~3.0.2" - forever-agent "~0.6.1" - form-data "~2.3.2" - har-validator "~5.1.3" - http-signature "~1.2.0" - is-typedarray "~1.0.0" - isstream "~0.1.2" - json-stringify-safe "~5.0.1" - mime-types "~2.1.19" - oauth-sign "~0.9.0" - performance-now "^2.1.0" - qs "~6.5.2" - safe-buffer "^5.1.2" - tough-cookie "~2.5.0" - tunnel-agent "^0.6.0" - uuid "^3.3.2" - -require-directory@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" - integrity sha1-jGStX9MNqxyXbiNE/+f3kqam30I= - -require-main-filename@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-2.0.0.tgz#d0b329ecc7cc0f61649f62215be69af54aa8989b" - integrity sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg== - -resolve-cwd@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/resolve-cwd/-/resolve-cwd-3.0.0.tgz#0f0075f1bb2544766cf73ba6a6e2adfebcb13f2d" - integrity 
sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg== - dependencies: - resolve-from "^5.0.0" - -resolve-from@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-5.0.0.tgz#c35225843df8f776df21c57557bc087e9dfdfc69" - integrity sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw== - -resolve-url@^0.2.1: - version "0.2.1" - resolved "https://registry.yarnpkg.com/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a" - integrity sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo= - -resolve@^1.1.6: - version "1.22.0" - resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.0.tgz#5e0b8c67c15df57a89bdbabe603a002f21731198" - integrity sha512-Hhtrw0nLeSrFQ7phPp4OOcVjLPIeMnRlr5mcnVuMe7M/7eBn98A3hmFRLoFo3DLZkivSYwhRUJTyPyWAk56WLw== - dependencies: - is-core-module "^2.8.1" - path-parse "^1.0.7" - supports-preserve-symlinks-flag "^1.0.0" - -resolve@^1.10.0, resolve@^1.17.0, resolve@^1.3.2: - version "1.17.0" - resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.17.0.tgz#b25941b54968231cc2d1bb76a79cb7f2c0bf8444" - integrity sha512-ic+7JYiV8Vi2yzQGFWOkiZD5Z9z7O2Zhm9XMaTxdJExKasieFCr+yXZ/WmXsckHiKl12ar0y6XiXDx3m4RHn1w== - dependencies: - path-parse "^1.0.6" - -ret@~0.1.10: - version "0.1.15" - resolved "https://registry.yarnpkg.com/ret/-/ret-0.1.15.tgz#b8a4825d5bdb1fc3f6f53c2bc33f81388681c7bc" - integrity sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg== - -rimraf@^3.0.0: - version "3.0.2" - resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-3.0.2.tgz#f1a5402ba6220ad52cc1282bac1ae3aa49fd061a" - integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA== - dependencies: - glob "^7.1.3" - -rsvp@^4.8.4: - version "4.8.5" - resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-4.8.5.tgz#c8f155311d167f68f21e168df71ec5b083113734" - integrity 
sha512-nfMOlASu9OnRJo1mbEk2cz0D56a1MBNrJ7orjRZQG10XDyuvwksKbuXNp6qa+kbn839HwjwhBzhFmdsaEAfauA== - -safe-buffer@^5.0.1, safe-buffer@^5.1.2: - version "5.2.1" - resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" - integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== - -safe-buffer@~5.1.0, safe-buffer@~5.1.1: - version "5.1.2" - resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" - integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== - -safe-regex@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/safe-regex/-/safe-regex-1.1.0.tgz#40a3669f3b077d1e943d44629e157dd48023bf2e" - integrity sha1-QKNmnzsHfR6UPURinhV91IAjvy4= - dependencies: - ret "~0.1.10" - -"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: - version "2.1.2" - resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" - integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== - -sane@^4.0.3: - version "4.1.0" - resolved "https://registry.yarnpkg.com/sane/-/sane-4.1.0.tgz#ed881fd922733a6c461bc189dc2b6c006f3ffded" - integrity sha512-hhbzAgTIX8O7SHfp2c8/kREfEn4qO/9q8C9beyY6+tvZ87EpoZ3i1RIEvp27YBswnNbY9mWd6paKVmKbAgLfZA== - dependencies: - "@cnakazawa/watch" "^1.0.3" - anymatch "^2.0.0" - capture-exit "^2.0.0" - exec-sh "^0.3.2" - execa "^1.0.0" - fb-watchman "^2.0.0" - micromatch "^3.1.4" - minimist "^1.1.1" - walker "~1.0.5" - -saxes@^5.0.0: - version "5.0.1" - resolved "https://registry.yarnpkg.com/saxes/-/saxes-5.0.1.tgz#eebab953fa3b7608dbe94e5dadb15c888fa6696d" - integrity sha512-5LBh1Tls8c9xgGjw3QrMwETmTMVk0oFgvrFSvWx62llR2hcEInrKNZ2GZCCuuy2lvWrdl5jhbpeqc5hRYKFOcw== - dependencies: - xmlchars "^2.2.0" - 
-"semver@2 || 3 || 4 || 5", semver@^5.3.0, semver@^5.4.1, semver@^5.5.0: - version "5.7.1" - resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.1.tgz#a954f931aeba508d307bbf069eff0c01c96116f7" - integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ== - -semver@7.x, semver@^7.2.1, semver@^7.3.2: - version "7.3.2" - resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.2.tgz#604962b052b81ed0786aae84389ffba70ffd3938" - integrity sha512-OrOb32TeeambH6UrhtShmF7CRDqhL6/5XpPNp2DuRH6+9QLw/orhp72j87v8Qa1ScDkvrrBNpZcDejAirJmfXQ== - -semver@^6.0.0, semver@^6.3.0: - version "6.3.0" - resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" - integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== - -set-blocking@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" - integrity sha1-BF+XgtARrppoA93TgrJDkrPYkPc= - -set-value@^2.0.0, set-value@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/set-value/-/set-value-2.0.1.tgz#a18d40530e6f07de4228c7defe4227af8cad005b" - integrity sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw== - dependencies: - extend-shallow "^2.0.1" - is-extendable "^0.1.1" - is-plain-object "^2.0.3" - split-string "^3.0.1" - -shebang-command@^1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea" - integrity sha1-RKrGW2lbAzmJaMOfNj/uXer98eo= - dependencies: - shebang-regex "^1.0.0" - -shebang-command@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" - integrity 
sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA== - dependencies: - shebang-regex "^3.0.0" - -shebang-regex@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3" - integrity sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM= - -shebang-regex@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" - integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== - -shelljs@^0.8.4: - version "0.8.5" - resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.8.5.tgz#de055408d8361bed66c669d2f000538ced8ee20c" - integrity sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow== - dependencies: - glob "^7.0.0" - interpret "^1.0.0" - rechoir "^0.6.2" - -shellwords@^0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/shellwords/-/shellwords-0.1.1.tgz#d6b9181c1a48d397324c84871efbcfc73fc0654b" - integrity sha512-vFwSUfQvqybiICwZY5+DAWIPLKsWO31Q91JSKl3UYv+K5c2QRPzn0qzec6QPu1Qc9eHYItiP3NdJqNVqetYAww== - -signal-exit@^3.0.0, signal-exit@^3.0.2: - version "3.0.3" - resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.3.tgz#a1410c2edd8f077b08b4e253c8eacfcaf057461c" - integrity sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA== - -sisteransi@^1.0.4: - version "1.0.5" - resolved "https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.5.tgz#134d681297756437cc05ca01370d3a7a571075ed" - integrity sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg== - -slash@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" - integrity 
sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q== - -snapdragon-node@^2.0.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/snapdragon-node/-/snapdragon-node-2.1.1.tgz#6c175f86ff14bdb0724563e8f3c1b021a286853b" - integrity sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw== - dependencies: - define-property "^1.0.0" - isobject "^3.0.0" - snapdragon-util "^3.0.1" - -snapdragon-util@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/snapdragon-util/-/snapdragon-util-3.0.1.tgz#f956479486f2acd79700693f6f7b805e45ab56e2" - integrity sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ== - dependencies: - kind-of "^3.2.0" - -snapdragon@^0.8.1: - version "0.8.2" - resolved "https://registry.yarnpkg.com/snapdragon/-/snapdragon-0.8.2.tgz#64922e7c565b0e14204ba1aa7d6964278d25182d" - integrity sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg== - dependencies: - base "^0.11.1" - debug "^2.2.0" - define-property "^0.2.5" - extend-shallow "^2.0.1" - map-cache "^0.2.2" - source-map "^0.5.6" - source-map-resolve "^0.5.0" - use "^3.1.0" - -source-map-resolve@^0.5.0: - version "0.5.3" - resolved "https://registry.yarnpkg.com/source-map-resolve/-/source-map-resolve-0.5.3.tgz#190866bece7553e1f8f267a2ee82c606b5509a1a" - integrity sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw== - dependencies: - atob "^2.1.2" - decode-uri-component "^0.2.0" - resolve-url "^0.2.1" - source-map-url "^0.4.0" - urix "^0.1.0" - -source-map-support@^0.5.6: - version "0.5.19" - resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.19.tgz#a98b62f86dcaf4f67399648c085291ab9e8fed61" - integrity sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw== - dependencies: - buffer-from "^1.0.0" - source-map 
"^0.6.0" - -source-map-url@^0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.4.0.tgz#3e935d7ddd73631b97659956d55128e87b5084a3" - integrity sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM= - -source-map@^0.5.0, source-map@^0.5.6: - version "0.5.7" - resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" - integrity sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w= - -source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.1: - version "0.6.1" - resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" - integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== - -source-map@^0.7.3: - version "0.7.3" - resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.7.3.tgz#5302f8169031735226544092e64981f751750383" - integrity sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ== - -spdx-correct@^3.0.0: - version "3.1.1" - resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.1.1.tgz#dece81ac9c1e6713e5f7d1b6f17d468fa53d89a9" - integrity sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w== - dependencies: - spdx-expression-parse "^3.0.0" - spdx-license-ids "^3.0.0" - -spdx-exceptions@^2.1.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz#3f28ce1a77a00372683eade4a433183527a2163d" - integrity sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A== - -spdx-expression-parse@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz#cf70f50482eefdc98e3ce0a6833e4a53ceeba679" - integrity sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q== - dependencies: - spdx-exceptions "^2.1.0" - 
spdx-license-ids "^3.0.0" - -spdx-license-ids@^3.0.0: - version "3.0.5" - resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz#3694b5804567a458d3c8045842a6358632f62654" - integrity sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q== - -split-string@^3.0.1, split-string@^3.0.2: - version "3.1.0" - resolved "https://registry.yarnpkg.com/split-string/-/split-string-3.1.0.tgz#7cb09dda3a86585705c64b39a6466038682e8fe2" - integrity sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw== - dependencies: - extend-shallow "^3.0.0" - -split@0.3: - version "0.3.3" - resolved "https://registry.yarnpkg.com/split/-/split-0.3.3.tgz#cd0eea5e63a211dfff7eb0f091c4133e2d0dd28f" - integrity sha1-zQ7qXmOiEd//frDwkcQTPi0N0o8= - dependencies: - through "2" - -sprintf-js@~1.0.2: - version "1.0.3" - resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" - integrity sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw= - -sshpk@^1.7.0: - version "1.16.1" - resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.16.1.tgz#fb661c0bef29b39db40769ee39fa70093d6f6877" - integrity sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg== - dependencies: - asn1 "~0.2.3" - assert-plus "^1.0.0" - bcrypt-pbkdf "^1.0.0" - dashdash "^1.12.0" - ecc-jsbn "~0.1.1" - getpass "^0.1.1" - jsbn "~0.1.0" - safer-buffer "^2.0.2" - tweetnacl "~0.14.0" - -stack-utils@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/stack-utils/-/stack-utils-2.0.2.tgz#5cf48b4557becb4638d0bc4f21d23f5d19586593" - integrity sha512-0H7QK2ECz3fyZMzQ8rH0j2ykpfbnd20BFtfg/SqVC2+sCTtcw0aDTGB7dk+de4U4uUeuz6nOtJcrkFFLG1B0Rg== - dependencies: - escape-string-regexp "^2.0.0" - -static-extend@^0.1.1: - version "0.1.2" - resolved "https://registry.yarnpkg.com/static-extend/-/static-extend-0.1.2.tgz#60809c39cbff55337226fd5e0b520f341f1fb5c6" - 
integrity sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY= - dependencies: - define-property "^0.2.5" - object-copy "^0.1.0" - -stealthy-require@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/stealthy-require/-/stealthy-require-1.1.1.tgz#35b09875b4ff49f26a777e509b3090a3226bf24b" - integrity sha1-NbCYdbT/SfJqd35QmzCQoyJr8ks= - -stream-combiner@~0.0.4: - version "0.0.4" - resolved "https://registry.yarnpkg.com/stream-combiner/-/stream-combiner-0.0.4.tgz#4d5e433c185261dde623ca3f44c586bcf5c4ad14" - integrity sha1-TV5DPBhSYd3mI8o/RMWGvPXErRQ= - dependencies: - duplexer "~0.1.1" - -string-argv@^0.1.1: - version "0.1.2" - resolved "https://registry.yarnpkg.com/string-argv/-/string-argv-0.1.2.tgz#c5b7bc03fb2b11983ba3a72333dd0559e77e4738" - integrity sha512-mBqPGEOMNJKXRo7z0keX0wlAhbBAjilUdPW13nN0PecVryZxdHIeM7TqbsSUA7VYuS00HGC6mojP7DlQzfa9ZA== - -string-length@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/string-length/-/string-length-4.0.1.tgz#4a973bf31ef77c4edbceadd6af2611996985f8a1" - integrity sha512-PKyXUd0LK0ePjSOnWn34V2uD6acUWev9uy0Ft05k0E8xRW+SKcA0F7eMr7h5xlzfn+4O3N+55rduYyet3Jk+jw== - dependencies: - char-regex "^1.0.2" - strip-ansi "^6.0.0" - -string-width@^4.1.0, string-width@^4.2.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.0.tgz#952182c46cc7b2c313d1596e623992bd163b72b5" - integrity sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg== - dependencies: - emoji-regex "^8.0.0" - is-fullwidth-code-point "^3.0.0" - strip-ansi "^6.0.0" - -string_decoder@~0.10.x: - version "0.10.31" - resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94" - integrity sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ= - -string_decoder@~1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8" - integrity 
sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg== - dependencies: - safe-buffer "~5.1.0" - -strip-ansi@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.0.tgz#0b1571dd7669ccd4f3e06e14ef1eed26225ae532" - integrity sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w== - dependencies: - ansi-regex "^5.0.0" - -strip-bom@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-4.0.0.tgz#9c3505c1db45bcedca3d9cf7a16f5c5aa3901878" - integrity sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w== - -strip-eof@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/strip-eof/-/strip-eof-1.0.0.tgz#bb43ff5598a6eb05d89b59fcd129c983313606bf" - integrity sha1-u0P/VZim6wXYm1n80SnJgzE2Br8= - -strip-final-newline@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-2.0.0.tgz#89b852fb2fcbe936f6f4b3187afb0a12c1ab58ad" - integrity sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA== - -supports-color@^5.3.0: - version "5.5.0" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" - integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== - dependencies: - has-flag "^3.0.0" - -supports-color@^7.0.0, supports-color@^7.1.0: - version "7.1.0" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.1.0.tgz#68e32591df73e25ad1c4b49108a2ec507962bfd1" - integrity sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g== - dependencies: - has-flag "^4.0.0" - -supports-hyperlinks@^2.0.0: - version "2.1.0" - resolved 
"https://registry.yarnpkg.com/supports-hyperlinks/-/supports-hyperlinks-2.1.0.tgz#f663df252af5f37c5d49bbd7eeefa9e0b9e59e47" - integrity sha512-zoE5/e+dnEijk6ASB6/qrK+oYdm2do1hjoLWrqUC/8WEIW1gbxFcKuBof7sW8ArN6e+AYvsE8HBGiVRWL/F5CA== - dependencies: - has-flag "^4.0.0" - supports-color "^7.0.0" - -supports-preserve-symlinks-flag@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" - integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== - -symbol-tree@^3.2.4: - version "3.2.4" - resolved "https://registry.yarnpkg.com/symbol-tree/-/symbol-tree-3.2.4.tgz#430637d248ba77e078883951fb9aa0eed7c63fa2" - integrity sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw== - -terminal-link@^2.0.0: - version "2.1.1" - resolved "https://registry.yarnpkg.com/terminal-link/-/terminal-link-2.1.1.tgz#14a64a27ab3c0df933ea546fba55f2d078edc994" - integrity sha512-un0FmiRUQNr5PJqy9kP7c40F5BOfpGlYTrxonDChEZB7pzZxRNp/bt+ymiy9/npwXya9KH99nJ/GXFIiUkYGFQ== - dependencies: - ansi-escapes "^4.2.1" - supports-hyperlinks "^2.0.0" - -test-exclude@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/test-exclude/-/test-exclude-6.0.0.tgz#04a8698661d805ea6fa293b6cb9e63ac044ef15e" - integrity sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w== - dependencies: - "@istanbuljs/schema" "^0.1.2" - glob "^7.1.4" - minimatch "^3.0.4" - -throat@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/throat/-/throat-5.0.0.tgz#c5199235803aad18754a667d659b5e72ce16764b" - integrity sha512-fcwX4mndzpLQKBS1DVYhGAcYaYt7vsHNIvQV+WXMvnow5cgjPphq5CaayLaGsjRdSCKZFNGt7/GYAuXaNOiYCA== - -through2@^2.0.1: - version "2.0.5" - resolved "https://registry.yarnpkg.com/through2/-/through2-2.0.5.tgz#01c1e39eb31d07cb7d03a96a70823260b23132cd" - 
integrity sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ== - dependencies: - readable-stream "~2.3.6" - xtend "~4.0.1" - -through@2, through@~2.3, through@~2.3.1: - version "2.3.8" - resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" - integrity sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU= - -tmpl@1.0.x: - version "1.0.4" - resolved "https://registry.yarnpkg.com/tmpl/-/tmpl-1.0.4.tgz#23640dd7b42d00433911140820e5cf440e521dd1" - integrity sha1-I2QN17QtAEM5ERQIIOXPRA5SHdE= - -to-fast-properties@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e" - integrity sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4= - -to-object-path@^0.3.0: - version "0.3.0" - resolved "https://registry.yarnpkg.com/to-object-path/-/to-object-path-0.3.0.tgz#297588b7b0e7e0ac08e04e672f85c1f4999e17af" - integrity sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68= - dependencies: - kind-of "^3.0.2" - -to-regex-range@^2.1.0: - version "2.1.1" - resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-2.1.1.tgz#7c80c17b9dfebe599e27367e0d4dd5590141db38" - integrity sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg= - dependencies: - is-number "^3.0.0" - repeat-string "^1.6.1" - -to-regex-range@^5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4" - integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ== - dependencies: - is-number "^7.0.0" - -to-regex@^3.0.1, to-regex@^3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/to-regex/-/to-regex-3.0.2.tgz#13cfdd9b336552f30b51f33a8ae1b42a7a7599ce" - integrity sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw== - dependencies: - define-property "^2.0.2" - extend-shallow "^3.0.2" - regex-not 
"^1.0.2" - safe-regex "^1.1.0" - -tough-cookie@^2.3.3, tough-cookie@~2.5.0: - version "2.5.0" - resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.5.0.tgz#cd9fb2a0aa1d5a12b473bd9fb96fa3dcff65ade2" - integrity sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g== - dependencies: - psl "^1.1.28" - punycode "^2.1.1" - -tough-cookie@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-3.0.1.tgz#9df4f57e739c26930a018184887f4adb7dca73b2" - integrity sha512-yQyJ0u4pZsv9D4clxO69OEjLWYw+jbgspjTue4lTQZLfV0c5l1VmK2y1JK8E9ahdpltPOaAThPcp5nKPUgSnsg== - dependencies: - ip-regex "^2.1.0" - psl "^1.1.28" - punycode "^2.1.1" - -tr46@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/tr46/-/tr46-2.0.2.tgz#03273586def1595ae08fedb38d7733cee91d2479" - integrity sha512-3n1qG+/5kg+jrbTzwAykB5yRYtQCTqOGKq5U5PE3b0a1/mzo6snDhjGS0zJVJunO0NrT3Dg1MLy5TjWP/UJppg== - dependencies: - punycode "^2.1.1" - -ts-jest@^26.1.0: - version "26.1.1" - resolved "https://registry.yarnpkg.com/ts-jest/-/ts-jest-26.1.1.tgz#b98569b8a4d4025d966b3d40c81986dd1c510f8d" - integrity sha512-Lk/357quLg5jJFyBQLnSbhycnB3FPe+e9i7ahxokyXxAYoB0q1pPmqxxRPYr4smJic1Rjcf7MXDBhZWgxlli0A== - dependencies: - bs-logger "0.x" - buffer-from "1.x" - fast-json-stable-stringify "2.x" - json5 "2.x" - lodash.memoize "4.x" - make-error "1.x" - micromatch "4.x" - mkdirp "1.x" - semver "7.x" - yargs-parser "18.x" - -tsc-watch@^4.2.9: - version "4.2.9" - resolved "https://registry.yarnpkg.com/tsc-watch/-/tsc-watch-4.2.9.tgz#d93fc74233ca4ef7ee6b12d08c0fe6aca3e19044" - integrity sha512-DlTaoDs74+KUpyWr7dCGhuscAUKCz6CiFduBN7R9RbLJSSN1moWdwoCLASE7+zLgGvV5AwXfYDiEMAsPGaO+Vw== - dependencies: - cross-spawn "^7.0.3" - node-cleanup "^2.1.2" - ps-tree "^1.2.0" - string-argv "^0.1.1" - strip-ansi "^6.0.0" - -tslib@^1.10.0, tslib@^1.8.1: - version "1.13.0" - resolved 
"https://registry.yarnpkg.com/tslib/-/tslib-1.13.0.tgz#c881e13cc7015894ed914862d276436fa9a47043" - integrity sha512-i/6DQjL8Xf3be4K/E6Wgpekn5Qasl1usyw++dAA35Ue5orEn65VIxOA+YvNNl9HV3qv70T7CNwjODHZrLwvd1Q== - -tslint@^6.1.2: - version "6.1.2" - resolved "https://registry.yarnpkg.com/tslint/-/tslint-6.1.2.tgz#2433c248512cc5a7b2ab88ad44a6b1b34c6911cf" - integrity sha512-UyNrLdK3E0fQG/xWNqAFAC5ugtFyPO4JJR1KyyfQAyzR8W0fTRrC91A8Wej4BntFzcvETdCSDa/4PnNYJQLYiA== - dependencies: - "@babel/code-frame" "^7.0.0" - builtin-modules "^1.1.1" - chalk "^2.3.0" - commander "^2.12.1" - diff "^4.0.1" - glob "^7.1.1" - js-yaml "^3.13.1" - minimatch "^3.0.4" - mkdirp "^0.5.3" - resolve "^1.3.2" - semver "^5.3.0" - tslib "^1.10.0" - tsutils "^2.29.0" - -tsutils@^2.29.0: - version "2.29.0" - resolved "https://registry.yarnpkg.com/tsutils/-/tsutils-2.29.0.tgz#32b488501467acbedd4b85498673a0812aca0b99" - integrity sha512-g5JVHCIJwzfISaXpXE1qvNalca5Jwob6FjI4AoPlqMusJ6ftFE7IkkFoMhVLRgK+4Kx3gkzb8UZK5t5yTTvEmA== - dependencies: - tslib "^1.8.1" - -tunnel-agent@^0.6.0: - version "0.6.0" - resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" - integrity sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0= - dependencies: - safe-buffer "^5.0.1" - -tweetnacl@^0.14.3, tweetnacl@~0.14.0: - version "0.14.5" - resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" - integrity sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q= - -type-check@~0.3.2: - version "0.3.2" - resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72" - integrity sha1-WITKtRLPHTVeP7eE8wgEsrUg23I= - dependencies: - prelude-ls "~1.1.2" - -type-detect@4.0.8: - version "4.0.8" - resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-4.0.8.tgz#7646fb5f18871cfbb7749e69bd39a6388eb7450c" - integrity 
sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g== - -type-fest@^0.11.0: - version "0.11.0" - resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.11.0.tgz#97abf0872310fed88a5c466b25681576145e33f1" - integrity sha512-OdjXJxnCN1AvyLSzeKIgXTXxV+99ZuXl3Hpo9XpJAv9MBcHrrJOQ5kV7ypXOuQie+AmWG25hLbiKdwYTifzcfQ== - -type-fest@^0.6.0: - version "0.6.0" - resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.6.0.tgz#8d2a2370d3df886eb5c90ada1c5bf6188acf838b" - integrity sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg== - -type-fest@^0.8.1: - version "0.8.1" - resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.8.1.tgz#09e249ebde851d3b1e48d27c105444667f17b83d" - integrity sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA== - -typedarray-to-buffer@^3.1.5: - version "3.1.5" - resolved "https://registry.yarnpkg.com/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz#a97ee7a9ff42691b9f783ff1bc5112fe3fca9080" - integrity sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q== - dependencies: - is-typedarray "^1.0.0" - -typedoc-default-themes@^0.11.4: - version "0.11.4" - resolved "https://registry.yarnpkg.com/typedoc-default-themes/-/typedoc-default-themes-0.11.4.tgz#1bc55b7c8d1132844616ff6f570e1e2cd0eb7343" - integrity sha512-Y4Lf+qIb9NTydrexlazAM46SSLrmrQRqWiD52593g53SsmUFioAsMWt8m834J6qsp+7wHRjxCXSZeiiW5cMUdw== - -typedoc@0.19.2: - version "0.19.2" - resolved "https://registry.yarnpkg.com/typedoc/-/typedoc-0.19.2.tgz#842a63a581f4920f76b0346bb80eb2a49afc2c28" - integrity sha512-oDEg1BLEzi1qvgdQXc658EYgJ5qJLVSeZ0hQ57Eq4JXy6Vj2VX4RVo18qYxRWz75ifAaYuYNBUCnbhjd37TfOg== - dependencies: - fs-extra "^9.0.1" - handlebars "^4.7.6" - highlight.js "^10.2.0" - lodash "^4.17.20" - lunr "^2.3.9" - marked "^1.1.1" - minimatch "^3.0.0" - progress "^2.0.3" - semver "^7.3.2" - shelljs "^0.8.4" - 
typedoc-default-themes "^0.11.4" - -typescript@^4: - version "4.6.4" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.6.4.tgz#caa78bbc3a59e6a5c510d35703f6a09877ce45e9" - integrity sha512-9ia/jWHIEbo49HfjrLGfKbZSuWo9iTMwXO+Ca3pRsSpbsMbc7/IU8NKdCZVRRBafVPGnoJeFL76ZOAA84I9fEg== - -uglify-js@^3.1.4: - version "3.10.0" - resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.10.0.tgz#397a7e6e31ce820bfd1cb55b804ee140c587a9e7" - integrity sha512-Esj5HG5WAyrLIdYU74Z3JdG2PxdIusvj6IWHMtlyESxc7kcDz7zYlYjpnSokn1UbpV0d/QX9fan7gkCNd/9BQA== - -union-value@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/union-value/-/union-value-1.0.1.tgz#0b6fe7b835aecda61c6ea4d4f02c14221e109847" - integrity sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg== - dependencies: - arr-union "^3.1.0" - get-value "^2.0.6" - is-extendable "^0.1.1" - set-value "^2.0.1" - -universalify@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/universalify/-/universalify-1.0.0.tgz#b61a1da173e8435b2fe3c67d29b9adf8594bd16d" - integrity sha512-rb6X1W158d7pRQBg5gkR8uPaSfiids68LTJQYOtEUhoJUWBdaQHsuT/EUduxXYxcrt4r5PJ4fuHW1MHT6p0qug== - -universalify@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/universalify/-/universalify-2.0.0.tgz#75a4984efedc4b08975c5aeb73f530d02df25717" - integrity sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ== - -unset-value@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/unset-value/-/unset-value-1.0.0.tgz#8376873f7d2335179ffb1e6fc3a8ed0dfc8ab559" - integrity sha1-g3aHP30jNRef+x5vw6jtDfyKtVk= - dependencies: - has-value "^0.3.1" - isobject "^3.0.0" - -uri-js@^4.2.2: - version "4.2.2" - resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.2.2.tgz#94c540e1ff772956e2299507c010aea6c8838eb0" - integrity sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ== - 
dependencies: - punycode "^2.1.0" - -urix@^0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/urix/-/urix-0.1.0.tgz#da937f7a62e21fec1fd18d49b35c2935067a6c72" - integrity sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI= - -use@^3.1.0: - version "3.1.1" - resolved "https://registry.yarnpkg.com/use/-/use-3.1.1.tgz#d50c8cac79a19fbc20f2911f56eb973f4e10070f" - integrity sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ== - -util-deprecate@~1.0.1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" - integrity sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8= - -uuid@^3.3.2: - version "3.4.0" - resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.4.0.tgz#b23e4358afa8a202fe7a100af1f5f883f02007ee" - integrity sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A== - -uuid@^7.0.3: - version "7.0.3" - resolved "https://registry.yarnpkg.com/uuid/-/uuid-7.0.3.tgz#c5c9f2c8cf25dc0a372c4df1441c41f5bd0c680b" - integrity sha512-DPSke0pXhTZgoF/d+WSt2QaKMCFSfx7QegxEWT+JOuHF5aWrKEn0G+ztjuJg/gG8/ItK+rbPCD/yNv8yyih6Cg== - -v8-to-istanbul@^4.1.3: - version "4.1.4" - resolved "https://registry.yarnpkg.com/v8-to-istanbul/-/v8-to-istanbul-4.1.4.tgz#b97936f21c0e2d9996d4985e5c5156e9d4e49cd6" - integrity sha512-Rw6vJHj1mbdK8edjR7+zuJrpDtKIgNdAvTSAcpYfgMIw+u2dPDntD3dgN4XQFLU2/fvFQdzj+EeSGfd/jnY5fQ== - dependencies: - "@types/istanbul-lib-coverage" "^2.0.1" - convert-source-map "^1.6.0" - source-map "^0.7.3" - -validate-npm-package-license@^3.0.1: - version "3.0.4" - resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz#fc91f6b9c7ba15c857f4cb2c5defeec39d4f410a" - integrity sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew== - dependencies: - spdx-correct "^3.0.0" - spdx-expression-parse "^3.0.0" - -verror@1.10.0: - version "1.10.0" - resolved 
"https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400" - integrity sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA= - dependencies: - assert-plus "^1.0.0" - core-util-is "1.0.2" - extsprintf "^1.2.0" - -w3c-hr-time@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/w3c-hr-time/-/w3c-hr-time-1.0.2.tgz#0a89cdf5cc15822df9c360543676963e0cc308cd" - integrity sha512-z8P5DvDNjKDoFIHK7q8r8lackT6l+jo/Ye3HOle7l9nICP9lf1Ci25fy9vHd0JOWewkIFzXIEig3TdKT7JQ5fQ== - dependencies: - browser-process-hrtime "^1.0.0" - -w3c-xmlserializer@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/w3c-xmlserializer/-/w3c-xmlserializer-2.0.0.tgz#3e7104a05b75146cc60f564380b7f683acf1020a" - integrity sha512-4tzD0mF8iSiMiNs30BiLO3EpfGLZUT2MSX/G+o7ZywDzliWQ3OPtTZ0PTC3B3ca1UAf4cJMHB+2Bf56EriJuRA== - dependencies: - xml-name-validator "^3.0.0" - -walker@^1.0.7, walker@~1.0.5: - version "1.0.7" - resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.7.tgz#2f7f9b8fd10d677262b18a884e28d19618e028fb" - integrity sha1-L3+bj9ENZ3JisYqITijRlhjgKPs= - dependencies: - makeerror "1.0.x" - -webidl-conversions@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-5.0.0.tgz#ae59c8a00b121543a2acc65c0434f57b0fc11aff" - integrity sha512-VlZwKPCkYKxQgeSbH5EyngOmRp7Ww7I9rQLERETtf5ofd9pGeswWiOtogpEO850jziPRarreGxn5QIiTqpb2wA== - -webidl-conversions@^6.0.0: - version "6.1.0" - resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-6.1.0.tgz#9111b4d7ea80acd40f5270d666621afa78b69514" - integrity sha512-qBIvFLGiBpLjfwmYAaHPXsn+ho5xZnGvyGvsarywGNc8VyQJUMHJ8OBKGGrPER0okBeMDaan4mNBlgBROxuI8w== - -whatwg-encoding@^1.0.5: - version "1.0.5" - resolved "https://registry.yarnpkg.com/whatwg-encoding/-/whatwg-encoding-1.0.5.tgz#5abacf777c32166a51d085d6b4f3e7d27113ddb0" - integrity sha512-b5lim54JOPN9HtzvK9HFXvBma/rnfFeqsic0hSpjtDbVxR3dJKLc+KB4V6GgiGOvl7CY/KNh8rxSo9DKQrnUEw== - dependencies: - 
iconv-lite "0.4.24" - -whatwg-mimetype@^2.3.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/whatwg-mimetype/-/whatwg-mimetype-2.3.0.tgz#3d4b1e0312d2079879f826aff18dbeeca5960fbf" - integrity sha512-M4yMwr6mAnQz76TbJm914+gPpB/nCwvZbJU28cUD6dR004SAxDLOOSUaB1JDRqLtaOV/vi0IC5lEAGFgrjGv/g== - -whatwg-url@^8.0.0: - version "8.1.0" - resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-8.1.0.tgz#c628acdcf45b82274ce7281ee31dd3c839791771" - integrity sha512-vEIkwNi9Hqt4TV9RdnaBPNt+E2Sgmo3gePebCRgZ1R7g6d23+53zCTnuB0amKI4AXq6VM8jj2DUAa0S1vjJxkw== - dependencies: - lodash.sortby "^4.7.0" - tr46 "^2.0.2" - webidl-conversions "^5.0.0" - -which-module@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.0.tgz#d9ef07dce77b9902b8a3a8fa4b31c3e3f7e6e87a" - integrity sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho= - -which@^1.2.9: - version "1.3.1" - resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" - integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ== - dependencies: - isexe "^2.0.0" - -which@^2.0.1, which@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1" - integrity sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== - dependencies: - isexe "^2.0.0" - -word-wrap@~1.2.3: - version "1.2.3" - resolved "https://registry.yarnpkg.com/word-wrap/-/word-wrap-1.2.3.tgz#610636f6b1f703891bd34771ccb17fb93b47079c" - integrity sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ== - -wordwrap@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb" - integrity sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus= - -wrap-ansi@^6.2.0: - version "6.2.0" - resolved 
"https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53" - integrity sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA== - dependencies: - ansi-styles "^4.0.0" - string-width "^4.1.0" - strip-ansi "^6.0.0" - -wrappy@1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" - integrity sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8= - -write-file-atomic@^3.0.0: - version "3.0.3" - resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-3.0.3.tgz#56bd5c5a5c70481cd19c571bd39ab965a5de56e8" - integrity sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q== - dependencies: - imurmurhash "^0.1.4" - is-typedarray "^1.0.0" - signal-exit "^3.0.2" - typedarray-to-buffer "^3.1.5" - -ws@^7.2.3: - version "7.3.0" - resolved "https://registry.yarnpkg.com/ws/-/ws-7.3.0.tgz#4b2f7f219b3d3737bc1a2fbf145d825b94d38ffd" - integrity sha512-iFtXzngZVXPGgpTlP1rBqsUK82p9tKqsWRPg5L56egiljujJT3vGAYnHANvFxBieXrTFavhzhxW52jnaWV+w2w== - -xml-name-validator@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-3.0.0.tgz#6ae73e06de4d8c6e47f9fb181f78d648ad457c6a" - integrity sha512-A5CUptxDsvxKJEU3yO6DuWBSJz/qizqzJKOMIfUJHETbBw/sFaDxgd6fxm1ewUaM0jZ444Fc5vC5ROYurg/4Pw== - -xmlchars@^2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/xmlchars/-/xmlchars-2.2.0.tgz#060fe1bcb7f9c76fe2a17db86a9bc3ab894210cb" - integrity sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw== - -xtend@~4.0.1: - version "4.0.2" - resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54" - integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ== - -y18n@^4.0.0: - version "4.0.0" - resolved 
"https://registry.yarnpkg.com/y18n/-/y18n-4.0.0.tgz#95ef94f85ecc81d007c264e190a120f0a3c8566b" - integrity sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w== - -yargs-parser@18.x, yargs-parser@^18.1.1: - version "18.1.3" - resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-18.1.3.tgz#be68c4975c6b2abf469236b0c870362fab09a7b0" - integrity sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ== - dependencies: - camelcase "^5.0.0" - decamelize "^1.2.0" - -yargs@^15.3.1: - version "15.3.1" - resolved "https://registry.yarnpkg.com/yargs/-/yargs-15.3.1.tgz#9505b472763963e54afe60148ad27a330818e98b" - integrity sha512-92O1HWEjw27sBfgmXiixJWT5hRBp2eobqXicLtPBIDBhYB+1HpwZlXmbW2luivBJHBzki+7VyCLRtAkScbTBQA== - dependencies: - cliui "^6.0.0" - decamelize "^1.2.0" - find-up "^4.1.0" - get-caller-file "^2.0.1" - require-directory "^2.1.1" - require-main-filename "^2.0.0" - set-blocking "^2.0.0" - string-width "^4.2.0" - which-module "^2.0.0" - y18n "^4.0.0" - yargs-parser "^18.1.1" diff --git a/docs/.env.development b/docs/.env.development deleted file mode 100644 index cc187749255f6..0000000000000 --- a/docs/.env.development +++ /dev/null @@ -1,3 +0,0 @@ -ALGOLIA_APP_ID='PBBQOO27UD' -ALGOLIA_API_KEY='2998a34fa3a0f52837692ecdaa03f44b' -ALGOLIA_INDEX_NAME='cubejs' diff --git a/docs/.env.production b/docs/.env.production deleted file mode 100644 index 7e76144e5090e..0000000000000 --- a/docs/.env.production +++ /dev/null @@ -1,4 +0,0 @@ -ALGOLIA_APP_ID='PBBQOO27UD' -ALGOLIA_API_KEY='2998a34fa3a0f52837692ecdaa03f44b' -ALGOLIA_INDEX_NAME='cubejs' -PATH_PREFIX='/docs' diff --git a/docs/.gitignore b/docs/.gitignore deleted file mode 100644 index 52e544d0380f5..0000000000000 --- a/docs/.gitignore +++ /dev/null @@ -1,13 +0,0 @@ -node_modules/ -public/ -dist/ -.cache -yarn-error.log -.DS_Store -.idea - -# Cube.js repo -cube.js/ - -# Local Netlify folder -.netlify \ No newline at end of file 
diff --git a/docs/.lintstagedrc b/docs/.lintstagedrc deleted file mode 100644 index f08a8fa9e3e61..0000000000000 --- a/docs/.lintstagedrc +++ /dev/null @@ -1,16 +0,0 @@ -{ - "CHANGELOG.md": [ - "" - ], - "*.md": [ - "prettier --write", - "markdownlint-cli2" - ], - "*.scss": [ - ], - "*.js?(x)": [ - ], - "*.ts?(x)": [ - "prettier --write" - ] -} diff --git a/docs/.markdownlint.json b/docs/.markdownlint.json deleted file mode 100644 index 24d02034b461c..0000000000000 --- a/docs/.markdownlint.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "line-length": false, - "no-duplicate-heading": false, - "no-inline-html": { - "allowed_elements": [ - "a", - "br", - "code", - "div", - "img", - "iframe", - "p", - "pre", - "sup", - "CodeTabs", - "Grid", - "InfoBox", - "LoomVideo", - "WarningBox" - ] - }, - "no-trailing-punctuation": "" -} diff --git a/docs/.prettierrc b/docs/.prettierrc deleted file mode 100644 index 3c800898e375d..0000000000000 --- a/docs/.prettierrc +++ /dev/null @@ -1,20 +0,0 @@ -{ - "printWidth": 80, - "tabWidth": 2, - "useTabs": false, - "semi": true, - "singleQuote": false, - "arrowParens": "always", - "trailingComma": "es5", - "bracketSpacing": true, - "jsxBracketSameLine": false, - "proseWrap": "always", - "overrides": [ - { - "files": ["*.css", "*.scss"], - "options": { - "singleQuote": false - } - } - ] -} diff --git a/docs/.textlintrc b/docs/.textlintrc deleted file mode 100644 index 932102ec9712e..0000000000000 --- a/docs/.textlintrc +++ /dev/null @@ -1,8 +0,0 @@ -{ - "rules": { - "textlint-rule-alex": true, - "textlint-rule-diacritics": true, - "textlint-rule-no-todo": true, - "textlint-rule-stop-words": true - } -} diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md deleted file mode 100644 index f190c3e8d3049..0000000000000 --- a/docs/CHANGELOG.md +++ /dev/null @@ -1,387 +0,0 @@ -# Change Log - -All notable changes to this project will be documented in this file. -See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
- -## [0.26.53](https://github.com/cube-js/cube/compare/v0.26.52...v0.26.53) (2021-03-11) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.26.49](https://github.com/cube-js/cube/compare/v0.26.48...v0.26.49) (2021-03-05) - - -### Features - -* **elasticsearch-driver:** Support for elastic.co & improve docs ([#2240](https://github.com/cube-js/cube/issues/2240)) ([d8557f6](https://github.com/cube-js/cube/commit/d8557f6487ea98c19c055cc94b94b284dd273835)) - - - - - -## [0.26.46](https://github.com/cube-js/cube/compare/v0.26.45...v0.26.46) (2021-03-04) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.26.45](https://github.com/cube-js/cube/compare/v0.26.44...v0.26.45) (2021-03-04) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.26.44](https://github.com/cube-js/cube/compare/v0.26.43...v0.26.44) (2021-03-02) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.26.40](https://github.com/cube-js/cube/compare/v0.26.39...v0.26.40) (2021-03-01) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.26.38](https://github.com/cube-js/cube/compare/v0.26.37...v0.26.38) (2021-02-26) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.26.36](https://github.com/cube-js/cube/compare/v0.26.35...v0.26.36) (2021-02-25) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.26.34](https://github.com/cube-js/cube/compare/v0.26.33...v0.26.34) (2021-02-25) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.26.31](https://github.com/cube-js/cube/compare/v0.26.30...v0.26.31) (2021-02-23) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.26.30](https://github.com/cube-js/cube/compare/v0.26.29...v0.26.30) (2021-02-22) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.26.29](https://github.com/cube-js/cube/compare/v0.26.28...v0.26.29) 
(2021-02-22) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.26.20](https://github.com/cube-js/cube/compare/v0.26.19...v0.26.20) (2021-02-19) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.26.16](https://github.com/cube-js/cube/compare/v0.26.15...v0.26.16) (2021-02-18) - - -### Features - -* **druid-driver:** Support CUBEJS_DB_SSL ([d8124d0](https://github.com/cube-js/cube/commit/d8124d0a91c926ce0e1ffd21d6d057c164b01e79)) - - - - - -## [0.26.15](https://github.com/cube-js/cube/compare/v0.26.14...v0.26.15) (2021-02-16) - - -### Features - -* **clickhouse-driver:** HTTPS and readOnly support ([3d60ead](https://github.com/cube-js/cube/commit/3d60ead920635eb85c76b51a5c301e2f1fb08cb6)) - - - - - -## [0.26.14](https://github.com/cube-js/cube/compare/v0.26.13...v0.26.14) (2021-02-15) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.26.13](https://github.com/cube-js/cube/compare/v0.26.12...v0.26.13) (2021-02-12) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.26.10](https://github.com/cube-js/cube/compare/v0.26.9...v0.26.10) (2021-02-09) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.26.7](https://github.com/cube-js/cube/compare/v0.26.6...v0.26.7) (2021-02-09) - - -### Features - -* Support for Redis Sentinel + IORedis driver. 
fix [#1769](https://github.com/cube-js/cube/issues/1769) ([a5e7972](https://github.com/cube-js/cube/commit/a5e7972485fa97faaf9965b9794b0cf48256f484)) -* Use REDIS_URL for IORedis options (with santinels) ([988bfe5](https://github.com/cube-js/cube/commit/988bfe5526be3506fe7b773d247ad89b3287fad4)) - - - - - -## [0.26.6](https://github.com/cube-js/cube/compare/v0.26.5...v0.26.6) (2021-02-08) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.26.4](https://github.com/cube-js/cube/compare/v0.26.3...v0.26.4) (2021-02-02) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.26.2](https://github.com/cube-js/cube/compare/v0.26.1...v0.26.2) (2021-02-01) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -# [0.26.0](https://github.com/cube-js/cube/compare/v0.25.33...v0.26.0) (2021-02-01) - - -### Features - -* Storing userContext inside payload.u is deprecated, moved to root ([559bd87](https://github.com/cube-js/cube/commit/559bd8757d9754ab486eed88d1fdb0c280b82dc9)) - - - - - -## [0.25.32](https://github.com/cube-js/cube/compare/v0.25.31...v0.25.32) (2021-01-29) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.25.31](https://github.com/cube-js/cube/compare/v0.25.30...v0.25.31) (2021-01-28) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.25.28](https://github.com/cube-js/cube/compare/v0.25.27...v0.25.28) (2021-01-25) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.25.25](https://github.com/cube-js/cube/compare/v0.25.24...v0.25.25) (2021-01-24) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.25.23](https://github.com/cube-js/cube/compare/v0.25.22...v0.25.23) (2021-01-22) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.25.22](https://github.com/cube-js/cube/compare/v0.25.21...v0.25.22) (2021-01-21) - -**Note:** Version bump only for package @cubejs-docs/site - - 
- - - -## [0.25.21](https://github.com/cube-js/cube/compare/v0.25.20...v0.25.21) (2021-01-19) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.25.18](https://github.com/cube-js/cube/compare/v0.25.17...v0.25.18) (2021-01-14) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.25.14](https://github.com/cube-js/cube/compare/v0.25.13...v0.25.14) (2021-01-11) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.25.11](https://github.com/cube-js/cube/compare/v0.25.10...v0.25.11) (2021-01-04) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.25.4](https://github.com/cube-js/cube/compare/v0.25.3...v0.25.4) (2020-12-30) - - -### Features - -* **server-core:** Compatibility shim, for legacy imports ([2116799](https://github.com/cube-js/cube/commit/21167995045d7a5c0d1056dc034b14ec18205277)) -* **server-core:** Introduce CUBEJS_PRE_AGGREGATIONS_SCHEMA, use dev_preaggregations/prod_preaggregations by default ([e5bdf3d](https://github.com/cube-js/cube/commit/e5bdf3dfbd28d5e1c1e775c554c275304a0941f3)) - - - - - -## [0.25.2](https://github.com/cube-js/cube/compare/v0.25.1...v0.25.2) (2020-12-27) - - -### Features - -* Ability to set timeouts for polling in BigQuery/Athena ([#1675](https://github.com/cube-js/cube/issues/1675)) ([dc944b1](https://github.com/cube-js/cube/commit/dc944b1aaacc69dd74a9d9d31ceaf43e16d37ccd)), closes [#1672](https://github.com/cube-js/cube/issues/1672) - - - - - -## [0.25.1](https://github.com/cube-js/cube/compare/v0.25.0...v0.25.1) (2020-12-24) - - -### Features - -* **elasticsearch-driver:** Support CUBEJS_DB_ELASTIC_QUERY_FORMAT, Thanks [@dylman79](https://github.com/dylman79) ([a7460f5](https://github.com/cube-js/cube/commit/a7460f5d45dc7e9d96b65f6cc36df810a5b9312e)) - - - - - -# [0.25.0](https://github.com/cube-js/cube/compare/v0.24.15...v0.25.0) (2020-12-21) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## 
[0.24.14](https://github.com/cube-js/cube/compare/v0.24.13...v0.24.14) (2020-12-19) - - -### Features - -* Add HTTP Post to cubejs client core ([#1608](https://github.com/cube-js/cube/issues/1608)). Thanks to [@mnifakram](https://github.com/mnifakram)! ([1ebd6a0](https://github.com/cube-js/cube/commit/1ebd6a04ac97b31c6a51ef63bb1d4c040e524190)) - - - - - -## [0.24.13](https://github.com/cube-js/cube/compare/v0.24.12...v0.24.13) (2020-12-18) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.24.11](https://github.com/cube-js/cube/compare/v0.24.10...v0.24.11) (2020-12-17) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -## [0.24.9](https://github.com/cube-js/cube/compare/v0.24.8...v0.24.9) (2020-12-16) - -**Note:** Version bump only for package @cubejs-docs/site - - - - - -# Change Log - -All notable changes to this project will be documented in this file. See -[Conventional Commits](https://conventionalcommits.org) for commit guidelines. - -## [0.24.8](https://github.com/cube-js/cube/compare/v0.24.7...v0.24.8) (2020-12-15) - -### Features - -- **@cubejs-client/core:** Added pivotConfig option to alias series with a - prefix ([#1594](https://github.com/cube-js/cube/issues/1594)). Thanks to - @MattGson! 
- ([a3342f7](https://github.com/cube-js/cube/commit/a3342f7fd0389ce3ad0bc62686c0e787de25f411)) - -## [0.24.6](https://github.com/cube-js/cube/compare/v0.24.5...v0.24.6) (2020-12-13) - -**Note:** Version bump only for package @cubejs-docs/site - -## [0.24.5](https://github.com/cube-js/cube/compare/v0.24.4...v0.24.5) (2020-12-09) - -**Note:** Version bump only for package @cubejs-docs/site - -## [0.24.4](https://github.com/cube-js/cube/compare/v0.24.3...v0.24.4) (2020-12-07) - -**Note:** Version bump only for package @cubejs-docs/site diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index b37b9700fcc42..0000000000000 --- a/docs/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# Cube.js Docs - -This repository contains the [Gatsby][link-gatsby]-powered Cube.js -Documentation: [cube.dev/docs][link-docs-live] - -Docs are Markdown files located in the main Cube.js repository in -[`docs/content`][link-docs-content]. The build process uses the Gatsby CLI to -scan the `docs/content/` folder and generate a static HTML site. - -[link-gatsby]: https://www.gatsbyjs.com/ -[link-docs-live]: https://cube.dev/docs -[link-docs-content]: https://github.com/cube-js/cube/tree/master/docs/content - -## Development - -To start the project in development mode, run the following: - -```bash -yarn dev -``` - -To build a production-ready version of the site, run the following: - -```bash -source .env.production -yarn build --prefix-paths -``` - -## Formatting - -Run the following to format a Markdown file: - -```bash -yarn prettier content/ --write -``` - -If the file includes any alerts (`[[info | Note]]`), then wrap the alert with -`` and `` to prevent -Prettier from messing with them. - - -## Indexing - -The search functionality is powered by [DocSearch by Algolia][link-docsearch]. -The configuration file can be [found here][link-docsearch-config]. 
- -[link-docsearch]: https://docsearch.algolia.com/ -[link-docsearch-config]: - https://github.com/algolia/docsearch-configs/blob/master/configs/cubejs.json - -## Deployment - -### Staging - -[Netlify][link-netlify] is used for staging and pull request previews. The -staging URL is [cubejs-docs-staging.netlify.app][link-docs-staging]. - -[link-netlify]: https://www.netlify.com/ -[link-docs-staging]: https://cubejs-docs-staging.netlify.app - -PRs automatically generate [Deploy Previews] with unique URLs that can be found -in the status checks for a PR. - -### Production - -Deployment is handled via a [GitHub action][link-gh-docs-workflow]. - -[link-gh-docs-workflow]: /.github/workflows/docs.yml diff --git a/docs/build-netlify-prod.sh b/docs/build-netlify-prod.sh deleted file mode 100755 index 1488374517c87..0000000000000 --- a/docs/build-netlify-prod.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -set -eo pipefail - -rm -rf .cache -rm -rf public -rm -rf dist - -yarn && yarn build --prefix-paths && mkdir -p dist/docs && mv public/_headers public/_redirects dist/ && rsync -av --delete public/ dist/docs diff --git a/docs/build-netlify.sh b/docs/build-netlify.sh deleted file mode 100755 index 7a41026bcebc9..0000000000000 --- a/docs/build-netlify.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -eo pipefail - -rm -rf .cache -rm -rf public - -yarn && yarn build:netlify diff --git a/docs/build.sh b/docs/build.sh deleted file mode 100755 index 517273ed04b90..0000000000000 --- a/docs/build.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -eo pipefail - -rm -rf .cache -rm -rf public - -yarn && yarn build --prefix-paths diff --git a/docs/content/APIs-Integrations/Frontend-Integrations/Introduction-angular.mdx b/docs/content/APIs-Integrations/Frontend-Integrations/Introduction-angular.mdx deleted file mode 100644 index b8bdef4cf715d..0000000000000 --- a/docs/content/APIs-Integrations/Frontend-Integrations/Introduction-angular.mdx +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: 
Integration with Angular -permalink: /frontend-introduction/angular -category: APIs & Integrations -subCategory: JavaScript SDK -menuOrder: 54 ---- - -Cube is visualization-agnostic, so you can build any user interface for your -application. - -You can directly query Cube Backend using -JSON [Query Format](https://cube.dev/docs/query-format) via [HTTP API](https://cube.dev/docs/rest-api) -or [WebSockets](https://cube.dev/docs/real-time-data-fetch#web-sockets) and -visualize analytical data with tools of your choice. However, it’s much easier -to use the Cube JavaScript client and bindings for popular frameworks such as -React, Angular, and Vue. - -The client has methods to communicate with the Cube API Gateway, and retrieve -and process data. It is designed to work with existing charting libraries -including Chart.js, D3.js, and more. - -## Cube JavaScript Client - -The client provides methods to solve common tasks: - -**Abstract from the transport and query data.** You can -[fetch data](https://cube.dev/docs/@cubejs-client-core#load) from Cube Backend -or subscribe to [real-time updates](https://cube.dev/docs/real-time-data-fetch) -regardless of the protocol, be it HTTP or WebSockets. - -**Transform data for visualization.** You can -[pivot](https://cube.dev/docs/@cubejs-client-core#pivot) the result set to -display as a [chart](https://cube.dev/docs/@cubejs-client-core#chart-pivot) or -as a [table](https://cube.dev/docs/@cubejs-client-core#table-pivot), split into -[series](https://cube.dev/docs/@cubejs-client-core#series) or -[table columns](https://cube.dev/docs/@cubejs-client-core#table-columns). 
- -**Simplify work with complex query types.** You can build -[Drill Down](https://cube.dev/docs/@cubejs-client-core#drill-down) queries and -[decompose](https://cube.dev/docs/@cubejs-client-core#decompose) the results of -[compareDateRange](https://cube.dev/docs/query-format#time-dimensions-format) or -[Data Blending](https://cube.dev/docs/recipes/data-blending) queries. - -[Learn more](https://cube.dev/docs/@cubejs-client-core) in the documentation for -the `@cubejs-client/core` package. - -## Cube Angular Package - -The package provides convenient tools to work with Cube in Angular: - -**Modules.** Inject -[CubejsClientModule](https://cube.dev/docs/@cubejs-client-vue#query-builder) and -[CubejsClient](https://cube.dev/docs/@cubejs-client-vue#query-renderer) into -your components and services to get access to `@cubejs-client/core` API. - -**Subjects.** Use [RxJS Subject](https://cube.dev/docs/@cubejs-client-ngx#api) -and query to watch changes. - -## Example Usage - -Here are the typical steps to query and visualize analytical data in Angular: - -- **Import `@cubejs-client/core` and `@cubejs-client/ngx` packages.** These - packages provide all the necessary methods and convenient Angular tools. -- **Create an instance of Cube JavaScript Client.** The client is initialized - with Cube API URL. In development mode, the default URL is - [http://localhost:4000/cubejs-api/v1](http://localhost:4000/cubejs-api/v1). - The client is also initialized with an - [API token](https://cube.dev/docs/security), but it takes effect only in - [production](https://cube.dev/docs/deployment/production-checklist). -- **Query data from Cube Backend and Transform data for visualization.** Use - [CubejsClient](https://cube.dev/docs/@cubejs-client-ngx#api) to load data. The - client accepts a query, which is a plain JavaScript object. See - [Query Format](https://cube.dev/docs/query-format) for details. 
-- **Visualize the data.** Use tools of your choice to draw charts and create - visualizations. - -See an example of using Cube with Angular and Chart.js library. Note that you -can always use a different charting library that suits your needs: - - - -## Getting Started - -You can install Cube JavaScript Client and the Angular package with npm or Yarn: - -```bash{outputLines: 1,3-4} -# npm -npm install --save @cubejs-client/core @cubejs-client/ngx - -# Yarn -yarn add @cubejs-client/core @cubejs-client/ngx -``` - -Now you can build your application from scratch or connect to one of our -[supported data visualization tools](/config/downstream). You can also -[explore example applications](https://cube.dev/docs/examples) built with Cube. diff --git a/docs/content/APIs-Integrations/Frontend-Integrations/Introduction-react.mdx b/docs/content/APIs-Integrations/Frontend-Integrations/Introduction-react.mdx deleted file mode 100644 index 0295ae098013d..0000000000000 --- a/docs/content/APIs-Integrations/Frontend-Integrations/Introduction-react.mdx +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: Integration with React -permalink: /frontend-introduction/react -category: APIs & Integrations -subCategory: JavaScript SDK -menuOrder: 52 ---- - -Cube is visualization-agnostic, so you can build any user interface for your -application. - -You can directly query Cube Backend using -JSON [Query Format](https://cube.dev/docs/query-format) via [HTTP API](https://cube.dev/docs/rest-api) -or [WebSockets](https://cube.dev/docs/real-time-data-fetch#web-sockets) and -visualize analytical data with tools of your choice. However, it's much easier -to use the Cube JavaScript client and bindings for popular frameworks such as -React, Angular, and Vue. - -The client has methods to communicate with Cube API Gateway and retrieve and -process data. It is designed to work with existing charting libraries including -Chart.js, D3.js, and more. 
- -## Cube JavaScript Client - -The client provides methods to solve common tasks: - -**Abstract from the transport and query data.** You can -[fetch data](https://cube.dev/docs/@cubejs-client-core#load) from Cube Backend -or subscribe to [real-time updates](https://cube.dev/docs/real-time-data-fetch) -regardless of the protocol, be it HTTP or WebSockets. - -**Transform data for visualization.** You can -[pivot](https://cube.dev/docs/@cubejs-client-core#pivot) the result set to -display as a [chart](https://cube.dev/docs/@cubejs-client-core#chart-pivot) or -as a [table](https://cube.dev/docs/@cubejs-client-core#table-pivot), split into -[series](https://cube.dev/docs/@cubejs-client-core#series) or -[table columns](https://cube.dev/docs/@cubejs-client-core#table-columns). - -**Simplify work with complex query types.** You can build -[Drill Down](https://cube.dev/docs/@cubejs-client-core#drill-down) queries and -[decompose](https://cube.dev/docs/@cubejs-client-core#decompose) the results of -[compareDateRange](https://cube.dev/docs/query-format#time-dimensions-format) or -[Data Blending](https://cube.dev/docs/recipes/data-blending) queries. - -[Learn more](https://cube.dev/docs/@cubejs-client-core) in the documentation for -the `@cubejs-client/core` package. - -## Cube React Package - -The package provides convenient tools to work with Cube in React: - -**Hooks.** You can add the -[useCubeQuery hook](https://cube.dev/docs/@cubejs-client-react#use-cube-query) -to functional React components to execute Cube queries. - -**Components.** You can use -[QueryBuilder](https://cube.dev/docs/@cubejs-client-react#query-builder) and -[QueryRenderer](https://cube.dev/docs/@cubejs-client-react#query-renderer) -components to separate state management and API calls from your rendering code. 
-You can also use -[CubeProvider](https://cube.dev/docs/@cubejs-client-react#cube-provider) and -[CubeContext](https://cube.dev/docs/@cubejs-client-react#cube-context) -components for direct access to Cube Client anywhere in your application. - -## Example Usage - -Here are the typical steps to query and visualize analytical data in React: - -- **Import `@cubejs-client/core` and `@cubejs-client/react` packages.** These - packages provide all the necessary methods and convenient React tools. -- **Create an instance of Cube JavaScript Client.** The client is initialized - with Cube API URL. In development mode, the default URL is - [http://localhost:4000/cubejs-api/v1](http://localhost:4000/cubejs-api/v1). - The client is also initialized with an - [API token](https://cube.dev/docs/security), but it takes effect only in - [production](https://cube.dev/docs/deployment/production-checklist). -- **Query data from Cube Backend.** In functional React components, use the - `useCubeQuery` hook to execute a query, which is a plain JavaScript object. - See [Query Format](https://cube.dev/docs/query-format) for details. -- **Transform data for visualization.** The result set has convenient methods, - such as `series` and `chartPivot`, to prepare data for charting. -- **Visualize the data.** Use tools of your choice to draw charts and create - visualizations. - -See an example of using Cube with React and Chart.js library. Note that you can -always use a different charting library that suits your needs: - - - -## Getting Started - -You can install Cube JavaScript Client and the React package with npm or Yarn: - -```bash{outputLines: 1,3-4} -# npm -npm install --save @cubejs-client/core @cubejs-client/react - -# Yarn -yarn add @cubejs-client/core @cubejs-client/react -``` - -Now you can build your application from scratch or connect to one of our -[supported data visualization tools](/config/downstream). 
You can also -[explore example applications](https://cube.dev/docs/examples) built with Cube. diff --git a/docs/content/APIs-Integrations/Frontend-Integrations/Introduction-vue.mdx b/docs/content/APIs-Integrations/Frontend-Integrations/Introduction-vue.mdx deleted file mode 100644 index b4496a7da19ee..0000000000000 --- a/docs/content/APIs-Integrations/Frontend-Integrations/Introduction-vue.mdx +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: Integration with Vue -permalink: /frontend-introduction/vue -category: APIs & Integrations -subCategory: JavaScript SDK -menuOrder: 53 ---- - -Cube is visualization-agnostic, so you can build any user interface for your -application. - -You can directly query Cube Backend using -JSON [Query Format](https://cube.dev/docs/query-format) via [HTTP API](https://cube.dev/docs/rest-api) -or [WebSockets](https://cube.dev/docs/real-time-data-fetch#web-sockets) and -visualize analytical data with tools of your choice. However, it's much easier -to use the Cube JavaScript client and bindings for popular frameworks such as -React, Angular, and Vue. - -The client has methods to communicate with Cube API Gateway and retrieve and -process data. It is designed to work with existing charting libraries including -Chart.js, D3.js, and more. - -## Cube JavaScript Client - -The client provides methods to solve common tasks: - -**Abstract from the transport and query data.** You can -[fetch data](https://cube.dev/docs/@cubejs-client-core#load) from Cube Backend -or subscribe to [real-time updates](https://cube.dev/docs/real-time-data-fetch) -regardless of the protocol, be it HTTP or WebSockets. 
- -**Transform data for visualization.** You can -[pivot](https://cube.dev/docs/@cubejs-client-core#pivot) the result set to -display as a [chart](https://cube.dev/docs/@cubejs-client-core#chart-pivot) or -as a [table](https://cube.dev/docs/@cubejs-client-core#table-pivot), split into -[series](https://cube.dev/docs/@cubejs-client-core#series) or -[table columns](https://cube.dev/docs/@cubejs-client-core#table-columns). - -**Simplify work with complex query types.** You can build -[Drill Down](https://cube.dev/docs/@cubejs-client-core#drill-down) queries and -[decompose](https://cube.dev/docs/@cubejs-client-core#decompose) the results of -[compareDateRange](https://cube.dev/docs/query-format#time-dimensions-format) or -[Data Blending](https://cube.dev/docs/recipes/data-blending) queries. - -[Learn more](https://cube.dev/docs/@cubejs-client-core) in the documentation for -the `@cubejs-client/core` package. - -## Cube Vue Package - -The package provides convenient tools to work with Cube in Vue: - -**Components.** You can use -[QueryBuilder](https://cube.dev/docs/@cubejs-client-vue#query-builder) and -[QueryRenderer](https://cube.dev/docs/@cubejs-client-vue#query-renderer) -components to abstract state management and API calls from your rendering code. - -## Example Usage - -Here are the typical steps to query and visualize analytical data in Vue: - -- **Import `@cubejs-client/core` and `@cubejs-client/vue` packages.** These - packages provide all the necessary methods and convenient Vue tools. -- **Create an instance of Cube JavaScript Client.** The client is initialized - with Cube API URL. In development mode, the default URL is - [http://localhost:4000/cubejs-api/v1](http://localhost:4000/cubejs-api/v1). - The client is also initialized with an - [API token](https://cube.dev/docs/security), but it takes effect only in - [production](https://cube.dev/docs/deployment/production-checklist). 
-- **Query data from Cube Backend.** Use - [QueryBuilder](https://cube.dev/docs/@cubejs-client-vue#query-builder) or - [QueryRenderer](https://cube.dev/docs/@cubejs-client-vue#query-renderer) and - their props to execute a query and transform the result set. See - [Query Format](https://cube.dev/docs/query-format) for details. -- **Transform data for visualization.** Use - [QueryBuilder](https://cube.dev/docs/@cubejs-client-vue#query-builder) and - [QueryRenderer](https://cube.dev/docs/@cubejs-client-vue#query-renderer) slots - props to transform the result set. Furthermore, the result set has convenient - methods, such as `series` and `chartPivot`, to prepare data for charting. -- **Visualize the data.** Use tools of your choice to draw charts and create - visualizations. - -See an example of using Cube with Vue and Chart.js library. Note that you can -always use a different charting library that suits your needs: - - - -## Getting Started - -You can install Cube JavaScript Client and the Vue package with npm or Yarn: - -```bash{outputLines: 1,3-4} -# npm -npm install --save @cubejs-client/core @cubejs-client/vue3 - -# Yarn -yarn add @cubejs-client/core @cubejs-client/vue3 -``` - -**Note.** If you're using Vue 2, please use `@cubejs-client/vue`. - -Now you can build your application from scratch or connect to one of our -[supported data visualization tools](/config/downstream). You can also -[explore example applications](https://cube.dev/docs/examples) built with Cube. 
diff --git a/docs/content/APIs-Integrations/Frontend-Integrations/Introduction.mdx b/docs/content/APIs-Integrations/Frontend-Integrations/Introduction.mdx deleted file mode 100644 index 44c14e744223e..0000000000000 --- a/docs/content/APIs-Integrations/Frontend-Integrations/Introduction.mdx +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: JavaScript SDK -permalink: /frontend-introduction -category: APIs & Integrations -subCategory: JavaScript SDK -menuOrder: 51 ---- - -Cube is visualization-agnostic, so you can build any user interface for your -application. - -You can directly query Cube Backend using -JSON [Query Format](https://cube.dev/docs/query-format) via [HTTP API](https://cube.dev/docs/rest-api) -or [WebSockets](https://cube.dev/docs/real-time-data-fetch#web-sockets) and -visualize analytical data with tools of your choice. However, it's much easier -to use the Cube JavaScript client and bindings for popular frameworks such as -React, Angular, and Vue. - -The client has methods to communicate with Cube API Gateway and retrieve and -process data. It is designed to work with existing charting libraries including -Chart.js, D3.js, and more. - -## Cube JavaScript Client - -The client provides methods to solve common tasks: - -**Abstract from the transport and query data.** You can -[fetch data](https://cube.dev/docs/@cubejs-client-core#load) from Cube Backend -or subscribe to [real-time updates](https://cube.dev/docs/real-time-data-fetch) -regardless of the protocol, be it HTTP or WebSockets. - -**Transform data for visualization.** You can -[pivot](https://cube.dev/docs/@cubejs-client-core#pivot) the result set to -display as a [chart](https://cube.dev/docs/@cubejs-client-core#chart-pivot) or -as a [table](https://cube.dev/docs/@cubejs-client-core#table-pivot), split into -[series](https://cube.dev/docs/@cubejs-client-core#series) or -[table columns](https://cube.dev/docs/@cubejs-client-core#table-columns). 
- -**Simplify work with complex query types.** You can build -[Drill Down](https://cube.dev/docs/@cubejs-client-core#drill-down) queries and -[decompose](https://cube.dev/docs/@cubejs-client-core#decompose) the results of -[compareDateRange](https://cube.dev/docs/query-format#time-dimensions-format) or -[Data Blending](https://cube.dev/docs/recipes/data-blending) queries. - -[Learn more](https://cube.dev/docs/@cubejs-client-core) in the documentation for -the `@cubejs-client/core` package. - -## Example Usage - -Here are the typical steps to query and visualize analytical data: - -- **Import the `@cubejs-client/core` package.** This package provides all the - necessary methods. -- **Create an instance of Cube JavaScript Client.** The client is initialized - with Cube API URL. In development mode, the default URL is - [http://localhost:4000/cubejs-api/v1](http://localhost:4000/cubejs-api/v1). - The client is also initialized with an - [API token](https://cube.dev/docs/security), but it takes effect only in - [production](https://cube.dev/docs/deployment/production-checklist). -- **Query data from Cube Backend.** The client accepts a query, which is a plain - JavaScript object. See [Query Format](https://cube.dev/docs/query-format) for - details. -- **Transform data for visualization.** The result set has convenient methods, - such as `series` and `chartPivot`, to prepare data for charting. -- **Visualize the data.** Use tools of your choice to draw charts and create - visualizations. - -See an example of using Cube with vanilla JavaScript and Chart.js library. 
Note -that you can always use a different charting library that suits your needs: - - - -## Getting Started - -You can install Cube JavaScript Client with npm or Yarn: - -```bash{outputLines: 1,3-4} -# npm -npm install --save @cubejs-client/core - -# Yarn -yarn add @cubejs-client/core -``` - -Now you can build your application from scratch or connect to one of our -[supported data visualization tools](/config/downstream). You can also -[explore example applications](https://cube.dev/docs/examples) built with Cube. diff --git a/docs/content/APIs-Integrations/GraphQL-API/Overview.mdx b/docs/content/APIs-Integrations/GraphQL-API/Overview.mdx deleted file mode 100644 index 4b17f6f84816f..0000000000000 --- a/docs/content/APIs-Integrations/GraphQL-API/Overview.mdx +++ /dev/null @@ -1,221 +0,0 @@ ---- -title: GraphQL API -permalink: /http-api/graphql -category: APIs & Integrations -subCategory: GraphQL API -menuOrder: 31 ---- - -GraphQL API enables Cube to deliver data over the HTTP protocol to -[GraphQL][graphql]-enabled data applications, e.g., most commonly, front-end -applications. - -Often, the GraphQL API is used to enable the [embedded analytics][cube-ea] -use case. - -Under the hood, the GraphQL API is exposed via the `/graphql` endpoint of the -[REST API][ref-rest-api]. - -## Configuration - -GraphQL API is enabled by default and secured using -[API scopes][ref-api-scopes] and [CORS][ref-cors]. To disallow access to -GraphQL API, disable the `graphql` scope, e.g., by setting the -`CUBEJS_DEFAULT_API_SCOPES` environment variable to `meta,data`. - -## Getting started - -First, ensure you're running Cube v0.28.58 or later. Then start the project -locally in development mode, and navigate to `http://localhost:4000/` in your -browser. After generating data models and running query you should see the -GraphiQL interface if you click 'GraphiQL' button. If you click the 'Docs' -button in the top-right, you can explore the introspected schema. 
- -As an example, let's use the `orders` cube from the example eCommerce database: - - - -```yaml -cubes: - - name: orders - sql_table: orders - - measures: - - name: count - type: count - - dimensions: - - name: status - sql: status - type: string - - - name: created_at - sql: created_at - type: time -``` - -```javascript -cube(`orders`, { - sql_table: `orders`, - - measures: { - count: { - type: `count`, - }, - }, - - dimensions: { - status: { - sql: `status`, - type: `string`, - }, - - created_at: { - sql: `created_at`, - type: `time`, - }, - }, -}); -``` - - - -A GraphQL query to return the number of orders by status would look something -like this: - -```graphql -{ - cube { - orders { - count - status - created_at { - day - } - } - } -} -``` - -The equivalent query to the REST API endpoint would look like this: - -```json -{ - "measures": ["orders.count"], - "dimensions": ["orders.status"], - "timeDimensions": [ - { - "dimension": "orders.created_at", - "granularity": "day" - } - ] -} -``` - -### <--{"id" : "Getting started"}--> Modifying time dimension granularity - -The granularity for a time dimension can easily be changed by specifying it in -the query: - -```graphql -{ - cube { - orders { - created_at { - month - } - } - } -} -``` - -[Any supported granularity][ref-schema-ref-preagg-granularity] can be used. If -you prefer to not specify a granularity, then use `value`: - -```graphql -{ - cube { - orders { - created_at { - value - } - } - } -} -``` - -### <--{"id" : "Getting started"}--> Specifying filters and ranges - -Filters can be set on the load query or on a specific cube. Specifying the -filter on the load query applies it to all cubes in the query. 
Filters can be -added to the query as follows: - -```graphql -query { - cube(limit: 100, offset: 50, timezone: "America/Los_Angeles") { - orders( - orderBy: { created_at: asc, count: desc } - where: { status: { equals: "completed" } } - ) { - count - status - created_at - } - } -} -``` - -Some other differences between the JSON query filters and the GraphQL filters to -note: - -- `number` values are used for number types instead of strings -- The `notSet` filter is replaced by `{ set: false }` -- New `in` and `notIn` filters to check for multiple values -- `AND` and `OR` fields for boolean operators - -The GraphQL API supports `@skip` and `@include` directives too: - -```graphql -query GetOrders($byStatus: Boolean) { - cube(limit: 100, offset: 50, timezone: "America/Los_Angeles") { - orders( - orderBy: { created_at: asc, count: desc } - where: { status: { equals: "completed" } } - ) { - count - status @include(if: $byStatus) - created_at - } - } -} -``` - -### <--{"id" : "Getting started"}--> Querying multiple cubes - -Using the same `orders` cube as before, let's try and get the numbers of -products for each order status too. 
We can do this by adding the `products` cube -to our query as follows: - -```graphql -{ - cube { - orders { - status - count - created_at { - month - } - } - products { - count - } - } -} -``` - -[ref-schema-ref-preagg-granularity]: /schema/reference/pre-aggregations#parameters-granularity -[ref-rest-api]: /http-api/rest -[ref-api-scopes]: /http-api/rest#configuration-api-scopes -[ref-cors]: /http-api/rest#configuration-cors -[graphql]: https://graphql.org -[cube-ea]: https://cube.dev/use-cases/embedded-analytics \ No newline at end of file diff --git a/docs/content/APIs-Integrations/Orchestration-API/Airflow.mdx b/docs/content/APIs-Integrations/Orchestration-API/Airflow.mdx deleted file mode 100644 index e7e7b4b3582f5..0000000000000 --- a/docs/content/APIs-Integrations/Orchestration-API/Airflow.mdx +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: Integration with Apache Airflow -menuTitle: Integration with Airflow -permalink: /orchestration-api/airflow -category: APIs & Integrations -subCategory: Orchestration API -menuOrder: 62 ---- - -[Apache Airflow][airflow] is a popular open-source workflow scheduler commonly -used for data orchestration. [Astro][astro] is a fully managed service for -Airflow by [Astronomer][astro]. - -This guide demonstrates how to setup Cube and Airflow to work together so that -Airflow can push changes from upstream data sources to Cube via the -[Orchestration API][ref-orchestration-api]. - -## Tasks - -In Airflow, pipelines are represented by directed acyclic graphs (DAGs), -Python function decorated with a `@dag` decorator. DAGs include calls to -tasks, implemented as instances of the `Operator` class. Operators can -perform various tasks: poll for some precondition, perform -extract-load-transform (ETL), or trigger external systems like Cube. - -Integration between Cube and Airflow is enabled by the -[`airflow-provider-cube`][github-airflow-provider-cube] package that provides -the following operators. 
- -### <--{"id" : "Operators"}--> CubeQueryOperator - -`CubeQueryOperator` is used to query Cube via the -[`/v1/load`][ref-load-endpoint] endpoint of the [REST API][ref-rest-api]. - -It supports the following options: - -| Option | Type | Default | Description | -| -------------- | -------- | -------------- | ---------------------------------------- | -| `cube_conn_id` | `string` | `cube_default` | Airflow connection name. | -| `headers` | `dict` | | HTTP headers to be added to the request. | -| `query` | `dict` | | Cube query object. | -| `timeout` | `int` | 30 | Response wait timeout in seconds. | -| `wait` | `int` | 10 | Interval between API calls in seconds. | - -### <--{"id" : "Operators"}--> CubeBuildOperator - -`CubeBuildOperator` is used to trigger pre-aggregation builds and check their -status via the [`/v1/pre-aggregations/jobs`][ref-ref-jobs-endpoint] endpoint -of the [Orchestration API][ref-orchestration-api]. - -It supports the following options: - -| Option | Type | Default | Description | -| -------------- | -------- | ---------------- | -------------------------------------------------------------- | -| `cube_conn_id` | `string` | `cube_default` | Airflow connection name. | -| `headers` | `dict` | | HTTP headers to be added to the request. | -| `selector` | `dict` | | [`/v1/pre-aggregations/jobs`][ref-ref-jobs-endpoint] selector. | -| `complete` | `bool` | `False` | Whether a task should wait for builds to complete or not. | -| `wait` | `int` | 10 | Interval between API calls in seconds. | - -## Installation - -Install [Astro CLI installed][astro-cli]. 
- -Create a new directory and [initialize][astro-cli-dev-init] a new Astro project: - -```bash -mkdir cube-astro -cd cube-astro -astro dev init -``` - -Add the integration package to `requirements.txt`: - -```bash -echo "airflow-provider-cube" >> ./requirements.txt -``` - -## Configuration - -### <--{"id" : "Configuration"}--> Connection - -Create an Airflow connection via the web console or by adding the following -contents to the `airflow_settings.yaml` file: - -```yaml -airflow: - connections: - - conn_id: cube_default - conn_type: generic - conn_host: https://awesome-ecom.gcp-us-central1.cubecloudapp.dev - conn_schema: - conn_login: - conn_password: SECRET - conn_port: - conn_extra: - security_context: {} -``` - -Let's break the options down: - -- By default, Cube operators use `cube_default` as an Airflow connection name. -- The connection should be of the `generic` type. -- `conn_host` should be set to the URL of your Cube deployment. -- `conn_password` should be set to the value of the `CUBEJS_API_SECRET` -environment variable. -- `conn_extra` should contain a security context (as `security_context`) that -will be sent with API requests. - -### <--{"id" : "Configuration"}--> DAGs - -Create a new DAG named `cube_query.py` in the `dags` subdirectory with the -following contents. As you can see, the `CubeQueryOperator` accepts a Cube -query via the `query` option. 
- -```python -from typing import Any -from pendulum import datetime -from airflow.decorators import dag, task -from cube_provider.operators.cube import CubeQueryOperator - -@dag( - start_date=datetime(2023, 6, 1), - schedule='*/1 * * * *', - max_active_runs=1, - concurrency=1, - default_args={"retries": 1, "cube_conn_id": "cube_default"}, - tags=["cube"], -) -def cube_query_workflow(): - query_op = CubeQueryOperator( - task_id="query_op", - query={ - "measures": ["Orders.count"], - "dimensions": ["Orders.status"] - } - ) - - @task() - def print_op(data: Any): - print(f"Result: {data}") - - print_op(query_op.output) - -cube_query_workflow() -``` - -Create a new DAG named `cube_build.py` in the `dags` subdirectory with the -following contents. As you can see, the `CubeBuildOperator` accepts a -pre-aggregation selector via the `selector` option. - -```python -from typing import Any -from pendulum import datetime -from airflow.decorators import dag, task -from cube_provider.operators.cube import CubeBuildOperator - -@dag( - start_date=datetime(2023, 6, 1), - schedule='*/1 * * * *', - max_active_runs=1, - concurrency=1, - default_args={"retries": 1, "cube_conn_id": "cube_default"}, - tags=["cube"], -) -def cube_build_workflow(): - build_op = CubeBuildOperator( - task_id="build_op", - selector={ - "contexts": [ - {"securityContext": {}} - ], - "timezones": ["UTC"] - }, - complete=True, - wait=10, - ) - - @task() - def print_op(data: Any): - print(f"Result: {data}") - - print_op(build_op.output) - -cube_build_workflow() -``` - -Pay attention to the `complete` option. When it's set to `True`, the operator -will wait for pre-aggregation builds to complete before allowing downstream -tasks to run. 
- -## Running workflows - -Now, you can run these DAGs: - -```bash -astro run cube_query_workflow -astro run cube_build_workflow -``` - -Alternatively, you can run Airflow and navigate to the web console at -[`localhost:8080`](http://localhost:8080) (use `admin`/`admin` to -authenticate): - -```bash -astro dev start -``` - -[airflow]: https://airflow.apache.org -[astro]: https://www.astronomer.io -[astro-cli]: https://docs.astronomer.io/astro/cli/overview -[astro-cli-dev-init]: https://docs.astronomer.io/astro/cli/astro-dev-init -[github-airflow-provider-cube]: https://github.com/cube-js/airflow-provider-cube/ -[ref-load-endpoint]: /rest-api#v-1-load -[ref-ref-jobs-endpoint]: /rest-api#v-1-pre-aggregations-jobs -[ref-rest-api]: /http-api/rest -[ref-orchestration-api]: /orchestration-api \ No newline at end of file diff --git a/docs/content/APIs-Integrations/Orchestration-API/Dagster.mdx b/docs/content/APIs-Integrations/Orchestration-API/Dagster.mdx deleted file mode 100644 index 22ee12c8cf43b..0000000000000 --- a/docs/content/APIs-Integrations/Orchestration-API/Dagster.mdx +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: Integration with Dagster -permalink: /orchestration-api/dagster -category: APIs & Integrations -subCategory: Orchestration API -menuOrder: 63 ---- - -[Dagster][dagster] is a popular open-source data pipeline orchestrator. -[Dagster Cloud][dagster-cloud] is a fully managed service for Dagster. - -This guide demonstrates how to setup Cube and Dagster to work together so that -Dagster can push changes from upstream data sources to Cube via the -[Orchestration API][ref-orchestration-api]. - -## Resources - -In Dagster, each workflow is represented by jobs, Python functions decorated -with a `@job` decorator. Jobs include calls to ops, Python functions -decorated with an `@op` decorator. Ops represent distinct pieces of work -executed within a job. 
They can perform various jobs: poll for some -precondition, perform extract-load-transform (ETL), or trigger external -systems like Cube. - -Integration between Cube and Dagster is enabled by the -[`dagster_cube`][github-dagster-cube] package. - - - -Cube and Dagster integration package was originally contributed by -[Olivier Dupuis](https://github.com/olivierdupuis), founder of -[discursus.io](https://www.discursus.io), for which we're very grateful. - - - -The package provides the `CubeResource` class: - -- For querying Cube via the [`/v1/load`][ref-load-endpoint] endpoint of the -[REST API][ref-rest-api]. -- For triggering pre-aggregation builds via the -[`/v1/pre-aggregations/jobs`][ref-ref-jobs-endpoint] endpoint of the -[Orchestration API][ref-orchestration-api]. - -Please refer to the [package documentation][github-dagster-cube-docs] for -details and options reference. - -## Installation - -Install [Dagster][dagster-docs-install]. - -Create a new directory: - -```bash -mkdir cube-dagster -cd cube-dagster -``` - -Install the integration package: - -```bash -pip install dagster_cube -``` - -## Configuration - -Create a new file named `cube.py` with the following contents: - -```python -from dagster import asset -from dagster_cube.cube_resource import CubeResource - -@asset -def cube_query_workflow(): - my_cube_resource = CubeResource( - instance_url="https://awesome-ecom.gcp-us-central1.cubecloudapp.dev/cubejs-api/v1/", - api_key="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOjEwMDAwMDAwMDAsImV4cCI6NTAwMDAwMDAwMH0.OHZOpOBVKr-sCwn8sbZ5UFsqI3uCs6e4omT7P6WVMFw" - ) - - response = my_cube_resource.make_request( - method="POST", - endpoint="load", - data={ - 'query': { - 'measures': ['Orders.count'], - 'dimensions': ['Orders.status'] - } - } - ) - - return response - -@asset -def cube_build_workflow(): - my_cube_resource = CubeResource( - instance_url="https://awesome-ecom.gcp-us-central1.cubecloudapp.dev/cubejs-api/v1/", - 
api_key="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOjEwMDAwMDAwMDAsImV4cCI6NTAwMDAwMDAwMH0.OHZOpOBVKr-sCwn8sbZ5UFsqI3uCs6e4omT7P6WVMFw" - ) - - response = my_cube_resource.make_request( - method="POST", - endpoint="pre-aggregations/jobs", - data={ - 'action': 'post', - 'selector': { - 'timezones': ['UTC'], - 'contexts': [{'securityContext': {}}] - } - } - ) - - return response -``` - -As you can see, the `make_request` method for the `load` endpoint accepts a -Cube query via the `query` option and the `make_request` method for the -`pre-aggregations/jobs` endpoint accepts a pre-aggregation selector via the -`selector` option. - -## Running jobs - -Now, you can load these jobs to Dagster: - -```bash -dagster dev -f cube.py -``` - -Navigate to [Dagit UI][dagster-docs-dagit] at [localhost:3000](http://localhost:3000) -and click Materialize all to run both jobs: - - - -[dagster]: https://dagster.io -[dagster-cloud]: https://dagster.io/cloud -[dagster-docs-install]: https://docs.dagster.io/getting-started/install -[dagster-docs-dagit]: https://docs.dagster.io/concepts/dagit/dagit -[github-dagster-cube]: https://github.com/discursus-data/dagster-cube -[github-dagster-cube-docs]: https://github.com/discursus-data/dagster-cube/blob/main/README.md -[ref-load-endpoint]: /rest-api#v-1-load -[ref-ref-jobs-endpoint]: /rest-api#v-1-pre-aggregations-jobs -[ref-rest-api]: /http-api/rest -[ref-orchestration-api]: /orchestration-api \ No newline at end of file diff --git a/docs/content/APIs-Integrations/Orchestration-API/Overview.mdx b/docs/content/APIs-Integrations/Orchestration-API/Overview.mdx deleted file mode 100644 index 7f3a2d099203b..0000000000000 --- a/docs/content/APIs-Integrations/Orchestration-API/Overview.mdx +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: Orchestration API -permalink: /orchestration-api -category: APIs & Integrations -subCategory: Orchestration API -menuOrder: 61 ---- - -Orchestration API enables Cube to work with data orchestration tools and let -them 
_push_ changes from upstream data sources to Cube, as opposed to -letting Cube _pull_ changes from upstream data sources via the -[`scheduledRefresh`][ref-scheduled-refresh] configration option of -pre-aggregations. - - - -Orchestration API can be used to implement both [embedded analytics][cube-ea] -and internal or self-serve [business intelligence][cube-issbi] use cases. -When implementing [real-time analytics][cube-rta], consider pulling data from -upstream data sources with [lambda pre-aggregations][ref-lambda-pre-aggs]. - -Under the hood, the Orchestration API is exposed via the -[`/v1/pre-aggregations/jobs`][ref-ref-jobs-endpoint] endpoint of the -[REST API][ref-rest-api]. - -## Supported tools - -Orchestration API has integration packages to work with popular data -orchestration tools. Check the following guides to get tool-specific -instructions: - - - - - - - -## Configuration - -Orchestration API is enabled by default but inaccessible due to the default -[API scopes][ref-api-scopes] configuration. To allow access to the -Orchestration API, enable the `jobs` scope, e.g., by setting the -`CUBEJS_DEFAULT_API_SCOPES` environment variable to `meta,data,graphql,jobs`. - -## Building pre-aggregations - -Orchestration API allows to trigger pre-aggregation builds programmatically. -It can be useful for data orchestration tools to push changes from upstream -data sources to Cube or for any third parties to invalidate and rebuild -pre-aggregations on demand. - -You can trigger pre-aggregation builds and check build statuses using the -[`/v1/pre-aggregations/jobs`][ref-ref-jobs-endpoint] endpoint. 
It is possible -to rebuild: - -- All pre-aggregations -- Specific pre-aggregations (e.g., refresh data from some columns) -- Pre-aggregations using specific cubes (e.g., refresh data from some tables) -- Pre-aggregations using cubes from specific data sources (e.g., refresh data -from some Snowflake connection) - -[ref-scheduled-refresh]: /schema/reference/pre-aggregations#parameters-scheduled-refresh -[cube-ea]: https://cube.dev/use-cases/embedded-analytics -[cube-issbi]: https://cube.dev/use-cases/semantic-layer -[cube-rta]: https://cube.dev/use-cases/real-time-analytics -[ref-lambda-pre-aggs]: /caching/pre-aggregations/lambda-pre-aggregations -[ref-rest-api]: /http-api/rest -[ref-api-scopes]: /http-api/rest#configuration-api-scopes -[ref-ref-jobs-endpoint]: /rest-api#v-1-pre-aggregations-jobs diff --git a/docs/content/APIs-Integrations/Orchestration-API/Prefect.mdx b/docs/content/APIs-Integrations/Orchestration-API/Prefect.mdx deleted file mode 100644 index fefd80c7d4baf..0000000000000 --- a/docs/content/APIs-Integrations/Orchestration-API/Prefect.mdx +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: Integration with Prefect -permalink: /orchestration-api/prefect -category: APIs & Integrations -subCategory: Orchestration API -menuOrder: 64 ---- - -[Prefect][prefect] is a popular open-source orchestrator for data-intensive -workflows. [Prefect Cloud][prefect-cloud] is a fully managed service for -Prefect. - -This guide demonstrates how to setup Cube and Prefect to work together so that -Prefect can push changes from upstream data sources to Cube via the -[Orchestration API][ref-orchestration-api]. - -## Tasks - -In Prefect, each workflow is represented by flows, Python functions decorated -with a `@flow` decorator. Flows include calls to tasks, Python functions -decorated with a `@task` decorator, as well as to child flows. Tasks represent -distinct pieces of work executed within a flow. 
They can perform various jobs: -poll for some precondition, perform extract-load-transform (ETL), or trigger -external systems like Cube. - -Integration between Cube and Prefect is enabled by the -[`prefect-cubejs`][github-prefect-cubejs] package. - - - -Cube and Prefect integration package was originally contributed by -[Alessandro Lollo](https://github.com/AlessandroLollo), Data Engineering -Manager at Cloud Academy ([case study](https://cube.dev/case-studies/cloud-academy-and-cube)), -for which we're very grateful. - - - -The package provides the following tasks: - -- `run_query` for querying Cube via the [`/v1/load`][ref-load-endpoint] -endpoint of the [REST API][ref-rest-api]. -- `build_pre_aggregations` for triggering pre-aggregation builds via the -[`/v1/pre-aggregations/jobs`][ref-ref-jobs-endpoint] endpoint of the -[Orchestration API][ref-orchestration-api]. - -Please refer to the [package documentation][github-prefect-cubejs-docs] for -details and options reference. - -## Installation - -Install [Prefect][prefect-docs-install]. - -Create a new directory: - -```bash -mkdir cube-prefect -cd cube-prefect -``` - -Install the integration package: - -```bash -pip install prefect-cubejs -``` - -## Configuration - -Create a new workflow named `cube_query.py` with the following contents. As -you can see, the `run_query` task accepts a Cube query via the `query` option. - -```python -from prefect import flow -from prefect_cubejs.tasks import ( - run_query -) - -@flow -def cube_query_workflow(): - run_query( - url="https://awesome-ecom.gcp-us-central1.cubecloudapp.dev/cubejs-api", - api_secret="SECRET", - query="""{ - "measures": ["Orders.count"], - "dimensions": ["Orders.status"] - }""" - ) - -cube_query_workflow() -``` - -Create a new workflow named `cube_build.py` with the following contents. As -you can see, the `build_pre_aggregations` task accepts a pre-aggregation -selector via the `selector` option. 
- -```python -from prefect import flow -from prefect_cubejs.tasks import ( - build_pre_aggregations -) - -@flow -def cube_build_workflow(): - build_pre_aggregations( - url="https://awesome-ecom.gcp-us-central1.cubecloudapp.dev/cubejs-api", - api_secret="SECRET", - selector={ - "contexts": [ - {"securityContext": {}} - ], - "timezones": ["UTC"] - }, - wait_for_job_run_completion=True - ) - -cube_build_workflow() -``` - -## Running workflows - -Now, you can run these workflows: - -```bash -python cube_query.py -python cube_build.py -``` - -[prefect]: https://www.prefect.io -[prefect-cloud]: https://www.prefect.io/cloud/ -[prefect-docs-install]: https://docs.prefect.io/2.10.13/getting-started/installation/#install-prefect -[github-prefect-cubejs]: https://github.com/AlessandroLollo/prefect-cubejs -[github-prefect-cubejs-docs]: https://alessandrolollo.github.io/prefect-cubejs/tasks/ -[ref-load-endpoint]: /rest-api#v-1-load -[ref-ref-jobs-endpoint]: /rest-api#v-1-pre-aggregations-jobs -[ref-rest-api]: /http-api/rest -[ref-orchestration-api]: /orchestration-api \ No newline at end of file diff --git a/docs/content/APIs-Integrations/Overview.mdx b/docs/content/APIs-Integrations/Overview.mdx deleted file mode 100644 index 9823a9b386de9..0000000000000 --- a/docs/content/APIs-Integrations/Overview.mdx +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: APIs & Integrations -menuTitle: Overview -permalink: /apis-integrations -category: APIs & Integrations -menuOrder: 1 ---- - -With a rich set of APIs, Cube can power and deliver data to all kinds of -data applications. - - - -## Data APIs - -A few rules of thumb to help you choose an API: - -When implementing internal or self-serve [business intelligence][cube-issbi] -use case, pick the [SQL API][ref-sql-api] and [Semantic Layer Sync][ref-sls]. -The SQL API allows querying Cube with a Postgres-compatible -[dialect of SQL][ref-sql-syntax], either by writing queries manually or -generating them with BI tools. 
- -When implementing [embedded analytics][cube-ea] and -[real-time analytics][cube-rta] use cases, pick [REST API][ref-rest-api] or -[GraphQL API][ref-graphql-api]. Also, the [JavaScript SDK][ref-js-sdk] will -simplify integration with your front-end code. The REST API uses a -[JSON-based query format][ref-json-syntax], and the GraphQL API accepts -[GraphQL queries][ref-graphql-syntax]. - -## Management APIs - -In case you'd like Cube to work with data orchestration tools and let them -push changes from upstream data sources to Cube, explore the -[Orchestration API][ref-orchestration-api]. - -[cube-issbi]: https://cube.dev/use-cases/semantic-layer -[cube-ea]: https://cube.dev/use-cases/embedded-analytics -[cube-rta]: https://cube.dev/use-cases/real-time-analytics -[ref-sql-api]: /backend/sql -[ref-rest-api]: /http-api/rest -[ref-graphql-api]: /http-api/graphql -[ref-orchestration-api]: /orchestration-api -[ref-sls]: /semantic-layer-sync -[ref-js-sdk]: /frontend-introduction -[ref-sql-syntax]: /backend/sql#querying-fundamentals -[ref-json-syntax]: /query-format -[ref-graphql-syntax]: /http-api/graphql#getting-started diff --git a/docs/content/APIs-Integrations/REST-API/Overview.mdx b/docs/content/APIs-Integrations/REST-API/Overview.mdx deleted file mode 100644 index 0babd7bbb7212..0000000000000 --- a/docs/content/APIs-Integrations/REST-API/Overview.mdx +++ /dev/null @@ -1,234 +0,0 @@ ---- -title: REST API -permalink: /http-api/rest -category: APIs & Integrations -subCategory: REST API -menuOrder: 21 ---- - -REST API enables Cube to deliver data over the HTTP protocol to certain kinds -of data applications, including but not limited to the following ones: - -* Most commonly, front-end applications -* Some [data notebooks][ref-notebooks], e.g., [Observable][ref-observable] -* Some AI-based applications, e.g., [Delphi][ref-delphi] -* [Low-code tools][ref-low-code], e.g., [Retool][ref-retool] -* Automated jobs - -Often, the REST API is used to enable [embedded 
analytics][cube-ea] and -[real-time analytics][cube-rta] use cases. - - - -If you've chosen [GraphQL][graphql] as a query language for your front-end -application, consider using the [GraphQL API][ref-graphql-api] that Cube also provides. - - - -Under the hood, REST API also provides endpoints for -[GraphQL API][ref-graphql-api] and [Orchestration API][ref-orchestration-api]. -However, they target specific use cases and are not usually considered part -of the REST API. - -## Configuration - -REST API is enabled by default and secured using [API scopes][self-api-scopes] -and [CORS][self-cors]. - -### <--{"id" : "Configuration"}--> Base path - -By default, all REST API endpoints are prefixed with a base path of -`/cubejs-api`, e.g., the `/v1/load` endpoint will be available at -`/cubejs-api/v1/load`. - - - -Exception: `/livez` and `/readyz` endpoints are not prefixed with a base path. - - - -You can set a desired base path using the [`basePath`][ref-conf-basepath] -configuration option. - -### <--{"id" : "Configuration"}--> API scopes - -Each REST API endpoint belongs to an API scope, e.g., the `/v1/load` endpoint -belongs to the `data` scope. API scopes allow to secure access to API -endpoints by making them accessible to specific users only or disallowing -access for everyone. By default, API endpoints in all scopes, except for -`jobs`, are accessible for everyone. - -| API scope | REST API endpoints | Accessible by default? | -| --------- | ----------------------------------------------------------------------------------------- | ---------------------- | -| `meta` | [`/v1/meta`][ref-ref-meta] | ✅ Yes | -| `data` | [`/v1/load`][ref-ref-load], [`/v1/sql`][ref-ref-sql] | ✅ Yes | -| `graphql` | `/graphql` | ✅ Yes | -| `jobs` | [`/v1/run-scheduled-refresh`][ref-ref-rsr]
[`/v1/pre-aggregations/jobs`][ref-ref-paj] | ❌ No | - - - -Exception: `/livez` and `/readyz` endpoints don't belong to any scope. Access -to these endpoints can't be controlled using API scopes. - - - -You can set accessible API scopes *for all requests* using the -`CUBEJS_DEFAULT_API_SCOPES` environment variable. For example, to disallow -access to the GraphQL API for everyone, set `CUBEJS_DEFAULT_API_SCOPES` to -`meta,data`. - -You can also select accessible API scopes *for each request* using the -[`contextToApiScopes`][ref-conf-contexttoapiscopes] configuration option, -based on the provided [security context][ref-security-context]. For example, -to restrict access to the `/v1/meta` endpoint to service accounts only, you -can set `CUBEJS_DEFAULT_API_SCOPES` to `data,graphql` and use the following -configuration in the `cube.js` file, assuming that service accounts have -`service: true` in their security context: - -```javascript -module.exports = { - contextToApiScopes: (securityContext, defaultScopes) => { - if (securityContext.service) { - return ['meta', ...defaultScopes]; - } - - return defaultScopes; - } -}; -``` - -### <--{"id" : "Configuration"}--> CORS - -REST API supports [Cross-Origin Resource Sharing (CORS)][mdn-cors]. By -default, requests from any origin (`*`) are allowed. - -You can configure CORS using the [`http.cors`][ref-config-cors] configuration -option. For example, to allow requests from a specific domain only, use the -following configuration in the `cube.js` file: - -```javascript -module.exports = { - http: { - cors: { - origin: "https://example.com" - } - } -}; -``` - -## Prerequisites - -### <--{"id" : "Prerequisites"}--> Authentication - -Cube uses API tokens to authorize requests and also for passing additional -security context, which can be used in the -[`queryRewrite`][ref-config-queryrewrite] property in your [`cube.js` -configuration file][ref-config-js]. - -The API Token is passed via the Authorization Header. 
The token itself is a -[JSON Web Token](https://jwt.io), the [Security section][ref-security] describes -how to generate it. - -In the development environment the token is not required for authorization, but -you can still use it to pass a security context. - -### <--{"id" : "Prerequisites"}--> Example request - -```bash{promptUser: user} -curl -H "Authorization: EXAMPLE-API-TOKEN" https://example.com/cubejs-api/v1/sql -``` - -### <--{"id" : "Prerequisites"}--> Continue wait - -If the request takes too long to be processed, Cube Backend responds with -`{ "error": "Continue wait" }` and 200 status code. This is how the long polling -mechanism in Cube is implemented. Clients should continuously retry the same -query in a loop until they get a successful result. Subsequent calls to the Cube -endpoints are idempotent and don't lead to scheduling new database queries if -not required by the [`refresh_key`][ref-schema-ref-cube-refresh-key]. Also, -receiving `Continue wait` doesn't mean the database query has been canceled, and -it's actually still being processed by the Cube. Database queries that weren't -started and are no longer waited by the client's long polling loop will be -marked as orphaned and removed from the querying queue. - -Possible reasons of **Continue wait**: - -- The query requested is heavy, and it takes some time for the database to - process it. Clients should wait for its completion, continuously sending the - same REST API request. [`continueWaitTimeout`][ref-conf-queue-opts] can be - adjusted in order to change the time Cube waits before returning - `Continue wait` message. -- There are many queries requested and Cube backend queues them to save database - from overloading. - -### <--{"id" : "Prerequisites"}--> Error Handling - -Cube REST API has basic errors and HTTP Error codes for all requests. 
-
-| Status | Error response | Description |
-| ------ | ------------------------------ | ---------------------------------------------------------------------------------------------------- |
-| 400 | Error message | General error. It may be a database error, timeout, or other issue. Check error message for details. |
-| 403 | Authorization header isn't set | You didn't provide an auth token. Provide a valid API Token or disable authorization. |
-| 403 | Invalid token | The auth token provided is not valid. It may be expired or have invalid signature. |
-| 500 | Error message | Cube internal server error. Check error message for details. |
-
-### <--{"id" : "Prerequisites"}--> Request Span Annotation
-
-For monitoring tools such as Cube Cloud proper request span annotation should be
-provided in `x-request-id` header of a request. Each request id should consist
-of two parts: `spanId` and `requestSequenceId` which define `x-request-id` as
-whole: `${spanId}-span-${requestSequenceId}`. Values of `x-request-id` header
-should be unique for each separate request. `spanId` should define user
-interaction span such as `Continue wait` retry cycle and its value shouldn't
-change during one single interaction.
-
-### <--{"id" : "Prerequisites"}--> Pagination
-
-Cube supports paginated requests for the `/v1/load` endpoint by including
-[`limit` and `offset` parameters][ref-api-ref-query-format] in the query.
For -example, the following query will retrieve rows 101-200 from the `Orders` cube: - -```json -{ - "dimensions": ["Orders.status"], - "measures": ["Orders.count"], - "timeDimensions": [ - { - "dimension": "Orders.createdAt", - "dateRange": "last year", - "granularity": "day" - } - ], - "limit": 100, - "offset": 100 -} -``` - -[mdn-cors]: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS -[ref-config-js]: /config -[ref-config-queryrewrite]: /config#query-rewrite -[ref-conf-queue-opts]: /config#queue-options -[ref-api-ref-query-format]: /query-format#query-properties -[ref-config-cors]: /config#http -[ref-schema-ref-cube-refresh-key]: /schema/reference/cube#refresh-key -[ref-security]: /security -[ref-notebooks]: /config/downstream#notebooks -[ref-observable]: /config/downstream/observable -[ref-delphi]: /config/downstream/delphi -[ref-low-code]: /config/downstream#low-code-tools-internal-tool-builders -[ref-retool]: /config/downstream/retool -[ref-conf-basepath]: /config#options-reference-base-path -[ref-conf-contexttoapiscopes]: /config#options-reference-context-to-api-scopes -[ref-ref-load]: /rest-api#v-1-load -[ref-ref-meta]: /rest-api#v-1-meta -[ref-ref-sql]: /rest-api#v-1-sql -[ref-ref-rsr]: /rest-api#v-1-run-scheduled-refresh -[ref-ref-paj]: /rest-api#v-1-pre-aggregations-jobs -[ref-security-context]: /security/context -[ref-graphql-api]: /http-api/graphql -[ref-orchestration-api]: /orchestration-api -[cube-ea]: https://cube.dev/use-cases/embedded-analytics -[cube-rta]: https://cube.dev/use-cases/real-time-analytics -[graphql]: https://graphql.org -[self-api-scopes]: #configuration-api-scopes -[self-cors]: #configuration-cors \ No newline at end of file diff --git a/docs/content/APIs-Integrations/REST-API/Query-Format.mdx b/docs/content/APIs-Integrations/REST-API/Query-Format.mdx deleted file mode 100644 index 9e9d4aeab9ecb..0000000000000 --- a/docs/content/APIs-Integrations/REST-API/Query-Format.mdx +++ /dev/null @@ -1,614 +0,0 @@ ---- -title: Query 
format -permalink: /query-format -category: APIs & Integrations -subCategory: REST API -menuOrder: 22 ---- - -Cube Queries are plain JavaScript objects, describing an analytics query. The -basic elements of a query (query members) are `measures`, `dimensions`, and -`segments`. - -The query member format name is `CUBE_NAME.MEMBER_NAME`, for example the -dimension `email` in the Cube `Users` would have the name `Users.email`. - -In the case of dimension of type `time` granularity could be optionally added to -the name, in the following format `CUBE_NAME.TIME_DIMENSION_NAME.GRANULARITY`, -ex: `stories.time.month`. - -Supported granularities: `second`, `minute`, `hour`, `day`, `week`, `month`, -`quarter` and `year`. - -The Cube client also accepts an array of queries. By default, it will be treated -as a Data Blending query type. - -## Query Properties - -A Query has the following properties: - -- `measures`: An array of measures. -- `dimensions`: An array of dimensions. -- `filters`: An array of objects, describing filters. Learn about - [filters format](#filters-format). -- `timeDimensions`: A convenient way to specify a time dimension with a filter. - It is an array of objects in [timeDimension format.](#time-dimensions-format) -- `segments`: An array of segments. A segment is a named filter, created in the - data model. -- `limit`: A row limit for your query. The default value is `10000`. The maximum - allowed limit is `50000`. If you'd like to request more rows than the maximum - allowed limit, consider using [pagination][ref-recipe-pagination]. -- `offset`: The number of initial rows to be skipped for your query. The default - value is `0`. -- `order`: An object, where the keys are measures or dimensions to order by and - their corresponding values are either `asc` or `desc`. The order of the fields - to order on is based on the order of the keys in the object. -- `timezone`: All time based calculations performed within Cube are - timezone-aware. 
This property is applied to all time dimensions during - aggregation and filtering. It isn't applied to the time dimension referenced - in a `dimensions` query property unless granularity or date filter is - specified. Using this property you can set your desired timezone in - [TZ Database Name](https://en.wikipedia.org/wiki/Tz_database) format, e.g.: - `America/Los_Angeles`. The default value is `UTC`. -- `renewQuery`: If `renewQuery` is set to `true`, Cube will renew all - [`refreshKey`][ref-schema-ref-preaggs-refreshkey] for queries and query - results in the foreground. However, if the - [`refreshKey`][ref-schema-ref-preaggs-refreshkey] (or - [`refreshKey.every`][ref-schema-ref-preaggs-refreshkey-every]) doesn't - indicate that there's a need for an update this setting has no effect. The - default value is `false`. - > **NOTE**: Cube provides only eventual consistency guarantee. Using a small - > [`refreshKey.every`][ref-schema-ref-preaggs-refreshkey-every] value together - > with `renewQuery` to achieve immediate consistency can lead to endless - > refresh loops and overall system instability. -- `ungrouped`: If `ungrouped` is set to `true` no `GROUP BY` statement will be - added to the query. Instead, the raw results after filtering and joining will - be returned without grouping. By default `ungrouped` queries require a primary - key as a dimension of every cube involved in the query for security purposes. - In case of `ungrouped` query measures will be rendered as underlying `sql` of - measures without aggregation and time dimensions will be truncated as usual - however not grouped by. 
-
-```json
-{
-  "measures": ["stories.count"],
-  "dimensions": ["stories.category"],
-  "filters": [
-    {
-      "member": "stories.isDraft",
-      "operator": "equals",
-      "values": ["No"]
-    }
-  ],
-  "timeDimensions": [
-    {
-      "dimension": "stories.time",
-      "dateRange": ["2015-01-01", "2015-12-31"],
-      "granularity": "month"
-    }
-  ],
-  "limit": 100,
-  "offset": 50,
-  "order": {
-    "stories.time": "asc",
-    "stories.count": "desc"
-  },
-  "timezone": "America/Los_Angeles"
-}
-```
-
-### <--{"id" : "Query Properties"}--> Default order
-
-If the `order` property is not specified in the query, Cube sorts results by
-default using the following rules:
-
-- The first time dimension with granularity, ascending. If no time dimension
-  with granularity exists...
-- The first measure, descending. If no measure exists...
-- The first dimension, ascending.
-
-### <--{"id" : "Query Properties"}--> Alternative order format
-
-Also you can control the ordering of the `order` specification, Cube supports an
-alternative order format - an array of tuples:
-
-```json
-{
-  "order": [
-    ["stories.time", "asc"],
-    ["stories.count", "asc"]
-  ]
-}
-```
-
-## Filters Format
-
-A filter is a JavaScript object with the following properties:
-
-- `member`: Dimension or measure to be used in the filter, for example:
-  `stories.isDraft`. See below on difference between filtering dimensions vs
-  filtering measures.
-- `operator`: An operator to be used in the filter. Only some operators are
-  available for measures. For dimensions the available operators depend on the
-  type of the dimension. Please see the reference below for the full list of
-  available operators.
-- `values`: An array of values for the filter. Values must be of type String. If
-  you need to pass a date, pass it as a string in `YYYY-MM-DD` format.
-
-### <--{"id" : "Filters Format"}--> Filtering Dimensions vs Filtering Measures
-
-Filters are applied differently to dimensions and measures.
- -When you filter on a dimension, you are restricting the raw data before any -calculations are made. When you filter on a measure, you are restricting the -results after the measure has been calculated. - -## Filters Operators - -Only some operators are available for measures. For dimensions, the available -operators depend on the -[type of the dimension](/schema/reference/types-and-formats#types). - -### <--{"id" : "Filters Operators"}--> equals - -Use it when you need an exact match. It supports multiple values. - -- Applied to measures. -- Dimension types: `string`, `number`, `time`. - -```json -{ - "member": "users.country", - "operator": "equals", - "values": ["US", "Germany", "Israel"] -} -``` - -### <--{"id" : "Filters Operators"}--> notEquals - -The opposite operator of `equals`. It supports multiple values. - -- Applied to measures. -- Dimension types: `string`, `number`, `time`. - -```json -{ - "member": "users.country", - "operator": "notEquals", - "values": ["France"] -} -``` - -### <--{"id" : "Filters Operators"}--> contains - -The `contains` filter acts as a wildcard case-insensitive `LIKE` operator. In -the majority of SQL backends it uses `ILIKE` operator with values being -surrounded by `%`. It supports multiple values. - -- Dimension types: `string`. - -```json -{ - "member": "posts.title", - "operator": "contains", - "values": ["serverless", "aws"] -} -``` - -### <--{"id" : "Filters Operators"}--> notContains - -The opposite operator of `contains`. It supports multiple values. - -- Dimension types: `string`. - -```json -{ - "member": "posts.title", - "operator": "notContains", - "values": ["ruby"] -} -``` - -This operator adds `IS NULL` check to include `NULL` values unless you add -`null` to `values`. 
For example: - -```json -{ - "member": "posts.title", - "operator": "notContains", - "values": ["ruby", null] -} -``` - -### <--{"id" : "Filters Operators"}--> startsWith - -The `startsWith` filter acts as a case-insensitive `LIKE` operator with a -wildcard at the beginning. In the majority of SQL backends, it uses the `ILIKE` -operator with `%` at the start of each value. It supports multiple values. - -- Dimension types: `string`. - -```json -{ - "member": "posts.title", - "operator": "startsWith", - "values": ["ruby"] -} -``` - -### <--{"id" : "Filters Operators"}--> endsWith - -The `endsWith` filter acts as a case-insensitive `LIKE` operator with a wildcard -at the end. In the majority of SQL backends, it uses the `ILIKE` operator with -`%` at the end of each value. It supports multiple values. - -- Dimension types: `string`. - -```json -{ - "member": "posts.title", - "operator": "endsWith", - "values": ["ruby"] -} -``` - -### <--{"id" : "Filters Operators"}--> gt - -The `gt` operator means **greater than** and is used with measures or dimensions -of type `number`. - -- Applied to measures. -- Dimension types: `number`. - -```json -{ - "member": "posts.upvotes_count", - "operator": "gt", - "values": ["100"] -} -``` - -### <--{"id" : "Filters Operators"}--> gte - -The `gte` operator means **greater than or equal to** and is used with measures -or dimensions of type `number`. - -- Applied to measures. -- Dimension types: `number`. - -```json -{ - "member": "posts.upvotes_count", - "operator": "gte", - "values": ["100"] -} -``` - -### <--{"id" : "Filters Operators"}--> lt - -The `lt` operator means **less than** and is used with measures or dimensions of -type `number`. - -- Applied to measures. -- Dimension types: `number`. 
- -```json -{ - "member": "posts.upvotes_count", - "operator": "lt", - "values": ["10"] -} -``` - -### <--{"id" : "Filters Operators"}--> lte - -The `lte` operator means **less than or equal to** and is used with measures or -dimensions of type `number`. - -- Applied to measures. -- Dimension types: `number`. - -```json -{ - "member": "posts.upvotes_count", - "operator": "lte", - "values": ["10"] -} -``` - -### <--{"id" : "Filters Operators"}--> set - -Operator `set` checks whether the value of the member **is not** `NULL`. You -don't need to pass `values` for this operator. - -- Applied to measures. -- Dimension types: `number`, `string`, `time`. - -```json -{ - "member": "posts.author_name", - "operator": "set" -} -``` - -### <--{"id" : "Filters Operators"}--> notSet - -An opposite to the `set` operator. It checks whether the value of the member -**is** `NULL`. You don't need to pass `values` for this operator. - -- Applied to measures. -- Dimension types: `number`, `string`, `time`. - -```json -{ - "member": "posts.author_name", - "operator": "notSet" -} -``` - -### <--{"id" : "Filters Operators"}--> inDateRange - - - -From a pre-aggregation standpoint, `inDateRange` filter is applied as a generic -filter. All pre-aggregation granularity matching rules aren't applied in this -case. It feels like pre-aggregation isn't matched. However, pre-aggregation is -just missing the filtered time dimension in -[dimensions][ref-schema-ref-preaggs-dimensions] list. If you want date range -filter to match [timeDimension][ref-schema-ref-preaggs-time-dimension] please -use [timeDimensions](#time-dimensions-format) `dateRange` instead. - - - -The operator `inDateRange` is used to filter a time dimension into a specific -date range. The values must be an array of dates with the following format -'YYYY-MM-DD'. If only one date specified the filter would be set exactly to this -date. 
- -There is a convenient way to use date filters with grouping - -[learn more about the `timeDimensions` property here](#time-dimensions-format) - -- Dimension types: `time`. - -```json -{ - "member": "posts.time", - "operator": "inDateRange", - "values": ["2015-01-01", "2015-12-31"] -} -``` - -### <--{"id" : "Filters Operators"}--> notInDateRange - - - -From a pre-aggregation standpoint, `notInDateRange` filter is applied as a -generic filter. All pre-aggregation granularity matching rules aren't applied in -this case. It feels like pre-aggregation isn't matched. However, pre-aggregation -is just missing the filtered time dimension in -[dimensions][ref-schema-ref-preaggs-dimensions] list. - - - -An opposite operator to `inDateRange`, use it when you want to exclude specific -dates. The values format is the same as for `inDateRange`. - -- Dimension types: `time`. - -```json -{ - "member": "posts.time", - "operator": "notInDateRange", - "values": ["2015-01-01", "2015-12-31"] -} -``` - -### <--{"id" : "Filters Operators"}--> beforeDate - - - -From a pre-aggregation standpoint, `beforeDate` filter is applied as a generic -filter. All pre-aggregation granularity matching rules aren't applied in this -case. It feels like pre-aggregation isn't matched. However, pre-aggregation is -just missing the filtered time dimension in -[dimensions][ref-schema-ref-preaggs-dimensions] list. - - - -Use it when you want to retrieve all results before some specific date. The -values should be an array of one element in `YYYY-MM-DD` format. - -- Dimension types: `time`. - -```json -{ - "member": "posts.time", - "operator": "beforeDate", - "values": ["2015-01-01"] -} -``` - -### <--{"id" : "Filters Operators"}--> afterDate - - - -From a pre-aggregation standpoint, `afterDate` filter is applied as a generic -filter. All pre-aggregation granularity matching rules aren't applied in this -case. It feels like pre-aggregation isn't matched. 
However, pre-aggregation is -just missing the filtered time dimension in -[dimensions][ref-schema-ref-preaggs-dimensions] list. - - - -The same as `beforeDate`, but is used to get all results after a specific date. - -- Dimension types: `time`. - -```json -{ - "member": "posts.time", - "operator": "afterDate", - "values": ["2015-01-01"] -} -``` - -### <--{"id" : "Filters Operators"}--> measureFilter - -The `measureFilter` operator is used to apply an existing measure's filters to -the current query. - -This usually happens when you call -[`ResultSet.drilldown()`][ref-client-core-resultset-drilldown], which will -return a query for the drill members. If the original query has a filter on a -measure, that filter will be added as otherwise the drilldown query will lose -that context. Not supported by pre-aggregations. - -- Applied to measures. - -```json -{ - "member": "Orders.count", - "operator": "measureFilter" -} -``` - -## Boolean logical operators - -Filters can contain `or` and `and` logical operators. Logical operators have -only one of the following properties: - -- `or` An array with one or more filters or other logical operators -- `and` An array with one or more filters or other logical operators - -```json -{ - "or": [ - { - "member": "visitors.source", - "operator": "equals", - "values": ["some"] - }, - { - "and": [ - { - "member": "visitors.source", - "operator": "equals", - "values": ["other"] - }, - { - "member": "visitor_checkins.cards_count", - "operator": "equals", - "values": ["0"] - } - ] - } - ] -} -``` - -> **Note:** You can not put dimensions and measures filters in the same logical -> operator. - -## Time Dimensions Format - -Since grouping and filtering by a time dimension is quite a common case, Cube -provides a convenient shortcut to pass a dimension and a filter as a -`timeDimension` property. - -- `dimension`: Time dimension name. 
-- `dateRange`: An array of dates with the following format `YYYY-MM-DD` or in - `YYYY-MM-DDTHH:mm:ss.SSS` format. Values should always be local and in query - `timezone`. Dates in `YYYY-MM-DD` format are also accepted. Such dates are - padded to the start and end of the day if used in start and end of date range - interval accordingly. Please note that for timestamp comparison, `>=` and `<=` - operators are used. It requires, for example, that the end date range date - `2020-01-01` is padded to `2020-01-01T23:59:59.999`. If only one date is - specified it's equivalent to passing two of the same dates as a date range. - You can also pass a string with a [relative date - range][ref-relative-date-range], for example, `last quarter`. -- `compareDateRange`: An array of date ranges to compare a measure change over - previous period -- `granularity`: A granularity for a time dimension. It supports the following - values `second`, `minute`, `hour`, `day`, `week`, `month`, `quarter`, `year`. - If you pass `null` to the granularity, Cube will only perform filtering by a - specified time dimension, without grouping. - -```json -{ - "measures": ["stories.count"], - "timeDimensions": [ - { - "dimension": "stories.time", - "dateRange": ["2015-01-01", "2015-12-31"], - "granularity": "month" - } - ] -} -``` - -You can use compare date range queries when you want to see, for example, how a -metric performed over a period in the past and how it performs now. You can pass -two or more date ranges where each of them is in the same format as a -`dateRange` - -```javascript -// ... 
-const resultSet = await cubejsApi.load({ - measures: ["stories.count"], - timeDimensions: [ - { - dimension: "stories.time", - compareDateRange: ["this week", ["2020-05-21", "2020-05-28"]], - granularity: "month", - }, - ], -}); -``` - -### <--{"id" : "Time Dimension Format"}--> Relative date range - -You can also use a string with a relative date range in the `dateRange` -property, for example: - -```json -{ - "measures": ["stories.count"], - "timeDimensions": [ - { - "dimension": "stories.time", - "dateRange": "last week", - "granularity": "day" - } - ] -} -``` - -Some of supported formats: - -- `today`, `yesterday`, or `tomorrow` -- `last year`, `last quarter`, or `last 360 days` -- `next month` or `last 6 months` (current date not included) -- `from 7 days ago to now` or `from now to 2 weeks from now` (current date - included) - - - -Cube uses the [Chrono][chrono-website] library to parse relative dates. Please -refer to its documentation for more examples. - - - -[ref-recipe-pagination]: /recipes/pagination -[ref-client-core-resultset-drilldown]: - /@cubejs-client-core#result-set-drill-down -[ref-schema-ref-preaggs-refreshkey]: - /schema/reference/pre-aggregations#parameters-refresh-key -[ref-schema-ref-preaggs-refreshkey-every]: - /schema/reference/pre-aggregations#parameters-refresh-key-every -[ref-schema-ref-preaggs-dimensions]: - /schema/reference/pre-aggregations#parameters-dimensions -[ref-schema-ref-preaggs-time-dimension]: - /schema/reference/pre-aggregations#parameters-time-dimension -[ref-relative-date-range]: #relative-date-range -[chrono-website]: https://github.com/wanasit/chrono diff --git a/docs/content/APIs-Integrations/REST-API/Real-Time-Data-Fetch.mdx b/docs/content/APIs-Integrations/REST-API/Real-Time-Data-Fetch.mdx deleted file mode 100644 index e0ffbf198057b..0000000000000 --- a/docs/content/APIs-Integrations/REST-API/Real-Time-Data-Fetch.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Real-time data fetch -permalink: 
/real-time-data-fetch -category: APIs & Integrations -subCategory: REST API -menuOrder: 23 ---- - -Most of databases supported by Cube are retroactive. It means Cube should -continuously poll for changes rather than receive updates as a subscribed -listener. Cube provides a convenient way to create such polling database -subscriptions on your behalf. - -## Web Sockets - -To provide the best real-time experience it's recommended to use Web Sockets -transport instead of the default http long polling. Web sockets on backend can -be enabled using `CUBEJS_WEB_SOCKETS` environment variable: - -**.env:** - -```dotenv -CUBEJS_WEB_SOCKETS=true -``` - -Clients can be switched to Web Sockets by passing `WebSocketTransport` to -`CubejsApi` constructor: - -```javascript -import cubejs from '@cubejs-client/core'; -import WebSocketTransport from '@cubejs-client/ws-transport'; - -const cubejsApi = cubejs({ - transport: new WebSocketTransport({ - authorization: CUBEJS_TOKEN, - apiUrl: 'ws://localhost:4000/', - }), -}); -``` - -## Client Subscriptions - -Multiple APIs are provided to support data subscription scenarios. 
- -### <--{"id" : "Client Subscriptions"}--> Vanilla JavaScript - -```javascript -import cubejs from '@cubejs-client/core'; -import WebSocketTransport from '@cubejs-client/ws-transport'; - -const cubejsApi = cubejs({ - transport: new WebSocketTransport({ - authorization: CUBEJS_TOKEN, - apiUrl: 'ws://localhost:4000/', - }), -}); - -// Create a subscription -const subscription = cubejsApi.subscribe( - { - measures: ['logs.count'], - timeDimensions: [ - { - dimension: 'logs.time', - granularity: 'hour', - dateRange: 'last 1440 minutes', - }, - ], - }, - options, - (error, resultSet) => { - if (!error) { - // handle the update - } - } -); - -// Later on, unsubscribe from subscription -subscription.unsubscribe(); -``` - -### <--{"id" : "Client Subscriptions"}--> React hooks - -```javascript -import { useCubeQuery } from '@cubejs-client/react'; - -const Chart = ({ query }) => { - const { resultSet, error, isLoading } = useCubeQuery(query, { - // The component will automatically unsubscribe when unmounted - subscribe: true, - }); - - if (isLoading) { - return
Loading...
; - } - - if (error) { - return
{error.toString()}
; - } - - if (!resultSet) { - return null; - } - - return ; -}; -``` - -## Refresh Rate - -As in the case of a regular data fetch, real-time data fetch obeys -[`refresh_key` refresh rules](caching#refresh-keys). In order to provide a desired -refresh rate, `refresh_key` should reflect the rate of change of the underlying -data set; the querying time should also be much less than the desired refresh -rate. Please use the [`every`](/schema/reference/cube#refresh-key) -parameter to adjust the refresh interval. diff --git a/docs/content/APIs-Integrations/SQL-API/Authentication-and-Authorization.mdx b/docs/content/APIs-Integrations/SQL-API/Authentication-and-Authorization.mdx deleted file mode 100644 index 6362829731128..0000000000000 --- a/docs/content/APIs-Integrations/SQL-API/Authentication-and-Authorization.mdx +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Authentication & Authorization -category: APIs & Integrations -subCategory: SQL API -permalink: /backend/sql/security -menuOrder: 12 ---- - -Cube can be configured with dynamic username & password verification system by -setting a [`checkSqlAuth()`][ref-config-check-sql-auth] function in the -`cube.js` configuration file. This function should verify username and return -object with password and security context. - -If password returned from this function matches provided in connection string -user will be authenticated with provided security context. - -```javascript -module.exports = { - checkSqlAuth: async (req, username) => { - if (username === "fooUser") { - return { - password: "mypassword", - securityContext: {}, - }; - } - - throw new Error("Incorrect user name or password"); - }, -}; -``` - -As Cube expects passwords to be provided by `checkSqlAuth` implementation best -practice is to use a generated password here instead of an actual user password. 
-Generated passwords can be implemented as an HMAC of the user name or requested -from some service that provides the mapping of the user name to passwords for -additional security. - -## Security Context (Row-Level Security) - -Cube's SQL API can also use the Security Context for [Dynamic data model -creation][ref-dynamic-schemas] or [`queryRewrite`][ref-config-queryrewrite] -property in your [`cube.js` configuration file][ref-config-js]. - -By default, the SQL API uses the current user's Security Context, but this -behaviour can be modified so that certain users are allowed to switch. To do -this, we must first define which user is allowed to change Security Context: - -First, you need to define what user is allowed to change security context: - -```dotenv -CUBEJS_SQL_SUPER_USER=admin -``` - -If it's not enough for your case, you define your logic for check with -`canSwitchSqlUser` property in your [`cube.js` configuration -file][ref-config-js]. - -You can change security context for specific query via virtual filter on: - -```sql -SELECT * FROM orders WHERE __user = 'anotheruser'; -``` - -[ref-config-check-sql-auth]: /config#check-sql-auth -[ref-config-queryrewrite]: /config#query-rewrite -[ref-dynamic-schemas]: /schema/dynamic-schema-creation -[ref-config-js]: /config diff --git a/docs/content/APIs-Integrations/SQL-API/Joins.mdx b/docs/content/APIs-Integrations/SQL-API/Joins.mdx deleted file mode 100644 index 4f7edb1248949..0000000000000 --- a/docs/content/APIs-Integrations/SQL-API/Joins.mdx +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: Joins -category: APIs & Integrations -subCategory: SQL API -permalink: /backend/sql/reference/joins -menuOrder: 13 ---- - -The SQL API supports joins through `__cubeJoinField` virtual column, which is -available in every cube table. Join can also be done through `CROSS JOIN`. Usage -of `__cubeJoinField` in a join instructs Cube to perform join as it's defined in -a data model. 
Cube generates the correct joining conditions for the underlying -data source. - -For example, the following query joins the `orders` and `products` tables under -the hood with `orders.product_id = products.id`, exactly the same way as the -REST API query does: - -```sql -cube=> SELECT p.name, SUM(o.count) FROM orders o LEFT JOIN products p ON o.__cubeJoinField = p.__cubeJoinField GROUP BY 1 LIMIT 5; - name | SUM(o.count) ---------------------------+-------------- - Tasty Plastic Mouse | 121 - Intelligent Cotton Ball | 119 - Ergonomic Steel Tuna | 116 - Intelligent Rubber Pants | 116 - Generic Wooden Gloves | 116 -(5 rows) -``` - -Or through `CROSS JOIN`: - -```sql -cube=> SELECT p.name, sum(o.count) FROM orders o CROSS JOIN products p GROUP BY 1 LIMIT 5; - name | SUM(o.count) ---------------------------+-------------- - Tasty Plastic Mouse | 121 - Intelligent Cotton Ball | 119 - Ergonomic Steel Tuna | 116 - Intelligent Rubber Pants | 116 - Generic Wooden Gloves | 116 -(5 rows) -``` - -In the resulting query plan, you won't see any joins as you can't see those for -REST API queries either: - -```sql -cube=> EXPLAIN SELECT p.name, sum(o.count) FROM orders o LEFT JOIN products p ON o.__cubeJoinField = p.__cubeJoinField GROUP BY 1 LIMIT 5; - plan_type | plan ----------------+----------------------- - logical_plan | CubeScan: request={ + - | "measures": [ + - | "orders.count" + - | ], + - | "dimensions": [ + - | "products.name" + - | ], + - | "segments": [], + - | "limit": 5 + - | } - physical_plan | CubeScanExecutionPlan+ - | -(2 rows) -``` - -This feature allows you to join cubes even joined transitively only. - -In most of the BI tools you'd use `__cubeJoinField` to define joins between cube -tables. In tools that allow defining custom SQL datasets, you can use joined -tables as a dataset SQL. 
For example: - -```sql -SELECT o.count as count, p.name as product_name, p.description as product_description -FROM orders o -CROSS JOIN products p; -``` - -Please note we use aliasing to avoid name clashing between cube members in a -resulting data set. In this case, wrapped SQL will be properly processed by -Cube, pushing down all operations to Cube query: - -```sql -cube=> SELECT product_name, SUM(count) FROM ( - SELECT o.count as count, p.name as product_name, p.description as product_description - FROM orders o CROSS - JOIN products p -) joined -GROUP BY 1 -ORDER BY 2 DESC -LIMIT 5; - product_name | SUM(joined.count) ---------------------------+------------------- - Tasty Plastic Mouse | 121 - Intelligent Cotton Ball | 119 - Ergonomic Steel Tuna | 116 - Intelligent Rubber Pants | 116 - Generic Wooden Gloves | 116 -(5 rows) -``` - -We can see this by introspecting the `EXPLAIN` plan for this query: - -```sql -cube=> EXPLAIN SELECT product_name, SUM(count) FROM ( - SELECT o.count as count, p.name as product_name, p.description as product_description - FROM orders o - CROSS JOIN products p -) joined -GROUP BY 1 -ORDER BY 2 DESC -LIMIT 5; - plan_type | plan ----------------+----------------------- - logical_plan | CubeScan: request={ + - | "measures": [ + - | "orders.count" + - | ], + - | "dimensions": [ + - | "products.name" + - | ], + - | "segments": [], + - | "order": [ + - | [ + - | "orders.count",+ - | "desc" + - | ] + - | ], + - | "limit": 5 + - | } - physical_plan | CubeScanExecutionPlan+ - | -(2 rows) -``` - -Please note even if `product_description` is in the inner selection, it isn't -evaluated in the final query as it isn't used in any way. - -## Proxy Dimensions and Views - -As an alternative to achieve joins, it is also possible to define proxy -dimension or measure inside a cube or a view. This is the preferred way of -joining as it provides you control over the joining path for complex use cases. 
- - - -```yaml -views: - - name: orders_users - includes: - - orders - # This is a proxy dimension - - orders.users.city -``` - -```javascript -view(`orders_users`, { - includes: [ - orders, - // This is a proxy dimension - orders.users.city, - ], -}); -``` - - - -Now, it is possible to get orders count by users city with the following query. - -```sql -cube=> SELECT count, city FROM orders_users; - count | user_city --------+--------------- - 1416 | Los Angeles - 1412 | Seattle - 1365 | Mountain View - 1263 | New York - 1220 | Austin - 1164 | Chicago - 1101 | San Francisco - 1059 | Palo Alto -(8 rows) -``` diff --git a/docs/content/APIs-Integrations/SQL-API/Overview.mdx b/docs/content/APIs-Integrations/SQL-API/Overview.mdx deleted file mode 100644 index bc8806f19cf2f..0000000000000 --- a/docs/content/APIs-Integrations/SQL-API/Overview.mdx +++ /dev/null @@ -1,555 +0,0 @@ ---- -title: SQL API -category: APIs & Integrations -subCategory: SQL API -permalink: /backend/sql -menuOrder: 11 ---- - - - -Tune into [our webinar on June 21st](https://cube.dev/events/introducing-semantic-layer-sync-with-preset) -to see the easiest way to integrate semantics layers with the most popular open source BI tool, [Preset](https://preset.io). - - - -SQL API enables Cube to deliver data over the -[Postgres-compatible protocol][postgres-protocol] to certain kinds -of data applications, including but not limited to the following ones: - -* [Business intelligence (BI) and data exploration][ref-bi] tools, e.g., -[Superset][ref-superset] or [Tableau][ref-tableau] -* [Data notebooks][ref-notebooks], e.g., [Jupyter][ref-jupyter] or -[Hex][ref-hex] -* Reverse ETL tools, e.g., Census or Hightouch -* [Low-code tools][ref-low-code], e.g., [Retool][ref-retool] -* Automated jobs - -Often, the SQL API is used to enable internal or self-serve -[business intelligence][cube-issbi] use cases. 
- -SQL API has been tested with the following tools: - -- psql CLI -- Apache Superset -- Tableau Cloud -- Tableau Desktop with JDBC driver -- Power BI -- Metabase -- Google Data Studio -- Excel through Devart plugin -- Deepnote -- Hex -- Observable -- Streamlit -- Jupyter notebook -- Hightouch - - - -Please use [this GitHub issue](https://github.com/cube-js/cube/issues/3906) -to suggest tools of your interest and vote for already proposed ones. - - - -## Configuration - -### <--{"id" : "Configuration"}--> Cube Core - -SQL API is disabled by default. To enable the SQL API, set the -`CUBEJS_PG_SQL_PORT` to a port number you'd like to connect to with a -Postgres-compatible tool. - -Use `CUBEJS_SQL_USER` and `CUBEJS_SQL_PASSWORD` to configure credentials -required to connect to the SQL API. You can also use -[`checkSqlAuth`][ref-config-checksqlauth], [`canSwitchSqlUser`][ref-config-canswitchsqluser], -and `CUBEJS_SQL_SUPER_USER` to configure [custom authentication][ref-sql-api-auth]. - -### <--{"id" : "Configuration"}--> Cube Cloud - -SQL API is enabled by default. You can find credentials for the SQL API on -the Overview page by -clicking Connect to SQL API. - -You can also customize `CUBEJS_PG_SQL_PORT`, `CUBEJS_SQL_USER`, -and `CUBEJS_SQL_PASSWORD` environment variables by navigating -to Settings → Configration. - -## Querying Fundamentals - -Under the hood, SQL API uses -[Apache Datafusion](https://arrow.apache.org/datafusion/) as its SQL execution -engine. It's responsible for query planning and execution. As the conversion -process from SQL to Cube Query can be ambiguous, an additional step of query -rewriting is done before the query is executed. During this step, the query plan -is rewritten such that the maximum number of Cube Queries can be detected within -the given query plan. Overall, rewriting is a seamless process. There are some -practical considerations that you should keep in mind while querying, though. 
- -In the SQL API, each cube is represented as a table. Measures, dimensions, and -segments in this table are columns. We call these tables **cube tables**. - -Consider the `orders` cube in your data model, the following query is performing -a `SELECT` from the `orders` **cube**. - -```sql -SELECt - city, - SUM(amount) -FROM orders -WHERE status = 'shipped' -GROUP BY 1 -``` - -The SQL API transforms `SELECT` query fragments from **cube tables** into -[Cube's internal query format](/query-format). This process is called **Cube -query rewrite**. - -The SQL query above would be rewritten into the following Cube query: - -```json -{ - "measures": ["Orders.amount"], - "dimensions": ["Orders.city"], - "filters": [ - { - "member": "Orders.status", - "operator": "equals", - "values": ["shipped"] - } - ] -} -``` - -Because of this transformation, not all functions and expressions are supported -in query fragments performing `SELECT` from cube tables. Please refer to the -reference to see whether a specific expression or function is supported. For -example, the following query won't work because the SQL API can't push down the -`case` expression to Cube for processing. It is not possible to translate `case` -expression in measure. - -```sql --- This query won't work because of the Cube query rewrite -SELECT - city, - CASE - WHEN status = 'shipped' THEN 'done' - ELSE 'in-progress' - END real_status, - SUM(number) -FROM orders - CROSS JOIN Users -GROUP BY 1; -``` - -You can leverage nested queries in cases like this. You can wrap your `SELECT` -statement from a cube table into another `SELECT` statement to perform -calculations with expressions like `CASE`. This outer select is **not** part of -the SQL query that being rewritten and thus allows you to use more SQL -functions, operators and expressions. 
You can rewrite the above query as -follows, making sure to wrap the original `SELECT` statement: - -```sql ---- You can also use CTEs to achieve the same result -SELECT - city, - CASE - WHEN status = 'shipped' THEN 'done' - ELSE 'in-progress' - END real_status, - SUM(amount) AS total -FROM ( - SELECT - Users.city AS city, - SUM(number) AS amount, - orders.status - FROM orders - CROSS JOIN Users - GROUP BY 1, 3 -) AS inner -GROUP BY 1, 2 -ORDER BY 1; -``` - -The above query works because the `CASE` expression is supported in `SELECT` -queries **not** querying cube tables. - -When querying cube tables, it is important to understand fundamentals of Cube -query rewrite as well as the **pushdown** process. **Pushdown** is a process of -pushing the processing of a particular part of the query down to the inner -`SELECT` from the cube table. The following sections provide an overview of Cube -query rewrite and pushdown. Please refer to the reference to see whether -specific functions, operators or expressions are supported in Cube query -rewrite. - -### <--{"id" : "Querying cube tables"}--> Aggregated vs Non-aggregated queries - -There are two types of queries supported against **cube tables**: aggregated and -non-aggregated. Aggregated are those with `GROUP BY` statement, and -non-aggregated are those without. Cube queries issued to your database will -always be aggregated, and it doesn't matter if you provide `GROUP BY` in a query -or not. - -Whenever you use a non-aggregated query you need to provide only column names in -SQL: - -```sql -SELECT status, count FROM orders -``` - -The same aggregated query should always aggregate measure columns using a -corresponding aggregating function or special `MEASURE()` function: - - - -In cases where measure columns are not aggregated -`Projection references non-aggregate values` error will be thrown. It means -there are columns that are neither in `GROUP BY` or aggregated. 
This is a -standard SQL `GROUP BY` operation consistency check enforced by SQL API as well. - - - -```sql -SELECT status, SUM(count) FROM orders GROUP BY 1 -SELECT status, MEASURE(count) FROM orders GROUP BY 1 -``` - -### <--{"id" : "Querying cube tables"}--> Filtering - -Cube supports most simple equality operators like `=`, `<>`, `<`, `<=`, `>`, -`>=` as well as `IN` and `LIKE` operators. Cube tries to push down all filters -into Cube query. In some cases, SQL filters aren't available in Cube and can be -done in a post-processing phase. Time dimension filters will be converted to -time dimension date ranges whenever it's possible. - -### <--{"id" : "Querying cube tables"}--> Ordering - -Cube tries to push down all `ORDER BY` statements into Cube Query. - -If it can't be done ordering part would be done in a post-processing phase. In -case there are more than 50,000 rows in the result set, incorrect results can be -received in this case. Please use `EXPLAIN` in order to check if it's the case. - -Consider the following query. - -```sql -SELECT - status, - SUM(total_value) + 2 as transformed_amount -FROM ( - SELECT * FROM orders -) orders -GROUP BY status -ORDER BY status DESC -LIMIT 100 -``` - -Because of the expression `SUM(total_value) + 2` in the projection of outer -query, Cube can't push down `ORDER`. - -You can run `EXPLAIN` against the above query to look at the plan. As you can -see below, the sorting operation is done after Cube query and projection. - -```bash -+ GlobalLimitExec: skip=None, fetch=100 -+- SortExec: [transformed_amount@1 DESC] -+-- ProjectionExec: expr=[status@0 as status, SUM(orders.total_value)@1 + CAST(2 AS Float64) as transformed_amount] -+--- CubeScanExecutionPlan -``` - -Because of the default limit in Cube queries (50,000 rows), there is a -possibility of a wrong result if there are more than 50,000 rows. 
Given that -queries to Cube are usually aggregated, it is rare that they may return more -than 50,000 rows, but keep that limitation in mind when designing your queries. - -### <--{"id" : "Querying cube tables"}--> Limit - -Limit push down is supported by Cube however, a limit over 50,000 can't be -overridden. In future versions, paging and streaming would be used to avoid this -limitation. - -## Examples - -Consider the following data model: - - - -```yaml -cubes: - - name: orders - sql_table: orders - - measures: - - name: count - type: count - - dimensions: - - name: status - type: string - sql: status - - - name: created_at - type: time - sql: created_at -``` - -```javascript -cube(`orders`, { - sql_table: `orders`, - - measures: { - count: { - type: `count`, - }, - }, - - dimensions: { - status: { - sql: `status`, - type: `string`, - }, - - created_at: { - sql: `created_at`, - type: `time`, - }, - }, -}); -``` - - - -It would be represented as table in SQL API with `count`, `status`, `created` -columns. - -To get the count of orders grouped by status we can run the following query. - -```sql -cube=> SELECT count, status FROM orders; - count | status --------+------------ - 15513 | completed - 14652 | processing - 13829 | shipped -(3 rows) -``` - -Cube will automatically apply the `GROUP BY` clause in case it is missing in the -query. We can also provide the `GROUP BY` statement to control how results are -grouped. In the following example, we group orders by created month and also by -status within every month. 
- -```sql -cube=> SELECT MEASURE(count), status, DATE_TRUNC('month', created_at) date FROM orders GROUP BY date, status ORDER BY date asc; - measure(orders.count) | status | date ------------------------+------------+---------------------------- - 31 | shipped | 2016-01-01 00:00:00.000000 - 28 | completed | 2016-01-01 00:00:00.000000 - 28 | processing | 2016-01-01 00:00:00.000000 - 28 | shipped | 2016-02-01 00:00:00.000000 - 18 | processing | 2016-02-01 00:00:00.000000 - 28 | completed | 2016-02-01 00:00:00.000000 - 54 | processing | 2016-03-01 00:00:00.000000 - 57 | completed | 2016-03-01 00:00:00.000000 - 56 | shipped | 2016-03-01 00:00:00.000000 - 54 | shipped | 2016-04-01 00:00:00.000000 - 60 | completed | 2016-04-01 00:00:00.000000 - 43 | processing | 2016-04-01 00:00:00.000000 - 55 | shipped | 2016-05-01 00:00:00.000000 -``` - -### <--{"id" : "Examples"}--> Querying Dimensions - -Querying dimensions is straightforward, simply add any required fields to the -`SELECT` clause. - -```sql -cube=> SELECT status FROM orders; - status ------------- - completed - processing - shipped -(3 rows) -``` - -### <--{"id" : "Examples"}--> Querying Measures - -Measures can similarly be queried through Cube SQL. - -Because measures are already aggregated in Cube there is no need to apply -aggregate functions to them in SQL API if you don't have a `GROUP BY` statement -in query. - -```sql -cube=> SELECT count FROM orders; - count -------- - 43994 -(1 row) -``` - -Some BI systems or SQL constraints may require you to apply aggregate functions. -To support this, Cube allows aggregate functions on measures as long as they -match the type of the measure. - -`count` measure in our example is of type `count`, It means we can apply -`COUNT()` aggregate function to it. The below query is similar to the above one. 
- -```sql -cube=> SELECT COUNT(count) FROM orders; - COUNT(orders.count) ---------------------- - 43994 -(1 row) -``` - -There's also universal aggregate function `MEASURE()` that matches any measure -type. - -```sql -cube=> SELECT MEASURE(count) FROM orders; - measure(orders.count) ------------------------ - 43994 -(1 row) -``` - -Let's look at more measures types: - - - -```yaml -cubes: - - name: orders - # ... - - measures: - - name: count - type: count - - - name: distinct_count - type: count_distinct - sql: id - - - name: approx_distinct_count - type: count_distinct_approx - sql: id - - - name: min_value - type: min - sql: min_value - - - name: max_value - type: max - sql: max_value -``` - -```javascript -cube(`orders`, { - // ..., - - measures: { - count: { - type: `count`, - }, - distinct_count: { - sql: `id`, - type: `count_distinct`, - }, - approx_distinct_count: { - sql: `id`, - type: `count_distinct_approx`, - }, - min_value: { - sql: `min_value`, - type: `min`, - }, - max_value: { - sql: `max_value`, - type: `max`, - }, - }, -}); -``` - - - -As we can see, we have a mix of measure types in the above data model. 
To query -them, we could use the following SQL statements: - -```sql ---- Both the following statements are equivalent -SELECT count FROM orders -SELECT COUNT(*) FROM orders - ---- Count distinct, and count distinct approx ---- Both the following statements are equivalent -SELECT distinct_count FROM orders -SELECT COUNT(DISTINCT distinct_count) FROM orders - ---- Both the following statements are equivalent -SELECT approx_distinct_count FROM orders -SELECT COUNT(DISTINCT approx_distinct_count) FROM orders - ---- Both the following statements are equivalent -SELECT min_value FROM orders -SELECT MIN(min_value) FROM orders - ---- Both the following statements are equivalent -SELECT max_value FROM orders -SELECT MAX(max_value) FROM orders -``` - -### <--{"id" : "Examples"}--> Querying Segments - -Any segments defined in a data model can also be used in Cube SQL queries. -Looking at the data model below, we have one segment `is_completed`: - - - -```yaml -cubes: - - name: orders - # ... - segments: - - name: is_completed - sql: status = 'completed' -``` - -```javascript -cube("orders", { - // ..., - - segments: { - is_completed: { - sql: `${CUBE}.status = 'completed'`, - }, - }, -}); -``` - - - -Segments must be used as `boolean` types in Cube SQL queries: - -```sql -WHERE is_completed = true -``` - -[ref-config-js]: /config -[ref-dynamic-schemas]: /schema/dynamic-schema-creation -[ref-config-env]: /reference/environment-variables -[ref-sql-api-auth]: /backend/sql/security -[ref-config-checksqlauth]: /config#options-reference-check-sql-auth -[ref-config-canswitchsqluser]: /config#options-reference-can-switch-sql-user -[ref-bi]: /config/downstream#bi-data-exploration-tools -[ref-superset]: /config/downstream/superset -[ref-tableau]: /config/downstream/tableau -[ref-notebooks]: /config/downstream#notebooks -[ref-jupyter]: /config/downstream/jupyter -[ref-hex]: /config/downstream/hex -[ref-low-code]: /config/downstream#low-code-tools-internal-tool-builders -[ref-retool]: 
/config/downstream/retool -[postgres-protocol]: https://www.postgresql.org/docs/current/protocol.html -[cube-issbi]: https://cube.dev/use-cases/semantic-layer diff --git a/docs/content/APIs-Integrations/SQL-API/Template.mdx b/docs/content/APIs-Integrations/SQL-API/Template.mdx deleted file mode 100644 index bda476de4ecc4..0000000000000 --- a/docs/content/APIs-Integrations/SQL-API/Template.mdx +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: SQL API Reference Templates -category: Internal ---- - -## Function/operator template - -Find and replace the following placeholders: - -- `FN_NAME` with the SQL function or operator name -- `FN_ARGS` with the function arguments -- `FN_DESCRIPTION` with the function or operator description -- `SELECT_EXAMPLE_OUTPUT` with the output of a selection SQL example -- `PROJECT_EXAMPLE_OUTPUT` with the output of a projection SQL example -- `OUTER_QUERY_EXAMPLE_OUTPUT` with the output of an outer query SQL example - -Complete the following tasks: - -- Test the function/operator as per the Cube Query Rewrite support table -- Save the outputs and use them as examples, remove unsupported examples if - necessary - - Update the Cube Query Rewrite support table - -### <--{"id" : "String Functions and Operators"}--> FN_NAME - - - -Within a Cube Query Rewrite, this function/operator may behave differently. 
Use -the table below for support: - -| | Selections | Projections | Outer Query | -| --------- | ---------- | ----------- | ---------------------------------------- | -| `FN_NAME` | ❓ | ❓ | [✅][ref-backend-sql-query-fundamentals] | - - - -FN_DESCRIPTION - -```sql -FN_NAME(FN_ARGS) -``` - -The example below uses `FN_NAME` in a selection within a Cube query rewrite: - -```sql -SELECT COUNT(*) FROM Orders WHERE FN_NAME(Orders.status) = 'completed'; -SELECT_EXAMPLE_OUTPUT -``` - -The example below uses `FN_NAME` in a projection within a Cube query rewrite: - -```sql -SELECT FN_NAME(Orders.status) FROM Orders LIMIT 1; -PROJECT_EXAMPLE_OUTPUT -``` - -The example below uses `FN_NAME` in post-processing with an outer query: - -```sql -SELECT FN_NAME(Orders.status) -FROM (SELECT * FROM Orders) orders -LIMIT 1; -OUTER_QUERY_EXAMPLE_OUTPUT -``` diff --git a/docs/content/Auth/Overview.mdx b/docs/content/Auth/Overview.mdx deleted file mode 100644 index e1b2c40d3b65d..0000000000000 --- a/docs/content/Auth/Overview.mdx +++ /dev/null @@ -1,270 +0,0 @@ ---- -title: Security Overview -menuTitle: Overview -permalink: /security -category: Authentication & Authorization -menuOrder: 1 ---- - -In Cube, authorization (or access control) is based on the **security context**. -The diagram below shows how it works during the request processing in Cube: - -
- -
- -Authentication is handled outside of Cube. A typical use case would be: - -1. A web server serves an HTML page containing the Cube client, which needs to - communicate securely with the Cube API. -2. The web server should generate a JWT with an expiry to achieve this. The - server could include the token in the HTML it serves or provide the token to - the frontend via an XHR request, which is then stored it in local storage or - a cookie. -3. The JavaScript client is initialized using this token, and includes it in - calls to the Cube API. -4. The token is received by Cube, and verified using any available JWKS (if - configured) -5. Once decoded, the token claims are injected into the [security - context][ref-sec-ctx]. - - - -**In development mode, the token is not required for authorization**, but you -can still use it to [pass a security context][ref-sec-ctx]. - - - -## Generating JSON Web Tokens (JWT) - -Authentication tokens are generated based on your API secret. Cube CLI generates -an API Secret when a project is scaffolded and saves this value in the `.env` -file as `CUBEJS_API_SECRET`. - -You can generate two types of tokens: - -- Without security context, which will mean that all users will have the same - data access permissions. -- With security context, which will allow you to implement role-based security - models where users will have different levels of access to data. - - - -It is considered best practice to use an `exp` expiration claim to limit the -lifetime of your public tokens. [Learn more in the JWT docs][link-jwt-docs]. - - - -You can find a library to generate JWTs for your programming language -[here][link-jwt-libs]. - -In Node.js, the following code shows how to generate a token which will expire -in 30 days. We recommend using the `jsonwebtoken` package for this. 
- -```javascript -const jwt = require('jsonwebtoken'); -const CUBE_API_SECRET = 'secret'; - -const cubejsToken = jwt.sign({}, CUBE_API_SECRET, { expiresIn: '30d' }); -``` - -Then, in a web server or cloud function, create a route which generates and -returns a token. In general, you will want to protect the URL that generates -your token using your own user authentication and authorization: - -```javascript -app.use((req, res, next) => { - if (!req.user) { - res.redirect('/login'); - return; - } - next(); -}); - -app.get('/auth/cubejs-token', (req, res) => { - res.json({ - // Take note: cubejs expects the JWT payload to contain an object! - token: jwt.sign(req.user, process.env.CUBEJS_API_SECRET, { - expiresIn: '1d', - }), - }); -}); -``` - -Then, on the client side, (assuming the user is signed in), fetch a token from -the web server: - -```javascript -let apiTokenPromise; - -const cubejsApi = cubejs( - () => { - if (!apiTokenPromise) { - apiTokenPromise = fetch(`${API_URL}/auth/cubejs-token`) - .then((res) => res.json()) - .then((r) => r.token); - } - return apiTokenPromise; - }, - { - apiUrl: `${API_URL}/cubejs-api/v1`, - } -); -``` - -You can optionally store this token in local storage or in a cookie, so that you -can then use it to query the Cube API. - -## Using JSON Web Key Sets (JWKS) - - - -Looking for a guide on how to connect a specific identity provider? Check out -our recipes for using [Auth0][ref-recipe-auth0] or [AWS Cognito][ref-recipe-cognito] with Cube. - - - -### <--{"id" : "Using JSON Web Key Sets (JWKS)"}--> Configuration - -As mentioned previously, Cube supports verifying JWTs using industry-standard -JWKS. The JWKS can be provided either from a URL, or as a JSON object conforming -to [JWK specification RFC 7517 Section 4][link-jwk-ref], encoded as a string. 
- -#### Using a key as a JSON string - -Add the following to your `cube.js` configuration file: - -```javascript -module.exports = { - jwt: { - key: '', - }, -}; -``` - -Or configure the same using environment variables: - -```dotenv -CUBEJS_JWT_KEY='' -``` - -#### Using a key from a URL - - - -When using a URL to fetch the JWKS, Cube will automatically cache the response, -re-use it and update if a key rotation has occurred. - - - -Add the following to your `cube.js` configuration file: - -```javascript -module.exports = { - jwt: { - jwkUrl: '', - }, -}; -``` - -Or configure the same using environment variables: - -```dotenv -CUBEJS_JWK_URL='' -``` - -### <--{"id" : "Using JSON Web Key Sets (JWKS)"}--> Verifying claims - -Cube can also verify the audience, subject and issuer claims in JWTs. Similarly -to JWK configuration, these can also be configured in the `cube.js` -configuration file: - -```javascript -module.exports = { - jwt: { - audience: '', - issuer: [''], - subject: '', - }, -}; -``` - -Using environment variables: - -```dotenv -CUBEJS_JWT_AUDIENCE='' -CUBEJS_JWT_ISSUER='' -CUBEJS_JWT_SUBJECT='' -``` - -### <--{"id" : "Using JSON Web Key Sets (JWKS)"}--> Custom claims namespace - -Cube can also extract claims defined in custom namespaces. Simply specify the -namespace in your `cube.js` configuration file: - -```javascript -module.exports = { - jwt: { - claimsNamespace: 'my-custom-namespace', - }, -}; -``` - -### <--{"id" : "Using JSON Web Key Sets (JWKS)"}--> Caching - -Cube caches JWKS by default when -[`CUBEJS_JWK_URL` or `jwt.jwkUrl` is specified](#configuration). - -- If the response contains a `Cache-Control` header, then Cube uses it to - determine cache expiry. -- The keys inside the JWKS are checked for expiry values and used for cache - expiry. -- If an inbound request supplies a JWT referencing a key not found in the cache, - the cache is refreshed. 
- -## Custom authentication - -Cube also allows you to provide your own JWT verification logic by setting a -[`checkAuth()`][ref-config-check-auth] function in the `cube.js` configuration -file. This function is expected to verify a JWT and assigns its' claims to the -security context. - - - -Previous versions of Cube allowed setting a `checkAuthMiddleware()` parameter, -which is now deprecated. We advise [migrating to a newer version of -Cube][ref-config-migrate-cubejs]. - - - -As an example, if you needed to retrieve user information from an LDAP server, -you might do the following: - -```javascript -module.exports = { - checkAuth: async (req, auth) => { - try { - const userInfo = await getUserFromLDAP(req.get('X-LDAP-User-ID')); - req.securityContext = userInfo; - } catch { - throw new Error('Could not authenticate user from LDAP'); - } - }, -}; -``` - -[link-jwt-docs]: - https://github.com/auth0/node-jsonwebtoken#token-expiration-exp-claim -[link-jwt-libs]: https://jwt.io/#libraries-io -[link-jwk-ref]: https://tools.ietf.org/html/rfc7517#section-4 -[ref-config-check-auth]: /config#check-auth -[ref-config-migrate-cubejs]: - /configuration/overview#migration-from-express-to-docker-template -[ref-recipe-auth0]: /recipes/authn-with-auth0 -[ref-recipe-cognito]: /recipes/authn-with-aws-cognito -[ref-sec-ctx]: /security/context -[link-slack]: https://slack.cube.dev/ diff --git a/docs/content/Auth/Security-Context.mdx b/docs/content/Auth/Security-Context.mdx deleted file mode 100644 index eafd54a9ea9f2..0000000000000 --- a/docs/content/Auth/Security-Context.mdx +++ /dev/null @@ -1,206 +0,0 @@ ---- -title: Security context -permalink: /security/context -category: Authentication & Authorization -menuOrder: 2 ---- - -Your authentication server issues JWTs to your client application, which, when -sent as part of the request, are verified and decoded by Cube to get security -context claims to evaluate access control rules. 
Inbound JWTs are decoded and -verified using industry-standard [JSON Web Key Sets (JWKS)][link-auth0-jwks]. - -For access control or authorization, Cube allows you to define granular access -control rules for every cube in your data model. Cube uses both the request and -security context claims in the JWT token to generate a SQL query, which includes -row-level constraints from the access control rules. - -JWTs sent to Cube should be passed in the `Authorization: ` header to -authenticate requests. - -JWTs can also be used to pass additional information about the user, known as a -**security context**. A security context is a verified set of claims about the -current user that the Cube server can use to ensure that users only have access -to the data that they are authorized to access. - -It will be accessible as the [`securityContext`][ref-config-sec-ctx] property -inside: - -- The [`queryRewrite`][ref-config-queryrewrite] configuration option in your - Cube configuration file. -- the [`COMPILE_CONTEXT`][ref-cubes-compile-ctx] global, which is used to - support [multi-tenant deployments][link-multitenancy]. - -## Using queryRewrite - -You can use [`queryRewrite`][ref-config-queryrewrite] to amend incoming queries -with filters. For example, let's take the following query: - -```json -{ - "dimensions": ["orders.status"], - "measures": ["orders.count", "orders.total"], - "timeDimensions": [ - { - "dimension": "orders.createdAt", - "dateRange": ["2015-01-01", "2015-12-31"], - "granularity": "month" - } - ] -} -``` - -We'll also use the following as a JWT payload; `user_id`, `sub` and `iat` will -be injected into the security context: - -```json -{ - "sub": "1234567890", - "iat": 1516239022, - "user_id": 131 -} -``` - - - -Cube expects the context to be an object. 
If you don't provide an object as the -JWT payload, you will receive the following error: - -```bash -Cannot create proxy with a non-object as target or handler -``` - - - -To ensure that users making this query only receive their own orders, define -`queryRewrite` in the `cube.js` configuration file: - -```javascript -module.exports = { - queryRewrite: (query, { securityContext }) => { - // Ensure `securityContext` has an `id` property - if (!securityContext.user_id) { - throw new Error('No id found in Security Context!'); - } - - query.filters.push({ - member: 'orders.user_id', - operator: 'equals', - values: [securityContext.user_id], - }); - - return query; - }, -}; -``` - -To test this, we can generate an API token as follows: - -```javascript -const jwt = require('jsonwebtoken'); -const CUBE_API_SECRET = 'secret'; - -const cubejsToken = jwt.sign({ user_id: 42 }, CUBE_API_SECRET, { - expiresIn: '30d', -}); -``` - -Using this token, we authorize our request to the Cube API by passing it in the -Authorization HTTP header. - -```bash{outputLines: 2-5} -curl \ - -H "Authorization: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1Ijp7ImlkIjo0Mn0sImlhdCI6MTU1NjAyNTM1MiwiZXhwIjoxNTU4NjE3MzUyfQ._8QBL6nip6SkIrFzZzGq2nSF8URhl5BSSSGZYp7IJZ4" \ - -G \ - --data-urlencode 'query={"measures":["orders.count"]}' \ - http://localhost:4000/cubejs-api/v1/load -``` - -And Cube will generate the following SQL: - -```sql -SELECT - count(*) "orders.count" - FROM ( - SELECT * FROM public.orders WHERE user_id = 42 - ) AS orders -LIMIT 10000 -``` - -## Using COMPILE_CONTEXT - -In the example below `user_id`, `company_id`, `sub` and `iat` will be injected -into the security context and will be accessible in both the [Security -Context][ref-schema-sec-ctx] and [`COMPILE_CONTEXT`][ref-cubes-compile-ctx] -global variable in the Cube data model. 
- - - -`COMPILE_CONTEXT` is used by Cube at data model compilation time, which allows -changing the underlying dataset completely; the Security Context is only used at -query execution time, which simply filters the dataset with a `WHERE` clause. - - - -```json -{ - "sub": "1234567890", - "iat": 1516239022, - "user_id": 131, - "company_id": 500 -} -``` - -With the same JWT payload as before, we can modify models before they are -compiled. The following cube will ensure users only see results for their -`company_id` in a multi-tenant deployment: - - - -```yaml -cubes: - - name: orders - sql_table: "{COMPILE_CONTEXT.security_context.company_id}.orders" - - measures: - - name: count - type: count -``` - -```javascript -cube(`orders`, { - sql_table: `${COMPILE_CONTEXT.security_context.company_id}.orders`, - - measures: { - count: { - type: `count` - } - } -}) -``` - - - -### <--{"id" : "Using COMPILE_CONTEXT"}--> Usage with pre-aggregations - -To generate pre-aggregations that rely on `COMPILE_CONTEXT`, [configure -`scheduledRefreshContexts` in your `cube.js` configuration -file][ref-config-sched-refresh]. - -## Testing during development - -During development, it is often useful to be able to edit the security context -to test access control rules. The [Developer -Playground][ref-devtools-playground] allows you to set your own JWTs, or you can -build one from a JSON object. 
- -[link-auth0-jwks]: - https://auth0.com/docs/tokens/json-web-tokens/json-web-key-sets -[link-multitenancy]: /config/multitenancy -[ref-config-queryrewrite]: /config#query-rewrite -[ref-config-sched-refresh]: /config#scheduled-refresh-contexts -[ref-config-sec-ctx]: /config#security-context -[ref-schema-sec-ctx]: /schema/reference/cube#security-context -[ref-cubes-compile-ctx]: https://cube.dev/docs/cube#compile-context -[ref-devtools-playground]: - /dev-tools/dev-playground#editing-the-security-context diff --git a/docs/content/Caching/Getting-Started-Pre-Aggregations.mdx b/docs/content/Caching/Getting-Started-Pre-Aggregations.mdx deleted file mode 100644 index 2b77155084010..0000000000000 --- a/docs/content/Caching/Getting-Started-Pre-Aggregations.mdx +++ /dev/null @@ -1,711 +0,0 @@ ---- -title: Getting started with pre-aggregations -permalink: /caching/pre-aggregations/getting-started -category: Caching -menuOrder: 2 ---- - -Often at the beginning of an analytical application's lifecycle - when there is -a smaller dataset that queries execute over - the application works well and -delivers responses within acceptable thresholds. However, as the size of the -dataset grows, the time-to-response from a user's perspective can often suffer -quite heavily. This is true of both application and purpose-built data -warehousing solutions. - -This leaves us with a chicken-and-egg problem; application databases can deliver -low-latency responses with small-to-large datasets, but struggle with massive -analytical datasets; data warehousing solutions _usually_ make no guarantees -except to deliver a response, which means latency can vary wildly on a -query-to-query basis. - -| Database Type | Low Latency? | Massive Datasets? | -| ------------------------------ | ------------ | ----------------- | -| Application (Postgres/MySQL) | ✅ | ❌ | -| Analytical (BigQuery/Redshift) | ❌ | ✅ | - -Cube provides a solution to this problem: pre-aggregations. 
In layman's terms, a -pre-aggregation is a condensed version of the source data. It specifies -attributes from the source, which Cube uses to condense (or crunch) the data. -This simple yet powerful optimization can reduce the size of the dataset by -several orders of magnitude, and ensures subsequent queries can be served by the -same condensed dataset if any matching attributes are found. - -[Pre-aggregations are defined within each cube's data -schema][ref-schema-preaggs], and cubes can have as many pre-aggregations as they -require. The pre-aggregated data [can be stored either alongside the source data -in the same database, in an external database][ref-caching-preaggs-storage] that -is supported by Cube, [or in Cube Store, a dedicated pre-aggregation storage -layer][ref-caching-preaggs-cubestore]. - -## Pre-Aggregations without Time Dimension - -To illustrate pre-aggregations with an example, let's use a sample e-commerce -database. We have a data model representing all our `orders`: - - - -```yaml -cubes: - - name: orders - sql_table: orders - - measures: - - name: count - type: count - - dimensions: - - name: id - sql: id - type: number - primary_key: true - - - name: status - sql: status - type: string - - - name: completed_at - sql: completed_at - type: time -``` - -```javascript -cube(`orders`, { - sql_table: `orders`, - - measures: { - count: { - type: `count`, - }, - }, - - dimensions: { - id: { - sql: `id`, - type: `number`, - primary_key: true, - }, - - status: { - sql: `status`, - type: `string`, - }, - - completed_at: { - sql: `completed_at`, - type: `time`, - }, - }, -}); -``` - - - -Some sample data from this table might look like: - -| **id** | **status** | **completed_at** | -| ------ | ---------- | ----------------------- | -| 1 | completed | 2021-02-15T12:21:11.290 | -| 2 | completed | 2021-02-25T18:15:12.369 | -| 3 | shipped | 2021-03-15T20:40:57.404 | -| 4 | processing | 2021-03-13T10:30:21.360 | -| 5 | completed | 2021-03-10T18:25:32.109 | 
- -Our first requirement is to populate a dropdown in our front-end application -which shows all possible statuses. The Cube query to retrieve this information -might look something like: - -```json -{ - "dimensions": ["orders.status"] -} -``` - -In that case, we can add the following pre-aggregation to the `orders` cube: - - - -```yaml -cubes: - - name: orders - # ... - - pre_aggregations: - - name: order_statuses - dimensions: - - status -``` - -```javascript -cube(`orders`, { - // ... - - pre_aggregations: { - order_statuses: { - dimensions: [status], - }, - }, -}); -``` - - - -## Pre-Aggregations with Time Dimension - -Using the same data model as before, we are now finding that users frequently -query for the number of orders completed per day, and that this query is -performing poorly. This query might look something like: - -```json -{ - "measures": ["orders.count"], - "timeDimensions": ["orders.completed_at"] -} -``` - -In order to improve the performance of this query, we can add another -pre-aggregation definition to the `orders` cube: - - - -```yaml -cubes: - - name: orders - # ... - - pre_aggregations: - - name: orders_by_completed_at - measures: - - count - time_dimension: completed_at - granularity: month -``` - -```javascript -cube(`orders`, { - // ... - - pre_aggregations: { - orders_by_completed_at: { - measures: [count], - time_dimension: completed_at, - granularity: `month`, - }, - }, -}); -``` - - - -Note that we have added a `granularity` property with a value of `month` to this -definition. This allows Cube to aggregate the dataset to a single entry for each -month. - -The next time the API receives the same JSON query, Cube will build (if it -doesn't already exist) the pre-aggregated dataset, store it in the source -database server and use that dataset for any subsequent queries. 
A sample of the -data in this pre-aggregated dataset might look like: - -| **completed_at** | **count** | -| ----------------------- | --------- | -| 2021-02-01T00:00:00.000 | 2 | -| 2021-03-01T00:00:00.000 | 3 | - -## Keeping pre-aggregations up-to-date - -Pre-aggregations can become out-of-date or out-of-sync if the original dataset -changes. [Cube uses a refresh key to check the freshness of the -data][ref-caching-preaggs-refresh]; if a change in the refresh key is detected, -the pre-aggregations are rebuilt. These refreshes are performed in the -background as a scheduled process, unless configured otherwise. - -## Ensuring pre-aggregations are targeted by queries - -Cube selects the best available pre-aggregation based on the incoming queries it -receives via the API. The process for selection is summarized below: - -1. Are all measures of type `count`, `sum`, `min`, `max` or - `count_distinct_approx`? - -2. If yes, then check if - - - The pre-aggregation contains all dimensions, filter dimensions and leaf - measures from the query - - The measures aren't multiplied ([via a `one_to_many` - relationship][ref-schema-joins-rel]) - -3. If no, then check if - - - The query's time dimension granularity is set - - All query filter dimensions are included in query dimensions - - The pre-aggregation defines the **exact** set of dimensions and measures - used in the query - -You can find a complete flowchart [here][self-select-pre-agg]. - -### <--{"id" : "Ensuring pre-aggregations are targeted by queries"}--> Additivity - -So far, we've described pre-aggregations as aggregated versions of your existing -data. However, there are some rules that apply when Cube uses the -pre-aggregation. The **additivity** of fields specified in both the query and in -the pre-aggregation determines this. - -So what is additivity? Let's add another cube called `line_items` to the previous -example to demonstrate. 
Many `line_items` can belong to any order from the `orders` cube, and are -[joined][ref-schema-joins] as such: - - - -```yaml -cubes: - - name: line_items - sql_table: line_items - - joins: - - name: orders - sql: "{CUBE}.order_id = {orders.id}" - relationship: many_to_one - - measures: - - name: count - type: count - - dimensions: - - name: id - sql: id - type: number - primary_key: true - - - name: created_at - sql: created_at - type: time -``` - -```javascript -cube(`line_items`, { - sql_table: `line_items`, - - joins: { - orders: { - sql: `${CUBE}.order_id = ${orders.id}`, - relationship: `many_to_one`, - }, - }, - - measures: { - count: { - type: `count`, - }, - }, - - dimensions: { - id: { - sql: `id`, - type: `number`, - primary_key: true, - }, - - created_at: { - sql: `created_at`, - type: `time`, - }, - }, -}); -``` - - - -Some sample data from the `line_items` table might look like: - -| **id** | **product_id** | **order_id** | **quantity** | **price** | **profit_margin** | **created_at** | -| ------ | -------------- | ------------ | ------------ | --------- | ----------------- | -------------------------- | -| 1 | 31 | 1 | 1 | 275 | 1 | 2021-01-20 00:00:00.000000 | -| 2 | 49 | 2 | 6 | 248 | 0.1 | 2021-01-20 00:00:00.000000 | -| 3 | 89 | 3 | 6 | 197 | 0.35 | 2021-01-21 00:00:00.000000 | -| 4 | 71 | 4 | 8 | 223 | 0.15 | 2021-01-21 00:00:00.000000 | -| 5 | 64 | 5 | 5 | 75 | 0.75 | 2021-01-22 00:00:00.000000 | -| 6 | 62 | 6 | 8 | 75 | 0.65 | 2021-01-22 00:00:00.000000 | - -Looking at the raw data, we can see that if the data were to be aggregated by -`created_at`, then we could simply add together the `quantity` and `price` -fields and still get a correct result: - -| **created_at** | **quantity** | **price** | -| -------------------------- | ------------ | --------- | -| 2021-01-20 00:00:00.000000 | 7 | 523 | -| 2021-01-21 00:00:00.000000 | 14 | 420 | -| 2021-01-22 00:00:00.000000 | 13 | 150 | - -This means that `quantity` and `price` are both **additive 
measures**, and we -can represent them in the `line_items` cube as follows: - - - -```yaml -cubes: - - name: line_items - # ... - - measures: - # ... - - - name: quantity - sql: quantity - type: sum - - - name: price - type: sum - sql: price - format: currency - - # ... -``` - -```javascript -cube(`line_items`, { - // ... - - measures: { - // ... - - quantity: { - sql: `quantity`, - type: `sum`, - }, - - price: { - type: `sum`, - sql: `price`, - format: `currency`, - }, - }, - - // ... -}); -``` - - - -Because neither `quantity` and `price` reference any other measures in our -`line_items` cube, we can also say that they are **additive leaf measures**. Any -query requesting only these two measures can be called a **leaf measure -additive** query. Additive leaf measures can only be of the following -[types][ref-schema-types-measure]: `count`, `sum`, `min`, `max` or -`count_distinct_approx`. - -[ref-schema-types-measure]: /types-and-formats#measures-types - -### <--{"id" : "Ensuring pre-aggregations are targeted by queries"}--> Non-Additivity - -Using the same sample data for `line_items`, there's a `profit_margin` field -which is different for each row. However, despite the value being numerical, it -doesn't actually make sense to add up this value. 
Let's look at the rows for -`2021-01-20` in the sample data: - -| **id** | **product_id** | **order_id** | **quantity** | **price** | **profit_margin** | **created_at** | -| ------ | -------------- | ------------ | ------------ | --------- | ----------------- | -------------------------- | -| 1 | 31 | 1 | 1 | 275 | 1 | 2021-01-20 00:00:00.000000 | -| 2 | 49 | 2 | 6 | 248 | 0.1 | 2021-01-20 00:00:00.000000 | - -And now let's try and aggregate them: - -| **created_at** | **quantity** | **price** | **profit_margin** | -| -------------------------- | ------------ | --------- | ----------------- | -| 2021-01-20 00:00:00.000000 | 7 | 523 | 1.1 | - -Using the source data, we'll manually calculate the profit margin and see if it -matches the above. We'll use the following formula: - -$$ -x + (x * y) = z -$$ - -Where `x` is the original cost of the item, `y` is the profit margin and `z` is -the price the item was sold for. Let's use the formula to find the original cost -for both items sold on `2021-01-20`. For the row with `id = 1`: - -$$ -x + (x * 1) = 275\\ -2x = 275\\ -x = 275 / 2\\ -x = 137.5 -$$ - -And for the row where `id = 2`: - -$$ -x + (x * 0.1) = 248\\ -1.1x = 248\\ -x = 248 / 1.1\\ -x = 225.454545454545455 -$$ - -Which means the total cost for both items was: - -$$ -225.454545454545455 + 137.5\\ -362.954545454545455 -$$ - -Now that we have the cost of each item, let's use the same formula in reverse to -see if applying a profit margin of `1.1` will give us the same total price -(`523`) as calculated earlier: - -$$ -362.954545454545455 + (362.954545454545455 * 1.1) = z\\ -762.204545454545455 = z\\ -z = 762.204545454545455 -$$ - -We can clearly see that `523` **does not** equal `762.204545454545455`, and we -cannot treat the `profit_margin` column the same as we would any other additive -measure. 
Armed with the above knowledge, we can add the `profit_margin` field to -our cube **as a [dimension][ref-schema-dims]**: - - - -```yaml -cubes: - - name: line_items - # ... - - dimensions: - # ... - - - name: profit_margin - sql: profit_margin - type: number - format: percent - - # ... -``` - -```javascript -cube(`line_items`, { - // ... - - dimensions: { - // ... - - profit_margin: { - sql: `profit_margin`, - type: `number`, - format: `percent`, - }, - }, - - // ... -}); -``` - - - -Another approach might be to calculate the profit margin dynamically, and -instead saving the "cost" price. Because the cost price is an additive measure, -we are able to store it in a pre-aggregation: - - - -```yaml -cubes: - - name: line_items - # ... - - measures: - # ... - - - name: cost - sql: "{CUBE.price} / (1 + {CUBE.profit_margin})" - type: sum - - # ... -``` - -```javascript -cube(`line_items`, { - // ... - - measures: { - // ... - - cost: { - sql: `${CUBE.price} / (1 + ${CUBE.profit_margin})`, - type: `sum`, - }, - }, - - // ... -}); -``` - - - -Another example of a non-additive measure would be a distinct count of -`product_id`. If we took the distinct count of products sold over a month, and -then tried to sum the distinct count of products for each individual day and -compared them, we would not get the same results. We can add the measure like -this: - - - -```yaml -cubes: - - name: line_items - # ... - - measures: - # ... - - - name: count_distinct_products - sql: product_id - type: count_distinct - - # ... -``` - -```javascript -cube(`line_items`, { - // ... - - measures: { - // ... - - count_distinct_products: { - sql: `product_id`, - type: `count_distinct`, - }, - }, - - // ... -}); -``` - - - -However the above cannot be used in a pre-aggregation. We can instead change -the `type` to `count_distinct_approx`, and then use the measure in a -pre-aggregation definition: - - - -```yaml -cubes: - - name: line_items - # ... - - measures: - # ... 
- - - name: count_distinct_products - sql: product_id - type: count_distinct_approx - - pre_aggregations: - - name: my_rollup - # ... - - measures: - - count_distinct_products - - # ... -``` - -```javascript -cube(`line_items`, { - // ... - - measures: { - // ... - - count_distinct_products: { - sql: `product_id`, - type: `count_distinct_approx`, - }, - }, - - pre_aggregations: { - my_rollup: { - // ... - - measures: [ count_distinct_products ], - } - }, - - // ... -}); -``` - - - -### <--{"id" : "Ensuring pre-aggregations are targeted by queries"}--> Selecting the pre-aggregation - -To recap what we've learnt so far: - -- **Additive measures** are measures whose values can be added together - -- **Multiplied measures** are measures that define `one_to_many` relationships - -- **Leaf measures** are measures that do not reference any other measures in - their definition - -- **Calculated measures** are measures that reference other dimensions and - measures in their definition - -- A query is **leaf measure additive** if all of its leaf measures are one of: - `count`, `sum`, `min`, `max` or `count_distinct_approx` - -Cube looks for matching pre-aggregations in the order they are defined in a -cube's data model file. Each defined pre-aggregation is then tested for a match -based on the criteria in the flowchart below: - -
- Pre-Aggregation Selection Flowchart -
- -Some extra considerations for pre-aggregation selection: - -- The query's time dimension and granularity must match the pre-aggregation. - -- The query's time dimension and granularity together act as a dimension. If the - date range isn't aligned with granularity, a common granularity is used. This - common granularity is selected using the [greatest common divisor][wiki-gcd] - across both the query and pre-aggregation. For example, the common granularity - between `hour` and `day` is `hour` because both `hour` and `day` can be - divided by `hour`. - -- The query's granularity's date range must match the start date and end date - from the time dimensions. For example, when using a granularity of `month`, - the values should be the start and end days of the month i.e. - `['2020-01-01T00:00:00.000', '2020-01-31T23:59:59.999']`; when the granularity - is `day`, the values should be the start and end hours of the day i.e. - `['2020-01-01T00:00:00.000', '2020-01-01T23:59:59.999']`. Date ranges are - inclusive, and the minimum granularity is `second`. - -- The order in which pre-aggregations are defined in models matter; the first - matching pre-aggregation for a query is the one that is used. Both the - measures and dimensions of any cubes specified in the query are checked to - find a matching `rollup`. - -- `rollup` pre-aggregations **always** have priority over `original_sql`. Thus, - if you have both `original_sql` and `rollup` defined, Cube will try to match - `rollup` pre-aggregations before trying to match `original_sql`. You can - instruct Cube to use the original SQL pre-aggregations by using - [`use_original_sql_pre_aggregations`][ref-schema-preaggs-origsql]. 
- -[ref-caching-preaggs-cubestore]: - /caching/using-pre-aggregations#pre-aggregations-storage -[ref-caching-preaggs-refresh]: /caching/using-pre-aggregations#refresh-strategy -[ref-caching-preaggs-storage]: - /caching/using-pre-aggregations#pre-aggregations-storage -[ref-schema-dims]: /schema/reference/dimensions -[ref-schema-joins]: /schema/reference/joins -[ref-schema-joins-rel]: /schema/reference/joins#relationship -[ref-schema-preaggs]: /schema/reference/pre-aggregations -[ref-schema-preaggs-origsql]: - /schema/reference/pre-aggregations#type-originalsql -[self-select-pre-agg]: #selecting-the-pre-aggregation -[wiki-gcd]: https://en.wikipedia.org/wiki/Greatest_common_divisor diff --git a/docs/content/Caching/Lambda-Pre-Aggregations.mdx b/docs/content/Caching/Lambda-Pre-Aggregations.mdx deleted file mode 100644 index 758bb1d07b3fc..0000000000000 --- a/docs/content/Caching/Lambda-Pre-Aggregations.mdx +++ /dev/null @@ -1,226 +0,0 @@ ---- -title: Lambda pre-aggregations -permalink: /caching/pre-aggregations/lambda-pre-aggregations -category: Caching -menuOrder: 4 ---- - -Lambda pre-aggregations follow the -[Lambda architecture](https://en.wikipedia.org/wiki/Lambda_architecture) design -to union real-time and batch data. Cube acts as a serving layer and uses -pre-aggregations as a batch layer and source data or other pre-aggregations, -usually [streaming][streaming-pre-agg], as a speed layer. Due to this design, -lambda pre-aggregations **only** work with data that is newer than the existing -batched pre-aggregations. - - - -Lambda pre-aggregations only work with Cube Store. - - - -## Use cases - -Below we are looking at the most common examples of using lambda -pre-aggregations. - -### Batch and source data - -Batch data is coming from pre-aggregation and real-time data is coming from the -data source. - -
- Lambda pre-aggregation batch and source diagram -
- -First, you need to create pre-aggregations that will contain your batch data. In -the following example, we call it `batch`. Please note, it must have a -`time_dimension` and `partition_granularity` specified. Cube will use these -properties to union batch data with freshly-retrieved source data. - -You may also control the batch part of your data with the `build_range_start` and -`build_range_end` properties of a pre-aggregation to determine a specific window -for your batched data. - -Next, you need to create a lambda pre-aggregation. To do that, create -pre-aggregation with type `rollup_lambda`, specify rollups you would like to use -with `rollups` property, and finally set `union_with_source_data: true` to use -source data as a real-time layer. - -Please make sure that the lambda pre-aggregation definition comes first when -defining your pre-aggregations. - - - -```yaml -cubes: - - name: users - # ... - - pre_aggregations: - - name: lambda - type: rollup_lambda - union_with_source_data: true - rollups: - - CUBE.batch - - - name: batch - measures: - - users.count - dimensions: - - users.name - time_dimension: users.created_at - granularity: day - partition_granularity: day - build_range_start: - sql: SELECT '2020-01-01' - build_range_end: - sql: SELECT '2022-05-30' -``` - -```javascript -cube('users', { - // ... - - pre_aggregations: { - lambda: { - type: `rollup_lambda`, - union_with_source_data: true, - rollups: [CUBE.batch] - }, - - batch: { - measures: [users.count], - dimensions: [users.name], - time_dimension: users.created_at, - granularity: `day`, - partition_granularity: `day`, - build_range_start: { - sql: `SELECT '2020-01-01'` - }, - build_range_end: { - sql: `SELECT '2022-05-30'` - }, - }, - }, -}) -``` - - - -### Batch and streaming data - -In this scenario, batch data is comes from one pre-aggregation and real-time -data comes from a [streaming pre-aggregation][streaming-pre-agg]. - -
- Lambda pre-aggregation batch and streaming diagram -
- -You can use lambda pre-aggregations to combine data from multiple -pre-aggregations, where one pre-aggregation can have batch data and another -streaming. - - - -```yaml -cubes: - - name: streaming_users - # This cube uses a streaming SQL data source such as ksqlDB - # ... - - pre_aggregations: - - name: streaming - type: rollup - measures: - - CUBE.count - dimensions: - - CUBE.name - time_dimension: CUBE.created_at - granularity: day - partition_granularity: day - - - name: users - # This cube uses a data source such as ClickHouse or BigQuery - # ... - - pre_aggregations: - - name: batch_streaming_lambda - type: rollup_lambda - rollups: - - users.batch - - streaming_users.streaming - - - name: batch - type: rollup - measures: - - users.count - dimensions: - - users.name - time_dimension: users.created_at - granularity: day - partition_granularity: day - build_range_start: - sql: SELECT '2020-01-01' - build_range_end: - sql: SELECT '2022-05-30' -``` - -```javascript -// This cube uses a streaming SQL data source such as ksqlDB -cube('streaming_users', { - // ... - - pre_aggregations: { - streaming: { - type: `rollup`, - measures: [CUBE.count], - dimensions: [CUBE.name], - time_dimension: CUBE.created_at, - granularity: `day`, - partition_granularity: `day`, - }, - }, -}); - -// This cube uses a data source such as ClickHouse or BigQuery -cube('users', { - // ... 
- - pre_aggregations: { - batch_streaming_lambda: { - type: `rollup_lambda`, - rollups: [users.batch, streaming_users.streaming] - }, - - batch: { - type: `rollup`, - measures: [users.count], - dimensions: [users.name], - time_dimension: users.created_at, - granularity: `day`, - partition_granularity: `day`, - build_range_start: { - sql: `SELECT '2020-01-01'` - }, - build_range_end: { - sql: `SELECT '2022-05-30'` - }, - }, - }, -}); -``` - - - -[streaming-pre-agg]: /caching/using-pre-aggregations#streaming-pre-aggregations diff --git a/docs/content/Caching/Overview.mdx b/docs/content/Caching/Overview.mdx deleted file mode 100644 index 56462f9b73169..0000000000000 --- a/docs/content/Caching/Overview.mdx +++ /dev/null @@ -1,265 +0,0 @@ ---- -title: Caching Overview -menuTitle: Overview -permalink: /caching -category: Caching -menuOrder: 1 ---- - -
- Request vs Cube caching layers -
- -Cube provides a two-level caching system. The first level is **in-memory** cache -and is active by default. - -Cube's [in-memory cache](#in-memory-cache) acts as a buffer for your database -when there's a burst of requests hitting the same data from multiple concurrent -users while [pre-aggregations](#pre-aggregations) are designed to provide the -right balance between time to insight and querying performance. - -To reset the **in-memory** cache in development mode, just restart the server. - -The second level of caching is called **pre-aggregations**, and requires -explicit configuration to activate. - -We do not recommend changing the default **in-memory** caching configuration -unless it is necessary. To speed up query performance, consider using -**pre-aggregations**. - -## Pre-Aggregations - -Pre-aggregations is a layer of the aggregated data built and refreshed by Cube. -It can dramatically improve the query performance and provide a higher -concurrency. - - - -To start building pre-aggregations, depending on your data source, Cube may -require write access to the [pre-aggregations schema][ref-config-preagg-schema] -in the source database. In this case, Cube first builds pre-aggregations as -tables in the source database and then exports them into the pre-aggregations -storage. Please refer to the documentation for your specific driver to learn -more about read-only support and pre-aggregation build strategies. - - - -Pre-aggregations are defined in the data model. You can learn more about -defining pre-aggregations in [data modeling reference][ref-schema-ref-preaggs]. 
- - - -```yaml -cubes: - - name: orders - sql_table: orders - - measures: - - name: total_amount - sql: amount - type: sum - - dimensions: - - name: created_at - sql: created_at - type: time - - pre_aggregations: - - name: amount_by_created - measures: - - total_amount - time_dimension: created_at - granularity: month -``` - -```javascript -cube(`orders`, { - sql_table: `orders`, - - measures: { - total_amount: { - sql: `amount`, - type: `sum`, - }, - }, - - dimensions: { - created_at: { - sql: `created_at`, - type: `time`, - }, - }, - - pre_aggregations: { - amount_by_created: { - measures: [total_amount], - time_dimension: created_at, - granularity: `month`, - }, - }, -}); -``` - - - -## In-memory Cache - -Cube caches the results of executed queries using in-memory cache. The cache key -is a generated SQL statement with any existing query-dependent pre-aggregations. - -Upon receiving an incoming request, Cube first checks the cache using this key. -If nothing is found in the cache, the query is executed in the database and the -result set is returned as well as updating the cache. - -If an existing value is present in the cache and the `refresh_key` value for the -query hasn't changed, the cached value will be returned. Otherwise, an SQL query -will be executed against either the pre-aggregations storage or the source -database to populate the cache with the results and return them. - -### <--{"id" : "In-memory Cache"}--> Refresh Keys - -Cube takes great care to prevent unnecessary queries from hitting your database. -The first stage caching system caches query results, but Cube needs a way to -know if the data powering that query result has changed. If the underlying data -isn't any different, the cached result is valid and can be returned skipping an -expensive query, but if there is a difference, the query needs to be re-run and -its result cached. - -To aid with this, Cube defines a `refresh_key` for each cube. 
[Refresh -keys][ref-schema-ref-cube-refresh-key] are evaluated by Cube to assess if the -data needs to be refreshed. - -The following `refresh_key` tells Cube to refresh data every 5 minutes: - - - -```yaml -cubes: - - name: orders - # ... - - refresh_key: - every: 5 minute -``` - -```javascript -cube(`orders`, { - refresh_key: { - every: `5 minute`, - }, -}); -``` - - - -With the following `refresh_key`, Cube will only refresh the data if the value of -`MAX(created_at)` changes. By default, Cube will check this `refresh_key` -every 10 seconds: - - - -```yaml -cubes: - - name: orders - # ... - - refresh_key: - sql: SELECT MAX(created_at) FROM orders -``` - -```javascript -cube(`orders`, { - // ... - - refresh_key: { - sql: `SELECT MAX(created_at) FROM orders`, - }, -}); -``` - - - -By default, Cube will check and invalidate the cache in the background when in -[development mode][ref-development-mode]. In production environments, we -recommend [running a Refresh Worker as a separate -instance][ref-production-checklist-refresh]. - -We recommend enabling background cache invalidation in a separate Cube worker -for production deployments. Please consult the [Production -Checklist][ref-production-checklist] for more information. - -If background refresh is disabled, Cube will refresh the cache during query -execution. Since this could lead to delays in responding to end-users, we -recommend always enabling background refresh. - -### <--{"id" : "In-memory Cache"}--> Default Refresh Keys - -The default values for `refresh_key` are - -- `every: 2 minute` for BigQuery, Athena, Snowflake, and Presto. -- `every: 10 second` for all other databases. - -You can use a custom SQL query to check if a refresh is required by changing -the [`refresh_key`][ref-schema-ref-cube-refresh-key] property in a cube. Often, a -`MAX(updated_at_timestamp)` for OLTP data is a viable option, or examining a -metadata table for whatever system is managing the data to see when it last ran. 
- -### <--{"id" : "In-memory Cache"}--> Disabling the cache - -There's no straightforward way to disable caching in Cube. The reason is that -Cube not only stores cached values but also uses the cache as a point of -synchronization and coordination between nodes in a cluster. For the sake of -design simplicity, Cube doesn't distinguish client invocations, and all calls to -the data load API are idempotent. This provides excellent reliability and -scalability but has some drawbacks. One of those load data calls can't be traced -to specific clients, and as a consequence, there's no guaranteed way for a -client to initiate a new data loading query or know if the current invocation -wasn't initiated earlier by another client. Only Refresh Key freshness -guarantees are provided in this case. - -For situations like real-time analytics or responding to live user changes to -underlying data, the `refresh_key` query cache can prevent fresh data from -showing up immediately. For these situations, the cache can effectively be -disabled by setting the [`refresh_key.every`][ref-schema-ref-cube-refresh-key] -parameter to something very low, like `1 second`. - -## Inspecting Queries - -To inspect whether the query hits in-memory cache, pre-aggregation, or the -underlying data source, you can use the Playground or [Cube -Cloud][link-cube-cloud]. - -[Developer Playground][ref-dev-playground] can be used to inspect a single -query. To do that, click the "cache" button after executing the query. It will -show you the information about the `refresh_key` for the query and whether the -query uses any pre-aggregations. To inspect multiple queries or list existing -pre-aggregations, you can use [Cube Cloud][link-cube-cloud]. - -To inspect queries in the Cube Cloud, navigate to the "History" page. You can -filter queries by multiple parameters on this page, including whether they hit -the cache, pre-aggregations, or raw data. 
Additionally, you can click on the -query to see its details, such as time spent in the database, the database -queue's size at the point of query execution, generated SQL, query timeline, and -more. It will also show you the optimal pre-aggregations that could be used for -this query. - -To see existing pre-aggregations, navigate to the "Pre-Aggregations" page in the -Cube Cloud. The table shows all the pre-aggregations, the last refresh -timestamp, and the time spent to build the pre-aggregation. You can also inspect -every pre-aggregation's details: the list of queries it serves and all its -versions. - -[link-cube-cloud]: https://cube.dev/cloud -[ref-config-preagg-schema]: /config#pre-aggregations-schema -[ref-dev-playground]: /dev-tools/dev-playground -[ref-development-mode]: /configuration/overview#development-mode -[ref-production-checklist]: /deployment/production-checklist -[ref-production-checklist-refresh]: - /deployment/production-checklist#set-up-refresh-worker -[ref-schema-ref-cube-refresh-key]: /schema/reference/cube#refresh-key -[ref-schema-ref-preaggs]: /schema/reference/pre-aggregations diff --git a/docs/content/Caching/Running-in-Production.mdx b/docs/content/Caching/Running-in-Production.mdx deleted file mode 100644 index bcb0d6702743a..0000000000000 --- a/docs/content/Caching/Running-in-Production.mdx +++ /dev/null @@ -1,304 +0,0 @@ ---- -title: Running in production -permalink: /caching/running-in-production -category: Caching -menuOrder: 5 ---- - -Cube makes use of two different kinds of cache: - -- In-memory storage of query results -- Pre-aggregations - -Cube Store is enabled by default when running Cube in development mode. In -production, Cube Store **must** run as a separate process. The easiest way to do -this is to use the official Docker images for Cube and Cube Store. - - - -Using Windows? We **strongly** recommend using [WSL2 for Windows 10][link-wsl2] -to run the following commands. 
- - - -You can run Cube Store with Docker with the following command: - -```bash{promptUser: user} -docker run -p 3030:3030 cubejs/cubestore -``` - - - -Cube Store can further be configured via environment variables. To see a -complete reference, please consult the `CUBESTORE_*` environment variables in -the [Environment Variables reference][ref-config-env]. - - - -Next, run Cube and tell it to connect to Cube Store running on `localhost` (on -the default port `3030`): - -```bash{outputLines: 2-4} -docker run -p 4000:4000 \ - -e CUBEJS_CUBESTORE_HOST=localhost \ - -v ${PWD}:/cube/conf \ - cubejs/cube -``` - -In the command above, we're specifying `CUBEJS_CUBESTORE_HOST` to let Cube know -where Cube Store is running. - -You can also use Docker Compose to achieve the same: - -```yaml -version: '2.2' -services: - cubestore: - image: cubejs/cubestore:latest - environment: - - CUBESTORE_REMOTE_DIR=/cube/data - volumes: - - .cubestore:/cube/data - - cube: - image: cubejs/cube:latest - ports: - - 4000:4000 - environment: - - CUBEJS_CUBESTORE_HOST=localhost - depends_on: - - cubestore - links: - - cubestore - volumes: - - ./model:/cube/conf/model -``` - -## Architecture - -
- Cube Store cluster with Cube -
- -A Cube Store cluster consists of at least one Router and one or more Worker -instances. Cube sends queries to the Cube Store Router, which then distributes -the queries to the Cube Store Workers. The Workers then execute the queries and -return the results to the Router, which in turn returns the results to Cube. - -## Scaling - - - -Cube Store _can_ be run in a single instance mode, but this is usually -unsuitable for production deployments. For high concurrency and data throughput, -we **strongly** recommend running Cube Store as a cluster of multiple instances -instead. - - - -Scaling Cube Store for a higher concurrency is relatively simple when running in -cluster mode. Because [the storage layer](#storage) is decoupled from the query -processing engine, you can horizontally scale your Cube Store cluster for as -much concurrency as you require. - -In cluster mode, Cube Store runs two kinds of nodes: - -- one or more **router** nodes handle incoming client connections, manage - database metadata and serve simple queries. -- multiple **worker** nodes which execute SQL queries - -Cube Store querying performance is optimal when the count of partitions in a -single query is less than or equal to the worker count. For example, you have a -200 million rows table that is partitioned by day, which is ten daily Cube -partitions or 100 Cube Store partitions in total. The query sent by the user -contains filters, and the resulting scan requires reading 16 Cube Store -partitions in total. Optimal query performance, in this case, can be achieved -with 16 or more workers. You can use `EXPLAIN` and `EXPLAIN ANALYZE` SQL -commands to see how many partitions would be used in a specific Cube Store -query. - -Resources required for the main node and workers can vary depending on the -configuration. With default settings, you should expect to allocate at least 4 -CPUs and up to 8GB per main or worker node. 
- -The configuration required for each node can be found in the table below. More -information about these variables can be found [in the Environment Variables -reference][ref-config-env]. - -| Environment Variable | Specify on Router? | Specify on Worker? | -| ----------------------- | ------------------ | ------------------ | -| `CUBESTORE_SERVER_NAME` | Yes | Yes | -| `CUBESTORE_META_PORT` | Yes | - | -| `CUBESTORE_WORKERS` | Yes | Yes | -| `CUBESTORE_WORKER_PORT` | - | Yes | -| `CUBESTORE_META_ADDR` | - | Yes | - -`CUBESTORE_WORKERS` and `CUBESTORE_META_ADDR` variables should be set with -stable addresses, which should not change. You can use stable DNS names and put -load balancers in front of your worker and router instances to fulfill stable -name requirements in environments where stable IP addresses can't be guaranteed. - - - -To fully take advantage of the worker nodes in the cluster, we **strongly** -recommend using [partitioned pre-aggregations][ref-caching-partitioning]. - - - -A sample Docker Compose stack for the single machine setting this up might look -like: - -```yaml -version: '2.2' -services: - cubestore_router: - restart: always - image: cubejs/cubestore:latest - environment: - - CUBESTORE_SERVER_NAME=cubestore_router:9999 - - CUBESTORE_META_PORT=9999 - - CUBESTORE_WORKERS=cubestore_worker_1:9001,cubestore_worker_2:9001 - - CUBESTORE_REMOTE_DIR=/cube/data - volumes: - - .cubestore:/cube/data - cubestore_worker_1: - restart: always - image: cubejs/cubestore:latest - environment: - - CUBESTORE_SERVER_NAME=cubestore_worker_1:9001 - - CUBESTORE_WORKER_PORT=9001 - - CUBESTORE_META_ADDR=cubestore_router:9999 - - CUBESTORE_WORKERS=cubestore_worker_1:9001,cubestore_worker_2:9001 - - CUBESTORE_REMOTE_DIR=/cube/data - depends_on: - - cubestore_router - volumes: - - .cubestore:/cube/data - cubestore_worker_2: - restart: always - image: cubejs/cubestore:latest - environment: - - CUBESTORE_SERVER_NAME=cubestore_worker_2:9001 - - CUBESTORE_WORKER_PORT=9001 
- - CUBESTORE_META_ADDR=cubestore_router:9999 - - CUBESTORE_WORKERS=cubestore_worker_1:9001,cubestore_worker_2:9001 - - CUBESTORE_REMOTE_DIR=/cube/data - depends_on: - - cubestore_router - volumes: - - .cubestore:/cube/data - cube: - image: cubejs/cube:latest - ports: - - 4000:4000 - environment: - - CUBEJS_CUBESTORE_HOST=cubestore_router - depends_on: - - cubestore_router - volumes: - - .:/cube/conf -``` - -## Replication and High Availability - -The open-source version of Cube Store doesn't support replicating any of its -nodes. The router node and every worker node should always have only one -instance copy if served behind the load balancer or service address. Replication -will lead to undefined behavior of the cluster, including connection errors and -data loss. If any cluster node is down, it'll lead to a complete cluster outage. -If Cube Store replication and high availability are required, please consider -using Cube Cloud. - -## Storage - - - -Cube Store can only use one type of remote storage at runtime. - - - -Cube Store makes use of a separate storage layer for storing metadata as well as -for persisting pre-aggregations as Parquet files. Cube Store [can be configured -to use either AWS S3 or Google Cloud Storage][ref-config-env]. If desired, local -path on the server can also be used in case all Cube Store cluster nodes are -co-located on a single machine. - - - -Cube Store requires strong consistency guarantees from underlying distributed -storage. AWS S3, Google Cloud Storage, and Azure Blob Storage (Cube Cloud only) -are the only known implementations that provide strong consistency. Using other -implementations in production is discouraged and can lead to consistency and -data corruption errors. 
- - - -A simplified example using AWS S3 might look like: - -```yaml -version: '2.2' -services: - cubestore_router: - image: cubejs/cubestore:latest - environment: - - CUBESTORE_SERVER_NAME=cubestore_router:9999 - - CUBESTORE_META_PORT=9999 - - CUBESTORE_WORKERS=cubestore_worker_1:9001 - - CUBESTORE_S3_BUCKET= - - CUBESTORE_S3_REGION= - - CUBESTORE_AWS_ACCESS_KEY_ID= - - CUBESTORE_AWS_SECRET_ACCESS_KEY= - cubestore_worker_1: - image: cubejs/cubestore:latest - environment: - - CUBESTORE_SERVER_NAME=cubestore_worker_1:9001 - - CUBESTORE_WORKER_PORT=9001 - - CUBESTORE_META_ADDR=cubestore_router:9999 - - CUBESTORE_WORKERS=cubestore_worker_1:9001 - - CUBESTORE_S3_BUCKET= - - CUBESTORE_S3_REGION= - - CUBESTORE_AWS_ACCESS_KEY_ID= - - CUBESTORE_AWS_SECRET_ACCESS_KEY= - depends_on: - - cubestore_router -``` - -### Local Storage - -Separately from remote storage, Cube Store requires local scratch space to warm -up partitions by downloading Parquet files before querying them. By default, -this directory should be mounted to `.cubestore/data` dir inside contained and -can be configured by [CUBESTORE_DATA_DIR][ref-config-env] environment variable. -It is advised to use local SSDs for this scratch space to maximize querying -performance. - -### <--{"id" : "Storage"}--> AWS - -Cube Store can retrieve security credentials from instance metadata -automatically. This means you can skip defining the -`CUBESTORE_AWS_ACCESS_KEY_ID` and `CUBESTORE_AWS_SECRET_ACCESS_KEY` environment -variables. - - - -Cube Store currently does not take the key expiration time returned from -instance metadata into account; instead the refresh duration for the key is -defined by `CUBESTORE_AWS_CREDS_REFRESH_EVERY_MINS`, which is set to `180` by -default. - - - -## Security - -Cube Store currently does not have any in-built authentication mechanisms. For -this reason, we recommend running your Cube Store cluster on a network that only -allows requests from the Cube deployment. 
- -[link-wsl2]: https://docs.microsoft.com/en-us/windows/wsl/install-win10 -[ref-caching-partitioning]: /caching/using-pre-aggregations#partitioning -[ref-config-env]: /reference/environment-variables diff --git a/docs/content/Caching/Using-Pre-Aggregations.mdx b/docs/content/Caching/Using-Pre-Aggregations.mdx deleted file mode 100644 index 6dae060a03756..0000000000000 --- a/docs/content/Caching/Using-Pre-Aggregations.mdx +++ /dev/null @@ -1,911 +0,0 @@ ---- -title: Using pre-aggregations -permalink: /caching/using-pre-aggregations -category: Caching -menuOrder: 3 ---- - -Pre-aggregations is a powerful way to speed up your Cube queries. There are many -configuration options to consider. Please make sure to also check [the -Pre-Aggregations reference in the data modeling -section][ref-schema-ref-preaggs]. - -## Refresh Strategy - -Refresh strategy can be customized by setting the -[`refresh_key`][ref-schema-ref-preaggs-refresh-key] property for the -pre-aggregation. - -The default value of [`refresh_key`][ref-schema-ref-preaggs-refresh-key] is -`every: 1 hour`. It can be redefined either by overriding the default value of -the [`every` property][ref-schema-ref-preaggs-refresh-key-every]: - - - -```yaml -cubes: - - name: orders - # ... - - pre_aggregations: - - name: amount_by_created - type: rollup - measures: - - amount - time_dimension: created_at - granularity: month - refresh_key: - every: 12 hour -``` - -```javascript -cube(`orders`, { - // ... - - pre_aggregations: { - amount_by_created: { - type: `rollup`, - measures: [amount], - time_dimension: created_at, - granularity: `month`, - refresh_key: { - every: `12 hour`, - }, - }, - }, -}); -``` - - - -Or by providing a [`sql` property][ref-schema-ref-preaggs-refresh-key-sql] -instead, and leaving `every` unchanged from its default value: - - - -```yaml -cubes: - - name: orders - # ... 
- - pre_aggregations: - - name: amount_by_created - measures: - - amount - time_dimension: created_at - granularity: month - refresh_key: - # every will default to `10 seconds` here - sql: SELECT MAX(created_at) FROM orders -``` - -```javascript -cube(`orders`, { - // ... - - pre_aggregations: { - amount_by_created: { - measures: [amount], - time_dimension: created_at, - granularity: `month`, - refresh_key: { - // every will default to `10 seconds` here - sql: `SELECT MAX(created_at) FROM orders`, - }, - }, - }, -}); -``` - - - -Or both `every` and `sql` can be defined together: - - - -```yaml -cubes: - - name: orders - # ... - - pre_aggregations: - - name: amount_by_created - measures: - - amount - time_dimension: created_at - granularity: month - refresh_key: - every: 12 hour - sql: SELECT MAX(created_at) FROM orders -``` - -```javascript -cube(`orders`, { - // ... - - pre_aggregations: { - amount_by_created: { - measures: [amount], - time_dimension: created_at, - granularity: `month`, - refresh_key: { - every: `12 hour`, - sql: `SELECT MAX(created_at) FROM orders`, - }, - }, - }, -}); -``` - - - -When `every` and `sql` are used together, Cube will run the query from the `sql` -property on an interval defined by the `every` property. If the query returns -new results, then the pre-aggregation will be refreshed. - -## Rollup Only Mode - -To make Cube _only_ serve requests from pre-aggregations, the -[`CUBEJS_ROLLUP_ONLY`][ref-config-env-rolluponly] environment variable can be -set to `true` on an API instance. This will prevent serving data on API requests -from the source database. - - - -When using this configuration in a single node deployment (where the API -instance and [Refresh Worker][ref-deploy-refresh-wrkr] are configured on the -same host), requests made to the API that cannot be satisfied by a rollup throw -an error. Scheduled refreshes will continue to work in the background. 
- - - -## Partitioning - -[Partitioning][wiki-partitioning] is an extremely effective optimization for -accelerating pre-aggregations build and refresh time. It effectively "shards" -the data between multiple tables, splitting them by a defined attribute. Cube -can be configured to incrementally refresh only the last set of partitions -through the `updateWindow` property. This leads to faster refresh times due to -unnecessary data not being reloaded, and even reduced cost for some databases -like [BigQuery](/config/databases/google-bigquery) or -[AWS Athena](/config/databases/aws-athena). - -Any `rollup` pre-aggregation can be partitioned by time using the -`partition_granularity` property in [a pre-aggregation -definition][ref-schema-ref-preaggs]. In the example below, the -`partition_granularity` is set to `month`, which means Cube will generate -separate tables for each month's worth of data. Once built, it will continue to -refresh on a daily basis the last 3 months of data. - - - -```yaml -cubes: - - name: orders - # ... - - pre_aggregations: - - name: category_and_date - measures: - - count - - revenue - dimensions: - - category - time_dimension: created_at - granularity: day - partition_granularity: month - refresh_key: - every: 1 day - incremental: true - update_window: 3 months -``` - -```javascript -cube(`orders`, { - // ... - - preAggregations: { - category_and_date: { - measures: [count, revenue], - dimensions: [category], - time_dimension: created_at, - granularity: `day`, - partition_granularity: `month`, - refresh_key: { - every: `1 day`, - incremental: true, - update_window: `3 months` - } - }, - }, -}); -``` - - - -## Using Indexes - -### When to use indexes? - -Indexes are great when you filter large amounts of data across one or several -dimension columns. You can read more about them -[here][ref-schema-ref-preaggs-index]. 
- -### Best Practices - -To maximize performance, you can introduce an index per type of query so the set -of dimensions used in the query overlap as much as possible with the ones -defined in the index. Measures are traditionally only used in indexes if you -plan to filter a measured value and the cardinality of the possible values of -the measure is low. - -The order in which columns are specified in the index is **very** important; -suboptimal ordering can lead to diminished performance. To improve the -performance of an index the main thing to consider is the order of the columns -defined in it. - -The rule of thumb for index column order is: -- Single value filters come first -- `GROUP BY` columns come second -- Everything else used in the query comes afterward - -**Example:** - -Suppose you have a pre-aggregation that has millions of rows with the following -structure: - -| timestamp | product_name | product_category | zip_code | order_total | -| ------------------- | ------------- | ---------------- | -------- | ----------- | -| 2023-01-01 10:00:00 | Plastic Chair | Furniture | 88523 | 2000 | -| 2023-01-01 10:00:00 | Keyboard | Electronics | 88523 | 1000 | -| 2023-01-01 10:00:00 | Mouse | Electronics | 88523 | 800 | -| 2023-01-01 11:00:00 | Plastic Chair | Furniture | 88524 | 3000 | -| 2023-01-01 11:00:00 | Keyboard | Electronics | 88524 | 2000 | - -The pre-aggregation code would look as follows: - - - -```javascript -cube('orders', { - // ... - - pre_aggregations: { - main: { - measures: [order_total], - dimensions: [product_name, product_category, zip_code], - time_dimension: timestamp, - granularity: `hour`, - partition_granularity: `day`, - allow_non_strict_date_range_match: true, - refresh_key: { - every: `1 hour`, - incremental: true, - update_window: `1 day`, - }, - build_range_start: { - sql: `SELECT DATE_SUB(NOW(), 365)`, - }, - build_range_end: { - sql: `SELECT NOW()`, - }, - }, - }, -}); -``` - -```yaml -cubes: - - name: orders - # ... 
- - pre_aggregations: - - name: main - measures: - - order_total - dimensions: - - product_name - - product_category - - zip_code - time_dimension: timestamp - granularity: hour - partition_granularity: day - allow_non_strict_date_range_match: true - refresh_key: - every: 1 hour - incremental: true - update_window: 1 day - build_range_start: - sql: SELECT DATE_SUB(NOW(), 365) - build_range_end: - sql: SELECT NOW() -``` - - - -You run the following query on a regular basis, with the only difference between -queries being the filter values: - -```JSON -{ - "measures": [ - "orders.order_total" - ], - "timeDimensions": [ - { - "dimension": "orders.timestamp", - "granularity": "hour", - "dateRange": [ - "2022-12-14T06:00:00.000", - "2023-01-13T06:00:00.000" - ] - } - ], - "order": { - "orders.timestamp": "asc" - }, - "filters": [ - { - "member": "orders.product_category", - "operator": "equals", - "values": [ - "Electronics" - ] - }, - { - "member": "orders.product_name", - "operator": "equals", - "values": [ - "Keyboard", - "Mouse" - ] - } - ], - "dimensions": [ - "orders.zip_code" - ], - "limit": 10000 -} -``` - -After running this on a dataset with millions of records you find that it's -taking a long time to run, so you decide to add an index to target this specific -query. Taking into account the best practices mentioned previously you should -define an index as follows: - - - -```javascript -cube('orders', { - // ... - - pre_aggregations: { - main: { - // ... - - indexes: { - category_productname_zipcode_index: { - columns: [product_category, zip_code, product_name], - }, - }, - }, - }, -}); -``` - -```yaml -cubes: - - name: orders - # ... - - pre_aggregations: - - name: main - # ... 
- - indexes: - - name: category_productname_zipcode_index - columns: - - product_category - - zip_code - - product_name -``` - - - -Then the data within `category_productname_zipcode_index` would look like: - -| product_category | product_name | zip_code | timestamp | order_total | -| ---------------- | ------------- | -------- | ------------------- | ----------- | -| Furniture | Plastic Chair | 88523 | 2023-01-01 10:00:00 | 2000 | -| Electronics | Keyboard | 88523 | 2023-01-01 10:00:00 | 1000 | -| Electronics | Mouse | 88523 | 2023-01-01 10:00:00 | 800 | -| Furniture | Plastic Chair | 88524 | 2023-01-01 11:00:00 | 3000 | -| Electronics | Keyboard | 88524 | 2023-01-01 11:00:00 | 2000 | - -`product_category` column comes first as it's a single value filter. -Then `zip_code` as it's `GROUP BY` column. -`product_name` comes last as it's a multiple value filter. - -It might sound counter-intuitive to have `GROUP BY` columns before filter ones, however Cube Store always performs scans on sorted data, and if `GROUP BY` matches index ordering, merge sort-based algorithms are used for querying, which are usually much faster than hash-based group by in case of index ordering doesn't match the query. If in doubt, always use `EXPLAIN` and `EXPLAIN ANALYZE` in Cube Store to figure out the final query plan. - -### Aggregated indexes - -Aggregated indexes can be defined as well. You can read more about them -[here][ref-schema-ref-preaggs-index]. - -Example: - - - -```javascript -cube('orders', { - // ... - - pre_aggregations: { - main: { - // ... - - indexes: { - // ... - - zip_code_index: { - columns: [zip_code], - type: `aggregate`, - }, - }, - }, - }, -}); -``` - -```yaml -cubes: - - name: orders - # ... - - pre_aggregations: - - name: main - # ... - - indexes: - # ... 
- - - name: zip_code_index - columns: - - zip_code - type: aggregate -``` - - - -And the data for `zip_code_index` would look like the following: - -| zip_code | order_total | -| -------- | ----------- | -| 88523 | 3800 | -| 88524 | 5000 | - -## Inspecting Pre-Aggregations - -Cube Store partially supports the MySQL protocol. This allows you to execute -simple queries using a familiar SQL syntax. You can connect using the MySQL CLI -client, for example: - -```bash{promptUser: user} -mysql -h --user=cubestore -pcubestore -``` - - - -Only Linux and Mac OS versions of MySQL client are supported as of right now. -You can install one on ubuntu using `apt-get install default-mysql-client` -command or `brew install mysql-client` on Mac OS. Windows versions of the MySQL -client aren't supported. - - - -To check which pre-aggregations are managed by Cube Store, you could run the -following query: - -```sql -SELECT * FROM information_schema.tables; -+----------------------+-----------------------------------------------+ -| table_schema | table_name | -+----------------------+-----------------------------------------------+ -| dev_pre_aggregations | orders_main20190101_23jnqarg_uiyfxd0f_1gifflf | -| dev_pre_aggregations | orders_main20190301_24ph0a1c_utzntnv_1gifflf | -| dev_pre_aggregations | orders_main20190201_zhrh5kj1_rkmsrffi_1gifflf | -| dev_pre_aggregations | orders_main20191001_mdw2hxku_waxajvwc_1gifflf | -| dev_pre_aggregations | orders_main20190701_izc2tl0h_bxsf1zlb_1gifflf | -+----------------------+-----------------------------------------------+ -5 rows in set (0.01 sec) -``` - -These pre-aggregations are stored as Parquet files under the `.cubestore/` -folder in the project root during development. - -### <--{"id" : "Inspecting Pre-Aggregations"}--> EXPLAIN queries - -Cube Store's MySQL protocol also supports `EXPLAIN` and `EXPLAIN ANALYZE` -queries both of which are useful for determining how much processing a query -will require. 
- -`EXPLAIN` queries show the logical plan for a query: - -```sql - EXPLAIN SELECT orders__platform, orders__gender, sum(orders__count) FROM dev_pre_aggregations.orders_general_o32v4dvq_vbyemtl2_1h5hs8r - GROUP BY orders__gender, orders__platform; -+-------------------------------------------------------------------------------------------------------------------------------------+ -| logical plan | -+--------------------------------------------------------------------------------------------------------------------------------------+ -| Projection, [dev_pre_aggregations.orders_general_o32v4dvq_vbyemtl2_1h5hs8r.orders__platform, dev_pre_aggregations.orders_general_o32v4dvq_vbyemtl2_1h5hs8r.orders__gender, SUM(dev_pre_aggregations.orders_general_o32v4dvq_vbyemtl2_1h5hs8r.orders__count)] - Aggregate - ClusterSend, indices: [[96]] - Scan dev_pre_aggregations.orders_general_o32v4dvq_vbyemtl2_1h5hs8r, source: CubeTable(index: orders_general_plat_gender_o32v4dvq_vbyemtl2_1h5hs8r:96:[123, 126]), fields: [orders__gender, orders__platform, orders__count] | -+-------------------------------------------------------------------------------------------------------------------------------------+ -``` - -`EXPLAIN ANALYZE` queries show the physical plan for the router and all workers -used for query processing: - -```sql - EXPLAIN ANALYZE SELECT orders__platform, orders__gender, sum(orders__count) FROM dev_pre_aggregations.orders_general_o32v4dvq_vbyemtl2_1h5hs8r - GROUP BY orders__gender, orders__platform - -+-----------+-----------------+--------------------------------------------------------------------------------------------------------------------------+ -| node type | node name | physical plan | -+-----------+-----------------+--------------------------------------------------------------------------------------------------------------------------+ -| router | | Projection, [orders__platform, orders__gender, 
SUM(dev_pre_aggregations.orders_general_o32v4dvq_vbyemtl2_1h5hs8r.orders__count)@2:SUM(orders__count)] - FinalInplaceAggregate - ClusterSend, partitions: [[123, 126]] | -| worker | 127.0.0.1:10001 | PartialInplaceAggregate - Merge - Scan, index: orders_general_plat_gender_o32v4dvq_vbyemtl2_1h5hs8r:96:[123, 126], fields: [orders__gender, orders__platform, orders__count] - Projection, [orders__gender, orders__platform, orders__count] - ParquetScan, files: /.cubestore/data/126-0qtyakym.parquet | -+-----------+-----------------+--------------------------------------------------------------------------------------------------------------------------+ -``` - -## Pre-Aggregations Storage - -The default pre-aggregations storage in Cube is its own purpose-built storage -layer: Cube Store. - -Alternatively, you can store pre-aggregations **internally** in the source -database. To store a pre-aggregation internally, set `external: false` in -pre-aggregation definition. - -Please note, that [original_sql][ref-original-sql] pre-aggregations are stored -**internally** by default. It is not recommended to store `original_sql` -pre-aggregations in Cube Store. - -## Joins between pre-aggregations - - - -This feature is in Preview and the API may change in a future release. Joining -pre-aggregations **only** works with databases of the same type, support for -joining pre-aggregations from different databases is coming soon. - - - -When making a query that joins data from two different cubes, Cube can use -pre-aggregations instead of running the base SQL queries. To get started, first -ensure both cubes have valid pre-aggregations: - - - -```yaml -cubes: - - name: orders - # ... - - pre_aggregations: - - name: orders_rollup - measures: - - CUBE.count - dimensions: - - CUBE.user_id - - CUBE.status - time_dimension: CUBE.created_at - granularity: day - - joins: - - name: users - sql: "{CUBE.user_id} = ${users.id}" - relationship: many_to_one - - - name: users - # ... 
- - pre_aggregations: - - name: users_rollup - dimensions: - - CUBE.id - - CUBE.name -``` - -```javascript -cube(`orders`, { - // ... - - pre_aggregations: { - orders_rollup: { - measures: [CUBE.count], - dimensions: [CUBE.user_id, CUBE.status], - time_dimension: CUBE.created_at, - granularity: `day`, - }, - }, - - joins: { - users: { - sql: `${CUBE.user_id} = ${users.id}`, - relationship: `many_to_one` - }, - }, -}); - -cube(`users`, { - // ... - - pre_aggregations: { - users_rollup: { - dimensions: [CUBE.id, CUBE.name], - }, - }, -}); -``` - - - -Before we continue, let's add an index to the `orders_rollup` pre-aggregation so -that the `rollup_join` pre-aggregation can work correctly: - - - -```yaml -cubes: - - name: orders - # ... - - pre_aggregations: - - name: orders_rollup - # ... - - indexes: - - name: user_index - columns: - - CUBE.user_id -``` - -```javascript -cube(`orders`, { - // ... - - pre_aggregations: { - orders_rollup: { - // ... - - indexes: { - user_index: { - columns: [CUBE.user_id], - }, - }, - }, - }, -}); -``` - - - -Now we can add a new pre-aggregation of type `rollup_join` to the `orders` cube: - - - -```yaml -cubes: - - name: orders - # ... - - pre_aggregations: - # ... - - - name: orders_with_users_rollup - type: rollup_join - measures: - - CUBE.count - dimensions: - - users.name - time_dimension: CUBE.created_at - granularity: day - rollups: - - users.users_rollup - - CUBE.orders_rollup -``` - -```javascript -cube(`orders`, { - // ... - - pre_aggregations: { - // ... 
- - orders_with_users_rollup: { - type: `rollup_join`, - measures: [CUBE.count], - dimensions: [users.name], - time_dimension: CUBE.created_at, - granularity: `day`, - rollups: [users.users_rollup, CUBE.orders_rollup], - }, - }, -}); -``` - - - -With all of the above set up, making a query such as the following will now use -`orders.orders_rollup` and `users.users_rollup`, avoiding a database request: - -```json -{ - "dimensions": ["users.name"], - "timeDimensions": [ - { - "dimension": "orders.created_at", - "dateRange": "This month" - } - ], - "order": { - "orders.count": "desc" - }, - "measures": ["orders.count"] -} -``` - -## Pre-Aggregation Build Strategies - - - -For ideal performance, pre-aggregations should be built using a dedicated -Refresh Worker. [See here for more details][ref-prod-list-refresh]. - - - -Cube supports three different strategies for building pre-aggregations. To see -which strategies your database supports, please refer to its individual page -from [Connecting to the Database][ref-config-db]. - -### <--{"id" : "Pre-Aggregation Build Strategies"}--> Simple - -When using the simple strategy, Cube will use the source database as a temporary -staging area for writing pre-aggregations to determine column types. The data is -loaded back into memory before writing them to Cube Store (or an external -database). - - - -For larger datasets, we strongly recommend using the [Batching][self-batching] -or [Export Bucket][self-export-bucket] strategies instead. - - - -
- Internal vs External vs External with Cube Store diagram -
- -### <--{"id" : "Pre-Aggregation Build Strategies"}--> Batching - -Batching is a more performant strategy where Cube sends compressed CSVs for Cube -Store to ingest. - -
- Internal vs External vs External with Cube Store diagram -
- -The performance scales to the amount of memory available on the Cube instance. -Batching is automatically enabled for any databases that can support it. - -### <--{"id" : "Pre-Aggregation Build Strategies"}--> Export bucket - - - -The export bucket strategy requires permission to execute `CREATE TABLE` -statements in the data source as part of the pre-aggregation build process. - - - - - -Do not confuse the export bucket with the Cube Store storage bucket. -Those are two separate storages and should never be mixed. - - - -When dealing with larger pre-aggregations (more than 100k rows), performance can -be significantly improved by using an export bucket. This allows the source -database to temporarily materialize the data locally, which is then loaded into -Cube Store in parallel: - -
- Internal vs External vs External with Cube Store diagram -
- -Enabling the export bucket functionality requires extra configuration; please -refer to the database-specific documentation for more details: - -- [AWS Athena][ref-connect-db-athena] -- [AWS Redshift][ref-connect-db-redshift] -- [BigQuery][ref-connect-db-bigquery] -- [Snowflake][ref-connect-db-snowflake] - -When using cloud storage, it is important to correctly configure any data -retention policies to clean up the data in the export bucket as Cube does not -currently manage this. For most use-cases, 1 day is sufficient. - -## Streaming pre-aggregations - -Streaming pre-aggregations are different from traditional pre-aggregations in -the way they are being updated. Traditional pre-aggregations follow the “pull” -model — Cube **pulls updates** from the data source based on some cadence and/or -condition. Streaming pre-aggregations follow the “push” model — Cube -**subscribes to the updates** from the data source and always keeps -pre-aggregation up to date. - -You don’t need to define `refresh_key` for streaming pre-aggregations. Whether -pre-aggregation is streaming or not is defined by the data source. - -Currently, Cube supports only one streaming data source - -[ksqlDB](/config/databases/ksqldb). All pre-aggregations where data source is -ksqlDB are streaming. - -We are working on supporting more data sources for streaming pre-aggregations, -please [let us know](https://cube.dev/contact) if you are interested in early -access to any of these drivers or would like Cube to support any other SQL -streaming engine. 
- -[ref-caching-in-mem-default-refresh-key]: /caching#default-refresh-keys -[ref-config-db]: /config/databases -[ref-config-driverfactory]: /config#driver-factory -[ref-config-env-rolluponly]: /reference/environment-variables#cubejs-rollup-only -[ref-config-extdriverfactory]: /config#external-driver-factory -[ref-connect-db-athena]: /config/databases/aws-athena -[ref-connect-db-redshift]: /config/databases/aws-redshift -[ref-connect-db-bigquery]: /config/databases/google-bigquery -[ref-connect-db-mysql]: /config/databases/mysql -[ref-connect-db-postgres]: /config/databases/postgres -[ref-connect-db-snowflake]: /config/databases/snowflake -[ref-schema-timedimension]: /types-and-formats#types-time -[ref-schema-ref-preaggs]: /schema/reference/pre-aggregations -[ref-schema-ref-preaggs-index]: /schema/reference/pre-aggregations#indexes -[ref-schema-ref-preaggs-refresh-key]: - /schema/reference/pre-aggregations#refresh-key -[ref-schema-ref-preaggs-refresh-key-every]: - /schema/reference/pre-aggregations#refresh-key-every -[ref-schema-ref-preaggs-refresh-key-sql]: - /schema/reference/pre-aggregations#refresh-key-sql -[ref-deploy-refresh-wrkr]: /deployment/overview#refresh-worker -[ref-schema-ref-preaggs-sched-refresh]: - /schema/reference/pre-aggregations#scheduled-refresh -[ref-prod-list-refresh]: /deployment/production-checklist#set-up-refresh-worker -[ref-original-sql]: - /schema/reference/pre-aggregations#parameters-type-originalsql -[self-batching]: #batching -[self-export-bucket]: #export-bucket -[wiki-partitioning]: https://en.wikipedia.org/wiki/Partition_(database) diff --git a/docs/content/Configuration/Advanced/Multiple-Data-Sources.mdx b/docs/content/Configuration/Advanced/Multiple-Data-Sources.mdx deleted file mode 100644 index b7956480fc01d..0000000000000 --- a/docs/content/Configuration/Advanced/Multiple-Data-Sources.mdx +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Multiple data sources -permalink: /config/multiple-data-sources -category: Configuration 
-subCategory: Advanced -menuOrder: 2 ---- - -Cube supports connecting to multiple data sources, so that different -[cubes](/schema/reference/cube) reference data from different databases. - -Usually, data sources are configured **statically** (see below). However, Cube -can also look up data sources **dynamically**, which is useful in complex -scenarios involving [multitenancy][ref-config-multitenancy]. - -## Environment variables - -Declare the list of data sources using the `CUBEJS_DATASOURCES` environment -variable, then use -[decorated environment variables](#decorated-environment-variables) to configure -each data source: - -```dotenv -CUBEJS_DATASOURCES=default,datasource1 -CUBEJS_DB_TYPE=postgres -CUBEJS_DB_NAME=ecom -CUBEJS_DB_HOST=localhost -CUBEJS_DS_DATASOURCE1_DB_TYPE=postgres -CUBEJS_DS_DATASOURCE1_DB_NAME=ecom -CUBEJS_DS_DATASOURCE1_DB_HOST=remotehost -``` - - - -Cube expects that the `default` data source is **always** defined. Ensure that -`CUBEJS_DB_*` environment variables are set **or** that the `default` data -source is defined using [`driverFactory`][ref-config-ref-driverfactory] in your -`cube.js` file. - - - -### Decorated environment variables - -Cube allows database-specific environment variables to be decorated with a data -source name: - -```dotenv -CUBEJS_[DS_<DATASOURCE>_]<ENV_VAR> -``` - -For example, using the `datasource1` data source, `CUBEJS_DB_TYPE` could be -decorated as: - -```dotenv -CUBEJS_DS_DATASOURCE1_DB_TYPE=postgres -``` - -For more information on environment variables that support decoration, check the -[environment variables reference][ref-config-ref-env] or [database-specific -pages][ref-config-db]. - -## Data model - -Use the [`data_source`](/schema/reference/cube#parameters-data-source) property -to set a data source for each cube: - - - -```yaml -cubes: - - name: orders - # ... - - data_source: default - - - name: orders_from_other_data_source - # ... - - data_source: other_data_source -``` - -```javascript -cube(`orders`, { - // ... 
- - data_source: `default`, -}); - -cube(`orders_from_other_data_source`, { - // ... - - data_source: `other_data_source`, -}); -``` - - - -[ref-config-ref-env]: /reference/environment-variables -[ref-config-ref-driverfactory]: /config#options-reference-driver-factory -[ref-config-db]: /config/databases -[ref-config-multitenancy]: - /config/multitenancy#multitenancy-multitenancy-vs-multiple-data-sources - -## Cube Cloud - -Follow these steps to connect to multiple data sources in Cube Cloud: - -- Set up the `default` database connection when creating a new deployment. -- Ensure you have the correct [multitenancy](/config/multitenancy) configuration - in your `cube.js` file. -- Configure the corresponding environment variables in Settings → - Environment variables. diff --git a/docs/content/Configuration/Advanced/Multitenancy.mdx b/docs/content/Configuration/Advanced/Multitenancy.mdx deleted file mode 100644 index d7943a37cbffc..0000000000000 --- a/docs/content/Configuration/Advanced/Multitenancy.mdx +++ /dev/null @@ -1,391 +0,0 @@ ---- -title: Multitenancy -permalink: /config/multitenancy -category: Configuration -subCategory: Advanced -menuOrder: 3 ---- - -Cube supports multitenancy out of the box, both on database and data model -levels. Multiple drivers are also supported, meaning that you can have one -customer’s data in MongoDB and others in Postgres with one Cube instance. - -There are 6 [configuration options][ref-config-opts] you can leverage to make -your multitenancy setup. You can use all of them or just a couple, depending on -your specific case. The options are: - -- `contextToAppId` -- `contextToOrchestratorId` -- `driverFactory` -- `repositoryFactory` -- `preAggregationsSchema` -- `queryRewrite` - -All of the above options are functions, which you provide to Cube in the -[`cube.js` configuration file][ref-config]. 
The functions accept one argument - -a context object, which has a [`securityContext`][ref-config-security-ctx] -property where you can provide all the necessary data to identify a user e.g., -organization, app, etc. By default, the -[`securityContext`][ref-config-security-ctx] is defined by [Cube API -Token][ref-security]. - -There are several multitenancy setup scenarios that can be achieved by using -combinations of these configuration options. - -### <--{"id" : "Multitenancy"}--> Multitenancy vs Multiple Data Sources - -In cases where your Cube data model is spread across multiple different data -sources, consider using the [`data_source` cube property][ref-cube-datasource] -instead of multitenancy. Multitenancy is designed for cases where you need to -serve different datasets for multiple users, or tenants which aren't related to -each other. - -On the other hand, multitenancy can be used for scenarios where users need to -access the same data but from different databases. The multitenancy and multiple -data sources features aren't mutually exclusive and can be used together. - - - -A `default` data source **must** exist and be configured. It is used to resolve -target query data source for now. This behavior **will** be changed in future -releases. 
- - - -A simple configuration with two data sources might look like: - -**cube.js:** - -```javascript -module.exports = { - driverFactory: ({ dataSource } = {}) => { - if (dataSource === 'db1') { - return { - type: 'postgres', - database: process.env.DB1_NAME, - host: process.env.DB1_HOST, - user: process.env.DB1_USER, - password: process.env.DB1_PASS, - port: process.env.DB1_PORT, - }; - } else { - return { - type: 'postgres', - database: process.env.DB2_NAME, - host: process.env.DB2_HOST, - user: process.env.DB2_USER, - password: process.env.DB2_PASS, - port: process.env.DB2_PORT, - }; - } - }, -}; -``` - -A more advanced example that uses multiple [data sources][ref-config-db] could -look like: - -**cube.js:** - -```javascript -module.exports = { - driverFactory: ({ dataSource } = {}) => { - if (dataSource === 'web') { - return { - type: 'athena', - database: dataSource, - - // ... - }; - } else if (dataSource === 'googleAnalytics') { - return { - type: 'bigquery', - - // ... - }; - } else if (dataSource === 'financials') { - return { - type: 'postgres', - database: 'financials', - host: 'financials-db.acme.com', - user: process.env.FINANCIALS_DB_USER, - password: process.env.FINANCIALS_DB_PASS, - }; - } else { - return { - type: 'postgres', - - // ... - }; - } - }, -}; -``` - -More information can be found on the [Multiple Data Sources -page][ref-config-multi-data-src]. - -### <--{"id" : "Multitenancy"}--> queryRewrite vs Multitenant Compile Context - -As a rule of thumb, the [`queryRewrite`][ref-config-query-rewrite] should be -used in scenarios when you want to define row-level security within the same -database for different users of such database. For example, to separate access -of two e-commerce administrators who work on different product categories within -the same e-commerce store, you could configure your project as follows. 
- -Use the following `cube.js` configuration file: - -```javascript -module.exports = { - queryRewrite: (query, { securityContext }) => { - if (securityContext.categoryId) { - query.filters.push({ - member: 'products.category_id', - operator: 'equals', - values: [securityContext.categoryId], - }); - } - return query; - }, -}; -``` - -Also, you can use a data model like this: - - - -```yaml -cubes: - - name: products - sql_table: products -``` - -```javascript -cube(`products`, { - sql_table: `products`, -}); -``` - - - -On the other hand, multi-tenant [`COMPILE_CONTEXT`][ref-cube-security-ctx] -should be used when users need access to different databases. For example, if -you provide SaaS ecommerce hosting and each of your customers have a separate -database, then each e-commerce store should be modeled as a separate tenant. - - - -```yaml -cubes: - - name: products - sql_table: "{COMPILE_CONTEXT.security_context.userId}.products" -``` - -```javascript -cube(`products`, { - sql_table: `${COMPILE_CONTEXT.security_context.userId}.products` -}); -``` - - - -### <--{"id" : "Multitenancy"}--> Running in Production - -Each unique id generated by `contextToAppId` or `contextToOrchestratorId` will -generate a dedicated set of resources, including data model compile cache, SQL -compile cache, query queues, in-memory result caching, etc. Depending on your -data model complexity and usage patterns, those resources can have a pretty -sizable memory footprint ranging from single-digit MBs on the lower end and -dozens of MBs on the higher end. So you should make sure Node VM has enough -memory reserved for that. - -There're multiple strategies in terms of memory resource utilization here. The -first one is to bucket your actual tenants into variable-size buckets with -assigned `contextToAppId` or `contextToOrchestratorId` by some bucketing rule. -For example, you can bucket your biggest tenants in separate buckets and all the -smaller ones into a single bucket. 
This way, you'll end up with a very small -count of buckets that will easily fit a single node. - -Another strategy is to split all your tenants between different Cube nodes and -route traffic between them so that each Cube API node serves only its own set of -tenants and never serves traffic for another node. In that case, memory usage is -limited by the number of tenants served by each node. Cube Cloud utilizes -precisely this approach for scaling. Please note that in this case, you should -also split refresh workers and assign appropriate `scheduledRefreshContexts` to -them. - -## Same DB Instance with per Tenant Row Level Security - -Per tenant row-level security can be achieved by configuring -[`queryRewrite`][ref-config-query-rewrite], which adds a tenant identifier -filter to the original query. It uses the -[`securityContext`][ref-config-security-ctx] to determine which tenant is -requesting data. This way, every tenant starts to see their own data. However, -resources such as query queue and pre-aggregations are shared between all -tenants. - -**cube.js:** - -```javascript -module.exports = { - queryRewrite: (query, { securityContext }) => { - const user = securityContext; - if (user.id) { - query.filters.push({ - member: 'users.id', - operator: 'equals', - values: [user.id], - }); - } - return query; - }, -}; -``` - -## Multiple DB Instances with Same Data Model - -Let's consider an example where we store data for different users in different -databases, but on the same Postgres host. The database name format is -`my_app__`, so `my_app_1_2` is a valid database name. - -To make it work with Cube, first we need to pass the `appId` and `userId` as -context to every query. We should first ensure our JWTs contain those properties -so we can access them through the [security context][ref-config-security-ctx]. 
- -```javascript -const jwt = require('jsonwebtoken'); -const CUBE_API_SECRET = 'secret'; - -const cubejsToken = jwt.sign({ appId: '1', userId: '2' }, CUBE_API_SECRET, { - expiresIn: '30d', -}); -``` - -Now, we can access them through the [`securityContext`][ref-config-security-ctx] -property inside the context object. Let's use -[`contextToAppId`][ref-config-ctx-to-appid] and -[`contextToOrchestratorId`][ref-config-ctx-to-orch-id] to create a dynamic Cube -App ID and Orchestrator ID for every combination of `appId` and `userId`, as -well as defining [`driverFactory`][ref-config-driverfactory] to dynamically -select the database, based on the `appId` and `userId`: - - - -The App ID (the result of [`contextToAppId`][ref-config-ctx-to-appid]) is used -as a caching key for various in-memory structures like data model compilation -results, connection pool. The Orchestrator ID (the result of -[`contextToOrchestratorId`][ref-config-ctx-to-orch-id]) is used as a caching key -for database connections, execution queues and pre-aggregation table caches. Not -declaring these properties will result in unexpected caching issues such as the -data model or data of one tenant being used for another. - - - -**cube.js:** - -```javascript -module.exports = { - contextToAppId: ({ securityContext }) => - `CUBEJS_APP_${securityContext.appId}_${securityContext.userId}`, - contextToOrchestratorId: ({ securityContext }) => - `CUBEJS_APP_${securityContext.appId}_${securityContext.userId}`, - driverFactory: ({ securityContext }) => ({ - type: 'postgres', - database: `my_app_${securityContext.appId}_${securityContext.userId}`, - }), -}; -``` - -## Same DB Instance with per Tenant Pre-Aggregations - -To support per-tenant pre-aggregation of data within the same database instance, -you should configure the [`preAggregationsSchema`][ref-config-preagg-schema] -option in your `cube.js` configuration file. 
You should use also -[`securityContext`][ref-config-security-ctx] to determine which tenant is -requesting data. - -**cube.js:** - -```javascript -module.exports = { - contextToAppId: ({ securityContext }) => - `CUBEJS_APP_${securityContext.userId}`, - preAggregationsSchema: ({ securityContext }) => - `pre_aggregations_${securityContext.userId}`, -}; -``` - -## Multiple Data Models and Drivers - -What if for application with ID 3, the data is stored not in Postgres, but in -MongoDB? - -We can instruct Cube to connect to MongoDB in that case, instead of Postgres. To -do this, we'll use the [`driverFactory`][ref-config-driverfactory] option to -dynamically set database type. We will also need to modify our -[`securityContext`][ref-config-security-ctx] to determine which tenant is -requesting data. Finally, we want to have separate data models for every -application. We can use the [`repositoryFactory`][ref-config-repofactory] option -to dynamically set a repository with data model files depending on the `appId`: - -**cube.js:** - -```javascript -const { FileRepository } = require('@cubejs-backend/server-core'); - -module.exports = { - contextToAppId: ({ securityContext }) => - `CUBEJS_APP_${securityContext.appId}_${securityContext.userId}`, - contextToOrchestratorId: ({ securityContext }) => - `CUBEJS_APP_${securityContext.appId}_${securityContext.userId}`, - driverFactory: ({ securityContext }) => { - if (securityContext.appId === 3) { - return { - type: 'mongobi', - database: `my_app_${securityContext.appId}_${securityContext.userId}`, - port: 3307, - }; - } else { - return { - type: 'postgres', - database: `my_app_${securityContext.appId}_${securityContext.userId}`, - }; - } - }, - repositoryFactory: ({ securityContext }) => - new FileRepository(`model/${securityContext.appId}`), -}; -``` - -## Scheduled Refreshes for Pre-Aggregations - -If you need scheduled refreshes for your pre-aggregations in a multi-tenant -deployment, ensure you have configured 
-[`scheduledRefreshContexts`][ref-config-refresh-ctx] correctly. You may also -need to configure [`scheduledRefreshTimeZones`][ref-config-refresh-tz]. - - - -Leaving [`scheduledRefreshContexts`][ref-config-refresh-ctx] unconfigured will -lead to issues where the security context will be `undefined`. This is because -there is no way for Cube to know how to generate a context without the required -input. - - - -[ref-config]: /config -[ref-config-opts]: /config#options-reference -[ref-config-db]: /config/databases -[ref-config-driverfactory]: /config#driver-factory -[ref-config-repofactory]: /config#repository-factory -[ref-config-preagg-schema]: /config#pre-aggregations-schema -[ref-config-ctx-to-appid]: /config#context-to-app-id -[ref-config-ctx-to-orch-id]: /config#context-to-orchestrator-id -[ref-config-multi-data-src]: /config/multiple-data-sources -[ref-config-query-rewrite]: /config#query-rewrite -[ref-config-refresh-ctx]: /config#scheduled-refresh-contexts -[ref-config-refresh-tz]: /config#scheduled-refresh-time-zones -[ref-config-security-ctx]: /config#security-context -[ref-security]: /security -[ref-cube-datasource]: /schema/reference/cube#data-source -[ref-cube-security-ctx]: /schema/reference/cube#security-context diff --git a/docs/content/Configuration/Connecting-to-Downstream-Tools.mdx b/docs/content/Configuration/Connecting-to-Downstream-Tools.mdx deleted file mode 100644 index 865082ca08fae..0000000000000 --- a/docs/content/Configuration/Connecting-to-Downstream-Tools.mdx +++ /dev/null @@ -1,169 +0,0 @@ ---- -title: Connecting to visualization tools -permalink: /config/downstream -category: Configuration -menuOrder: 3 ---- - -Choose a tool to get started with below. - - - -If you'd like to connect to a tool which is not yet supported or present on -this page, please [file an issue](https://github.com/cube-js/cube/issues) on GitHub. 
- - - -## BI & data exploration tools - - - - - - - - - - - - - -You can learn more about SQL API on the [reference page](/backend/sql), -including how to connect to other BIs or visualization tools not listed here. - -## Notebooks - - - - - - - - - -## Custom data experiences - -The following tools deliver data insights closer to end users, e.g., by -providing a conversational interface for semantic layer: - - - - - -## Low-code tools & internal tool builders - - - - - - - - -You can also find relevant -step-by-step guides in the blog. - -## Frontend integrations - -Cube provides integration libraries for popular front-end frameworks: - - - - - - - -## APIs references - -All integrations above are powered by the following APIs. If you're a data -engineer, please explore the SQL API. If you're an application developer, check -out REST and GraphQL APIs. - - - - - - diff --git a/docs/content/Configuration/Connecting-to-the-Database.mdx b/docs/content/Configuration/Connecting-to-the-Database.mdx deleted file mode 100644 index 331890cfc41f0..0000000000000 --- a/docs/content/Configuration/Connecting-to-the-Database.mdx +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: Connecting to data sources -permalink: /config/databases -category: Configuration -menuOrder: 2 -redirect_from: - - /connecting-to-the-database ---- - -Choose a data store to get started with below. - -Note that Cube also supports connecting to [multiple data -stores][ref-config-multi-data-src] out of the box. - - - - If you'd like to connect to a data store which is not yet listed on this page, - please [file an issue](https://github.com/cube-js/cube/issues) on - GitHub. You can also contribute a driver, see this [step-by-step guide](https://github.com/cube-js/cube/blob/master/CONTRIBUTING.md#implementing-driver). 
- - - -## Data Warehouses - - - - - - - - - - -## Query Engines - - - - - - - - -## Transactional Databases - - - - - - - - - -## Time Series Databases - - - - - -## Streaming - - - - - - -## NoSQL & Other Data Sources - - - - - - - -## Driver Support - -Most of the drivers for data sources are supported either directly by the Cube -team or by their vendors. The rest are community-supported and will be -highlighted as such in their respective pages. - -## Third-party Drivers - -- [CosmosDB driver](https://www.npmjs.com/package/cosmosdb-cubejs-driver) -- [SAP Hana driver](https://www.npmjs.com/package/cubejs-hana-driver) -- [ArangoDB driver](https://www.npmjs.com/package/arangodb-cubejs-driver) -- [OpenDistro Elastic driver](https://www.npmjs.com/package/opendistro-cubejs-driver) -- [Mydremio driver](https://www.npmjs.com/package/mydremio-cubejs-driver) -- [Trino driver](https://www.npmjs.com/package/trino-cubejs-driver) -- [Cratedb driver](https://www.npmjs.com/package/cratedb-cubejs-driver) -- [Dremio ODBC driver](https://www.npmjs.com/package/dremio-odbc-cubejs-driver) - -You're more than welcome to contribute new drivers as well as new features and -patches to -[existing drivers](https://github.com/cube-js/cube/tree/master/packages). Please -check the -[contribution guidelines](https://github.com/cube-js/cube/blob/master/CONTRIBUTING.md#contributing-database-drivers) -and join the `#contributors` channel in our -[Slack community](https://slack.cube.dev). 
- -[ref-config-multi-data-src]: /config/multiple-data-sources diff --git a/docs/content/Configuration/Databases/AWS-Athena.mdx b/docs/content/Configuration/Databases/AWS-Athena.mdx deleted file mode 100644 index babc50d631c64..0000000000000 --- a/docs/content/Configuration/Databases/AWS-Athena.mdx +++ /dev/null @@ -1,161 +0,0 @@ ---- -title: AWS Athena -permalink: /config/databases/aws-athena ---- - -## Prerequisites - -- [A set of IAM credentials][aws-docs-athena-access] which allow access to [AWS - Athena][aws-athena] -- [The AWS region][aws-docs-regions] -- [The S3 bucket][aws-s3] on AWS to [store query results][aws-docs-athena-query] - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=athena -CUBEJS_AWS_KEY=AKIA************ -CUBEJS_AWS_SECRET=**************************************** -CUBEJS_AWS_REGION=us-east-1 -CUBEJS_AWS_S3_OUTPUT_LOCATION=s3://my-athena-output-bucket -CUBEJS_AWS_ATHENA_WORKGROUP=primary -CUBEJS_AWS_ATHENA_CATALOG=AwsDataCatalog -``` - -### <--{"id" : "Setup"}--> Cube Cloud - - - -In some cases you'll need to allow connections from your Cube Cloud deployment IP -address to your database. You can copy the IP address from either the Database -Setup step in deployment creation, or from Settings → -Configuration in your deployment. - - - -In Cube Cloud, select **AWS Athena** when creating a new deployment and fill in -the required fields: - - - -Cube Cloud also supports connecting to data sources within private VPCs. If you already have VPCs enabled in -your account, check out the [VPC documentation][ref-cloud-conf-vpc] to learn how -to get started. - - - -VPC connectivity is available in Cube Cloud on [Premium](https://cube.dev/pricing) tier. [Contact us][cube-contact] for details. 
- - - -[cube-pricing]: https://cube.dev/pricing/ -[cube-contact]: https://cube.dev/contact/ -[ref-cloud-conf-vpc]: /cloud/configuration/connecting-with-a-vpc - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| ------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------ | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_AWS_KEY` | The AWS Access Key ID to use for database connections | A valid AWS Access Key ID | ✅ | ✅ | -| `CUBEJS_AWS_SECRET` | The AWS Secret Access Key to use for database connections | A valid AWS Secret Access Key | ✅ | ✅ | -| `CUBEJS_AWS_REGION` | The AWS region of the Cube deployment | [A valid AWS region][aws-docs-regions] | ✅ | ✅ | -| `CUBEJS_AWS_S3_OUTPUT_LOCATION` | The S3 path to store query results made by the Cube deployment | A valid S3 path | ❌ | ✅ | -| `CUBEJS_AWS_ATHENA_WORKGROUP` | The name of the workgroup in which the query is being started | [A valid Athena Workgroup][aws-athena-workgroup] | ❌ | ✅ | -| `CUBEJS_AWS_ATHENA_CATALOG` | The name of the catalog to use by default | [A valid Athena Catalog name][awsdatacatalog] | ❌ | ✅ | -| `CUBEJS_DB_SCHEMA` | The name of the schema to use as `information_schema` filter. Reduces count of tables loaded during schema generation. | A valid schema name | ❌ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. Default is `5` | A valid number | ❌ | ❌ | - -## Pre-Aggregation Feature Support - -### count_distinct_approx - -Measures of type -[`count_distinct_approx`][ref-schema-ref-types-formats-countdistinctapprox] can be -used in pre-aggregations when using AWS Athena as a source database. 
To learn -more about AWS Athena's support for approximate aggregate functions, [click -here][aws-athena-docs-approx-agg-fns]. - -## Pre-Aggregation Build Strategies - - - -To learn more about pre-aggregation build strategies, [head -here][ref-caching-using-preaggs-build-strats]. - - - -| Feature | Works with read-only mode? | Is default? | -| ------------- | :------------------------: | :---------: | -| Batching | ❌ | ✅ | -| Export Bucket | ❌ | ❌ | - -By default, AWS Athena uses a [batching][self-preaggs-batching] strategy to -build pre-aggregations. - -### Batching - -No extra configuration is required to configure batching for AWS Athena. - -### Export Bucket - - - -AWS Athena **only** supports using AWS S3 for export buckets. - - - -#### AWS S3 - -For [improved pre-aggregation performance with large -datasets][ref-caching-large-preaggs], enable export bucket functionality by -configuring Cube with the following environment variables: - - - -Ensure the AWS credentials are correctly configured in IAM to allow reads and -writes to the export bucket in S3. - - - -```dotenv -CUBEJS_DB_EXPORT_BUCKET_TYPE=s3 -CUBEJS_DB_EXPORT_BUCKET=my.bucket.on.s3 -CUBEJS_DB_EXPORT_BUCKET_AWS_KEY= -CUBEJS_DB_EXPORT_BUCKET_AWS_SECRET= -CUBEJS_DB_EXPORT_BUCKET_AWS_REGION= -``` - -## SSL - -Cube does not require any additional configuration to enable SSL as AWS Athena -connections are made over HTTPS. 
- -[aws-athena]: https://aws.amazon.com/athena -[aws-athena-workgroup]: - https://docs.aws.amazon.com/athena/latest/ug/workgroups-benefits.html -[awsdatacatalog]: - https://docs.aws.amazon.com/athena/latest/ug/understanding-tables-databases-and-the-data-catalog.html -[aws-s3]: https://aws.amazon.com/s3/ -[aws-docs-athena-access]: - https://docs.aws.amazon.com/athena/latest/ug/security-iam-athena.html -[aws-docs-athena-query]: - https://docs.aws.amazon.com/athena/latest/ug/querying.html -[aws-docs-regions]: - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions -[aws-athena-docs-approx-agg-fns]: - https://prestodb.io/docs/0.217/functions/aggregate.html#approximate-aggregate-functions -[ref-caching-large-preaggs]: /caching/using-pre-aggregations#export-bucket -[ref-caching-using-preaggs-build-strats]: - /caching/using-pre-aggregations#pre-aggregation-build-strategies -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables -[ref-schema-ref-types-formats-countdistinctapprox]: - /schema/reference/types-and-formats#count-distinct-approx -[self-preaggs-batching]: #batching diff --git a/docs/content/Configuration/Databases/AWS-Redshift.mdx b/docs/content/Configuration/Databases/AWS-Redshift.mdx deleted file mode 100644 index 35187bdb4a6e2..0000000000000 --- a/docs/content/Configuration/Databases/AWS-Redshift.mdx +++ /dev/null @@ -1,161 +0,0 @@ ---- -title: AWS Redshift -permalink: /config/databases/aws-redshift ---- - -## Prerequisites - - - -If the cluster is configured within a [VPC][aws-vpc], then Cube **must** have a -network route to the cluster. 
- - - -- The [hostname][aws-redshift-docs-connection-string] for the [AWS - Redshift][aws-redshift] cluster -- The [username/password][aws-redshift-docs-users] for the [AWS - Redshift][aws-redshift] cluster -- The name of the database to use within the [AWS Redshift][aws-redshift] - cluster - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=redshift -CUBEJS_DB_HOST=my-redshift-cluster.cfbs3dkw1io8.eu-west-1.redshift.amazonaws.com -CUBEJS_DB_NAME=my_redshift_database -CUBEJS_DB_USER=redshift_user -CUBEJS_DB_PASS=********** -``` - -### <--{"id" : "Setup"}--> Cube Cloud - - - -In some cases you'll need to allow connections from your Cube Cloud deployment IP -address to your database. You can copy the IP address from either the Database -Setup step in deployment creation, or from Settings → -Configuration in your deployment. - - - -The following fields are required when creating an AWS Redshift connection: - - - -Cube Cloud also supports connecting to data sources within private VPCs. If you already have VPCs enabled in -your account, check out the [VPC documentation][ref-cloud-conf-vpc] to learn how -to get started. - - - -VPC connectivity is available in Cube Cloud on [Premium](https://cube.dev/pricing) tier. [Contact us][cube-contact] for details. 
- - - -[cube-pricing]: https://cube.dev/pricing/ -[cube-contact]: https://cube.dev/contact/ -[ref-cloud-conf-vpc]: /cloud/configuration/connecting-with-a-vpc - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| -------------------------------------- | ----------------------------------------------------------------------------------- | ------------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_HOST` | The host URL for a database | A valid database host URL | ✅ | ✅ | -| `CUBEJS_DB_PORT` | The port for the database connection | A valid port number | ❌ | ✅ | -| `CUBEJS_DB_NAME` | The name of the database to connect to | A valid database name | ✅ | ✅ | -| `CUBEJS_DB_USER` | The username used to connect to the database | A valid database username | ✅ | ✅ | -| `CUBEJS_DB_PASS` | The password used to connect to the database | A valid database password | ✅ | ✅ | -| `CUBEJS_DB_SSL` | If `true`, enables SSL encryption for database connections from Cube | `true`, `false` | ❌ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. Default is `4` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. Default is `16` | A valid number | ❌ | ✅ | -| `CUBEJS_DB_EXPORT_BUCKET_REDSHIFT_ARN` | | | ❌ | ✅ | - -## Pre-Aggregation Feature Support - -### count_distinct_approx - -Measures of type -[`count_distinct_approx`][ref-schema-ref-types-formats-countdistinctapprox] can -not be used in pre-aggregations when using AWS Redshift as a source database. - -## Pre-Aggregation Build Strategies - - - -To learn more about pre-aggregation build strategies, [head -here][ref-caching-using-preaggs-build-strats]. - - - -| Feature | Works with read-only mode? | Is default? 
| -| ------------- | :------------------------: | :---------: | -| Batching | ❌ | ✅ | -| Export Bucket | ❌ | ❌ | - -By default, AWS Redshift uses [batching][self-preaggs-batching] to build -pre-aggregations. - -### Batching - -No extra configuration is required to configure batching for AWS Redshift. - -### Export bucket - - - -AWS Redshift **only** supports using AWS S3 for export buckets. - - - -#### AWS S3 - -For [improved pre-aggregation performance with large -datasets][ref-caching-large-preaggs], enable export bucket functionality by -configuring Cube with the following environment variables: - - - -Ensure the AWS credentials are correctly configured in IAM to allow reads and -writes to the export bucket in S3. - - - -```dotenv -CUBEJS_DB_EXPORT_BUCKET_TYPE=s3 -CUBEJS_DB_EXPORT_BUCKET=my.bucket.on.s3 -CUBEJS_DB_EXPORT_BUCKET_AWS_KEY= -CUBEJS_DB_EXPORT_BUCKET_AWS_SECRET= -CUBEJS_DB_EXPORT_BUCKET_AWS_REGION= -``` - -## SSL - -To enable SSL-encrypted connections between Cube and AWS Redshift, set the -`CUBEJS_DB_SSL` environment variable to `true`. For more information on how to -configure custom certificates, please check out [Enable SSL Connections to the -Database][ref-recipe-enable-ssl]. 
- -[aws-redshift-docs-connection-string]: - https://docs.aws.amazon.com/redshift/latest/mgmt/configuring-connections.html#connecting-drivers -[aws-redshift-docs-users]: - https://docs.aws.amazon.com/redshift/latest/dg/r_Users.html -[aws-redshift]: https://aws.amazon.com/redshift/ -[aws-vpc]: https://aws.amazon.com/vpc/ -[ref-caching-large-preaggs]: /caching/using-pre-aggregations#export-bucket -[ref-caching-using-preaggs-build-strats]: - /caching/using-pre-aggregations#pre-aggregation-build-strategies -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables -[ref-recipe-enable-ssl]: /recipes/enable-ssl-connections-to-database -[ref-schema-ref-types-formats-countdistinctapprox]: - /schema/reference/types-and-formats#count-distinct-approx -[self-preaggs-batching]: #batching diff --git a/docs/content/Configuration/Databases/ClickHouse.mdx b/docs/content/Configuration/Databases/ClickHouse.mdx deleted file mode 100644 index ab1d595e0377c..0000000000000 --- a/docs/content/Configuration/Databases/ClickHouse.mdx +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: ClickHouse -permalink: /config/databases/clickhouse ---- - -## Prerequisites - -- The hostname for the [ClickHouse][clickhouse] database server -- The [username/password][clickhouse-docs-users] for the - [ClickHouse][clickhouse] database server - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=clickhouse -CUBEJS_DB_HOST=my.clickhouse.host -CUBEJS_DB_NAME=my_clickhouse_database -CUBEJS_DB_USER=clickhouse_user -CUBEJS_DB_PASS=********** -``` - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| ------------------------------- | ----------------------------------------------------------------------------------- | 
------------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_HOST` | The host URL for a database | A valid database host URL | ✅ | ✅ | -| `CUBEJS_DB_PORT` | The port for the database connection | A valid port number | ❌ | ✅ | -| `CUBEJS_DB_NAME` | The name of the database to connect to | A valid database name | ✅ | ✅ | -| `CUBEJS_DB_USER` | The username used to connect to the database | A valid database username | ✅ | ✅ | -| `CUBEJS_DB_PASS` | The password used to connect to the database | A valid database password | ✅ | ✅ | -| `CUBEJS_DB_CLICKHOUSE_READONLY` | Whether the ClickHouse user has read-only access or not | `true`, `false` | ❌ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. Default is `5` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. Default is `20` | A valid number | ❌ | ✅ | - -## Pre-Aggregation Feature Support - -### count_distinct_approx - -Measures of type -[`count_distinct_approx`][ref-schema-ref-types-formats-countdistinctapprox] can -not be used in pre-aggregations when using ClickHouse as a source database. - -## Pre-Aggregation Build Strategies - - - -To learn more about pre-aggregation build strategies, [head -here][ref-caching-using-preaggs-build-strats]. - - - -| Feature | Works with read-only mode? | Is default? | -| ------------- | :------------------------: | :---------: | -| Batching | ✅ | ✅ | -| Export Bucket | - | - | - -By default, ClickHouse uses [batching][self-preaggs-batching] to build -pre-aggregations. - -### Batching - -No extra configuration is required to configure batching for ClickHouse. - -### Export Bucket - -ClickHouse does not support export buckets. - -## SSL - -To enable SSL-encrypted connections between Cube and ClickHouse, set the -`CUBEJS_DB_SSL` environment variable to `true`. 
For more information on how to -configure custom certificates, please check out [Enable SSL Connections to the -Database][ref-recipe-enable-ssl]. - -## Additional Configuration - -You can connect to a ClickHouse database when your user's permissions are -[restricted][clickhouse-readonly] to read-only, by setting -`CUBEJS_DB_CLICKHOUSE_READONLY` to `true`. - -[clickhouse]: https://clickhouse.tech/ -[clickhouse-docs-users]: - https://clickhouse.tech/docs/en/operations/settings/settings-users/ -[clickhouse-readonly]: - https://clickhouse.tech/docs/en/operations/settings/permissions-for-queries/#settings_readonly -[ref-caching-using-preaggs-build-strats]: - /caching/using-pre-aggregations#pre-aggregation-build-strategies -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables -[ref-recipe-enable-ssl]: /recipes/enable-ssl-connections-to-database -[ref-schema-ref-types-formats-countdistinctapprox]: - /schema/reference/types-and-formats#count-distinct-approx -[self-preaggs-batching]: #batching diff --git a/docs/content/Configuration/Databases/Databricks-JDBC.mdx b/docs/content/Configuration/Databases/Databricks-JDBC.mdx deleted file mode 100644 index da932538b27db..0000000000000 --- a/docs/content/Configuration/Databases/Databricks-JDBC.mdx +++ /dev/null @@ -1,167 +0,0 @@ ---- -title: Databricks JDBC -permalink: /config/databases/databricks/jdbc ---- - -## Prerequisites - -- [A JDK installation][gh-cubejs-jdbc-install] -- The [JDBC URL][databricks-docs-jdbc-url] for the [Databricks][databricks] - cluster - -## Setup - -### <--{"id" : "Setup"}--> Environment Variables - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=databricks-jdbc -# CUBEJS_DB_NAME is optional -CUBEJS_DB_NAME=default -# You can find this inside the cluster's configuration 
-CUBEJS_DB_DATABRICKS_URL=jdbc:databricks://dbc-XXXXXXX-XXXX.cloud.databricks.com:443/default;transportMode=http;ssl=1;httpPath=sql/protocolv1/o/XXXXX/XXXXX;AuthMech=3;UID=token -# You can specify the personal access token separately from `CUBEJS_DB_DATABRICKS_URL` by doing this: -CUBEJS_DB_DATABRICKS_TOKEN=XXXXX -# This accepts the Databricks usage policy and must be set to `true` to use the Databricks JDBC driver -CUBEJS_DB_DATABRICKS_ACCEPT_POLICY=true -``` - -### <--{"id" : "Setup"}--> Docker - -Create a `.env` file [as above](#setup-environment-variables), then extend the -`cubejs/cube:jdk` Docker image tag to build a Cube image with the JDBC driver: - -```dockerfile -FROM cubejs/cube:jdk - -COPY . . -RUN npm install -``` - -You can then build and run the image using the following commands: - -```bash{promptUser: user} -docker build -t cubejs-jdk . -docker run -it -p 4000:4000 --env-file=.env cubejs-jdk -``` - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| ------------------------------------ | ----------------------------------------------------------------------------------------------- | --------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_NAME` | The name of the database to connect to | A valid database name | ✅ | ✅ | -| `CUBEJS_DB_DATABRICKS_URL` | The URL for a JDBC connection | A valid JDBC URL | ✅ | ✅ | -| `CUBEJS_DB_DATABRICKS_ACCEPT_POLICY` | Whether or not to accept the license terms for the Databricks JDBC driver | `true`, `false` | ✅ | ✅ | -| `CUBEJS_DB_DATABRICKS_TOKEN` | The [personal access token][databricks-docs-pat] used to authenticate the Databricks connection | A valid token | ✅ | ✅ | -| `CUBEJS_DB_EXPORT_BUCKET_MOUNT_DIR` | The path for the [Databricks DBFS mount][databricks-docs-dbfs] | A valid mount path | ❌ | ✅ | -| `CUBEJS_CONCURRENCY` | 
The number of concurrent connections each queue has to the database. Default is `2` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. Default is `8` | A valid number | ❌ | ✅ | - -## Pre-Aggregation Feature Support - -### count_distinct_approx - -Measures of type -[`count_distinct_approx`][ref-schema-ref-types-formats-countdistinctapprox] can -not be used in pre-aggregations when using Databricks as a data source. - -## Pre-Aggregation Build Strategies - - - -To learn more about pre-aggregation build strategies, [head -here][ref-caching-using-preaggs-build-strats]. - - - -| Feature | Works with read-only mode? | Is default? | -| ------------- | :------------------------: | :---------: | -| Simple | ❌ | ✅ | -| Export Bucket | ❌ | ❌ | - -By default, Databricks JDBC uses a [simple][self-preaggs-simple] strategy to -build pre-aggregations. - -### Simple - -No extra configuration is required to configure simple pre-aggregation builds -for Databricks. - -### Export Bucket - -Databricks supports using both [AWS S3][aws-s3] and [Azure Blob -Storage][azure-bs] for export bucket functionality. - -#### AWS S3 - -To use AWS S3 as an export bucket, first complete [the Databricks guide on -mounting S3 buckets to Databricks DBFS][databricks-docs-dbfs-s3]. - - - -Ensure the AWS credentials are correctly configured in IAM to allow reads and -writes to the export bucket in S3. - - - -```dotenv -CUBEJS_DB_EXPORT_BUCKET_TYPE=s3 -CUBEJS_DB_EXPORT_BUCKET=s3://my.bucket.on.s3 -CUBEJS_DB_EXPORT_BUCKET_AWS_KEY= -CUBEJS_DB_EXPORT_BUCKET_AWS_SECRET= -CUBEJS_DB_EXPORT_BUCKET_AWS_REGION= -``` - -#### Azure Blob Storage - -To use Azure Blob Storage as an export bucket, follow [the Databricks guide on -mounting Azure Blob Storage to Databricks DBFS][databricks-docs-dbfs-azure]. 
- -[Retrieve the storage account access key][azure-bs-docs-get-key] from your Azure -account and use as follows: - -```dotenv -CUBEJS_DB_EXPORT_BUCKET_TYPE=azure -CUBEJS_DB_EXPORT_BUCKET=wasbs://my-bucket@my-account.blob.core.windows.net -CUBEJS_DB_EXPORT_BUCKET_AZURE_KEY= -``` - -## SSL/TLS - -Cube does not require any additional configuration to enable SSL/TLS for -Databricks JDBC connections. - -## Additional Configuration - -### Cube Cloud - -To accurately show partition sizes in the Cube Cloud APM, [an export -bucket][self-preaggs-export-bucket] **must be** configured. - -[aws-s3]: https://aws.amazon.com/s3/ -[azure-bs]: https://azure.microsoft.com/en-gb/services/storage/blobs/ -[azure-bs-docs-get-key]: - https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&tabs=azure-portal#view-account-access-keys -[databricks]: https://databricks.com/ -[databricks-docs-dbfs]: - https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs -[databricks-docs-dbfs-azure]: - https://docs.databricks.com/data/data-sources/azure/azure-storage.html#mount-azure-blob-storage-containers-to-dbfs -[databricks-docs-dbfs-s3]: - https://docs.databricks.com/data/data-sources/aws/amazon-s3.html#access-s3-buckets-through-dbfs -[databricks-docs-jdbc-url]: - https://docs.databricks.com/integrations/bi/jdbc-odbc-bi.html#get-server-hostname-port-http-path-and-jdbc-url -[databricks-docs-pat]: - https://docs.databricks.com/dev-tools/api/latest/authentication.html#token-management -[gh-cubejs-jdbc-install]: - https://github.com/cube-js/cube/blob/master/packages/cubejs-jdbc-driver/README.md#java-installation -[ref-caching-large-preaggs]: /caching/using-pre-aggregations#export-bucket -[ref-caching-using-preaggs-build-strats]: - /caching/using-pre-aggregations#pre-aggregation-build-strategies -[ref-config-multiple-ds-decorating-env]: - 
/config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables -[ref-schema-ref-types-formats-countdistinctapprox]: - /schema/reference/types-and-formats#count-distinct-approx -[self-preaggs-simple]: #simple -[self-preaggs-export-bucket]: #export-bucket diff --git a/docs/content/Configuration/Databases/Druid.mdx b/docs/content/Configuration/Databases/Druid.mdx deleted file mode 100644 index dde5671c7e6aa..0000000000000 --- a/docs/content/Configuration/Databases/Druid.mdx +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Druid -permalink: /config/databases/druid ---- - - - The driver for Druid is{' '} - community-supported and is not - supported by Cube or the vendor. - - -## Prerequisites - -- The URL for the [Druid][druid] database -- The username/password for the [Druid][druid] database server - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=druid -CUBEJS_DB_URL=https://my.druid.host:8082 -CUBEJS_DB_USER=druid -CUBEJS_DB_PASS=********** -``` - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| -------------------- | ----------------------------------------------------------------------------------- | ------------------------------ | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_URL` | The URL for a database | A valid database URL for Druid | ✅ | ✅ | -| `CUBEJS_DB_USER` | The username used to connect to the database | A valid database username | ✅ | ✅ | -| `CUBEJS_DB_PASS` | The password used to connect to the database | A valid database password | ✅ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. 
Default is `2` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. Default is `8` | A valid number | ❌ | ✅ | - -## SSL - -Cube does not require any additional configuration to enable SSL as Druid -connections are made over HTTPS. - -[druid]: https://druid.apache.org/ -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables diff --git a/docs/content/Configuration/Databases/Elasticsearch.mdx b/docs/content/Configuration/Databases/Elasticsearch.mdx deleted file mode 100644 index da1b5bec02286..0000000000000 --- a/docs/content/Configuration/Databases/Elasticsearch.mdx +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Elasticsearch -permalink: /config/databases/elasticsearch ---- - - - The driver for Elasticsearch is{' '} - community-supported and is not - supported by Cube or the vendor. - - -## Prerequisites - -To connect to an [Elasticsearch][elasticsearch] database, use `CUBEJS_DB_URL` -with the username and password embedded in the URL, if required. - - - -If you're not using [Elastic Cloud][elastic-cloud], you **must** specify -`CUBEJS_DB_ELASTIC_QUERY_FORMAT`. 
- - - -## Setup - -### <--{"id" : "Setup"}--> Manual - -For a self-hosted Elasticsearch instance, add the following to a `.env` file in -your Cube project: - -```dotenv -CUBEJS_DB_TYPE=elasticsearch -CUBEJS_DB_URL=https://my.elasticsearch.host:9200 -CUBEJS_DB_ELASTIC_QUERY_FORMAT=json -``` - -For an Elasticsearch instanced hosted by [Elastic.co][elastic-co], add the -following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=elasticsearch -CUBEJS_DB_URL=https://my.elasticsearch.host:9200 -CUBEJS_DB_ELASTIC_APIKEY_ID=VuaCfGcBCdbkQm-e5aOx -CUBEJS_DB_ELASTIC_APIKEY_KEY=ui2lp2axTNmsyakw9tvNnw -``` - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| -------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_URL` | The URL for a database | A valid database URL for Elasticsearch | ✅ | ✅ | -| `CUBEJS_DB_ELASTIC_QUERY_FORMAT` | By default, queries return data in JDBC format, but you can also return data in standard Elasticsearch JDBC, JSON, CSV, YAML or raw formats (only JSON and JDBC are currently supported) | `json`, `jdbc` | ❌ | ✅ | -| `CUBEJS_DB_ELASTIC_OPENDISTRO` | If `true`, then use the Open Distro for Elasticsearch | `true`, `false` | ❌ | ✅ | -| `CUBEJS_DB_ELASTIC_APIKEY_ID` | [ID of the API key from elastic.co][elastic-docs-api-keys] | A valid Elastic.co API key ID | ❌ | ✅ | -| `CUBEJS_DB_ELASTIC_APIKEY_KEY` | [Value of the API key from elastic.co][elastic-docs-api-keys] | A valid Elastic.co API key value | ❌ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. 
Default is `2` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. Default is `8` | A valid number | ❌ | ✅ | - -## SSL - -Cube does not require any additional configuration to enable SSL as -Elasticsearch connections are made over HTTPS. - -[elastic-co]: https://elastic.co/ -[elastic-cloud]: https://cloud.elastic.co/ -[elasticsearch]: https://www.elastic.co/elasticsearch/ -[elastic-docs-api-keys]: - https://www.elastic.co/guide/en/kibana/master/api-keys.html#create-api-key -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables diff --git a/docs/content/Configuration/Databases/Firebolt.mdx b/docs/content/Configuration/Databases/Firebolt.mdx deleted file mode 100644 index f179695bac748..0000000000000 --- a/docs/content/Configuration/Databases/Firebolt.mdx +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Firebolt -permalink: /config/databases/firebolt ---- - - - -The driver for Firebolt is supported by its vendor. Please report any issues to -their [Help Center][firebolt-help]. 
- - - -## Prerequisites - -- The username/password for your [Firebolt][firebolt] account - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_NAME=firebolt_database -CUBEJS_DB_USER=firebolt_user@customer.com -CUBEJS_DB_PASS=********** -CUBEJS_FIREBOLT_ACCOUNT=cube -CUBEJS_FIREBOLT_ENGINE_NAME=engine_name -``` - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| ------------------------------ | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_NAME` | The name of the database to connect to | A valid database name | ✅ | ✅ | -| `CUBEJS_DB_USER` | The username used to connect to the database | A valid database username | ✅ | ✅ | -| `CUBEJS_DB_PASS` | The password used to connect to the database | A valid database password | ✅ | ✅ | -| `CUBEJS_FIREBOLT_ACCOUNT` | Account name | An account name | - | ✅ | -| `CUBEJS_FIREBOLT_ENGINE_NAME` | Engine name to connect to | A valid engine name | ✅ | ✅ | -| `CUBEJS_FIREBOLT_API_ENDPOINT` | Firebolt API endpoint. Used for authentication | `api.dev.firebolt.io`, `api.staging.firebolt.io`, `api.app.firebolt.io` | - | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. Default is `5` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. 
Default is `20` | A valid number | ❌ | ✅ | - -[firebolt]: https://www.firebolt.io/ -[firebolt-help]: https://help.firebolt.io/ -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables diff --git a/docs/content/Configuration/Databases/Google-BigQuery.mdx b/docs/content/Configuration/Databases/Google-BigQuery.mdx deleted file mode 100644 index 09d0c337dc83e..0000000000000 --- a/docs/content/Configuration/Databases/Google-BigQuery.mdx +++ /dev/null @@ -1,170 +0,0 @@ ---- -title: Google BigQuery -permalink: /config/databases/google-bigquery ---- - -## Prerequisites - - - -In order to connect Google BigQuery to Cube, you need to provide service account -credentials. Cube requires the service account to have **BigQuery Data Viewer** -and **BigQuery Job User** roles enabled. You can learn more about acquiring -Google BigQuery credentials [here][bq-docs-getting-started]. - - - -- The [Google Cloud Project ID][google-cloud-docs-projects] for the - [BigQuery][bq] project -- A set of [Google Cloud service credentials][google-support-create-svc-account] - which [allow access][bq-docs-getting-started] to the [BigQuery][bq] project -- The [Google Cloud region][bq-docs-regional-locations] for the [BigQuery][bq] - project - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=bigquery -CUBEJS_DB_BQ_PROJECT_ID=my-bigquery-project-12345 -CUBEJS_DB_BQ_KEY_FILE=/path/to/my/keyfile.json -``` - -You could also encode the key file using Base64 and set the result to -`CUBEJS_DB_BQ_CREDENTIALS`: - -```dotenv -CUBEJS_DB_BQ_CREDENTIALS=$(cat /path/to/my/keyfile.json | base64) -``` - -### <--{"id" : "Setup"}--> Cube Cloud - - - -In some cases you'll need to allow connections from your Cube Cloud deployment IP -address to your database. 
You can copy the IP address from either the Database -Setup step in deployment creation, or from Settings → -Configuration in your deployment. - - - -The following fields are required when creating a BigQuery connection: - - - -Cube Cloud also supports connecting to data sources within private VPCs. If you already have VPCs enabled in -your account, check out the [VPC documentation][ref-cloud-conf-vpc] to learn how -to get started. - - - -VPC connectivity is available in Cube Cloud on [Premium](https://cube.dev/pricing) tier. [Contact us][cube-contact] for details. - - - -[cube-pricing]: https://cube.dev/pricing/ -[cube-contact]: https://cube.dev/contact/ -[ref-cloud-conf-vpc]: /cloud/configuration/connecting-with-a-vpc - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| ------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_BQ_PROJECT_ID` | The Google BigQuery project ID to connect to | A valid Google BigQuery Project ID | ✅ | ✅ | -| `CUBEJS_DB_BQ_KEY_FILE` | The path to a JSON key file for connecting to Google BigQuery | A valid Google BigQuery JSON key file | ✅ | ✅ | -| `CUBEJS_DB_BQ_CREDENTIALS` | A Base64 encoded JSON key file for connecting to Google BigQuery | A valid Google BigQuery JSON key file encoded as a Base64 string | ❌ | ✅ | -| `CUBEJS_DB_BQ_LOCATION` | The Google BigQuery dataset location to connect to. Required if used with pre-aggregations outside of US. 
If not set then BQ driver will fail with `Dataset was not found in location US` error | [A valid Google BigQuery regional location][bq-docs-regional-locations] | ⚠️ | ✅ | -| `CUBEJS_DB_EXPORT_BUCKET` | The name of a bucket in cloud storage | A valid bucket name from cloud storage | ❌ | ✅ | -| `CUBEJS_DB_EXPORT_BUCKET_TYPE` | The cloud provider where the bucket is hosted | `gcp` | ❌ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. Default is `10` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. Default is `40` | A valid number | ❌ | ✅ | - -## Pre-Aggregation Feature Support - -### count_distinct_approx - -Measures of type -[`count_distinct_approx`][ref-schema-ref-types-formats-countdistinctapprox] can be -used in pre-aggregations when using Google BigQuery as a source database. To -learn more about Google BigQuery's support for approximate aggregate functions, -[click here][bq-docs-approx-agg-fns]. - -## Pre-Aggregation Build Strategies - - - -To learn more about pre-aggregation build strategies, [head -here][ref-caching-using-preaggs-build-strats]. - - - -| Feature | Works with read-only mode? | Is default? | -| ------------- | :------------------------: | :---------: | -| Batching | ❌ | ✅ | -| Export Bucket | ❌ | ❌ | - -By default, Google BigQuery uses [batching][self-preaggs-batching] to build -pre-aggregations. - -### Batching - -No extra configuration is required to configure batching for Google BigQuery. - -### Export bucket - - - -BigQuery only supports using Google Cloud Storage for export buckets. 
- - - -#### Google Cloud Storage - -For [improved pre-aggregation performance with large -datasets][ref-caching-large-preaggs], enable export bucket functionality by -configuring Cube with the following environment variables: - - - -When using an export bucket, remember to assign the **BigQuery Data Editor** and -**Storage Object Admin** role to your BigQuery service account. - - - -```dotenv -CUBEJS_DB_EXPORT_BUCKET=export_data_58148478376 -CUBEJS_DB_EXPORT_BUCKET_TYPE=gcp -``` - -## SSL - -Cube does not require any additional configuration to enable SSL as Google -BigQuery connections are made over HTTPS. - -[bq]: https://cloud.google.com/bigquery -[bq-docs-getting-started]: - https://cloud.google.com/docs/authentication/getting-started -[bq-docs-credentials]: - https://console.cloud.google.com/apis/credentials/serviceaccountkey -[bq-docs-regional-locations]: - https://cloud.google.com/bigquery/docs/locations#regional-locations -[bq-docs-approx-agg-fns]: - https://cloud.google.com/bigquery/docs/reference/standard-sql/approximate_aggregate_functions -[google-cloud-docs-projects]: - https://cloud.google.com/resource-manager/docs/creating-managing-projects#before_you_begin -[google-support-create-svc-account]: - https://support.google.com/a/answer/7378726?hl=en -[ref-caching-large-preaggs]: /caching/using-pre-aggregations#export-bucket -[ref-caching-using-preaggs-build-strats]: - /caching/using-pre-aggregations#pre-aggregation-build-strategies -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables -[ref-schema-ref-types-formats-countdistinctapprox]: - /schema/reference/types-and-formats#count-distinct-approx -[self-preaggs-batching]: #batching diff --git a/docs/content/Configuration/Databases/Hive.mdx b/docs/content/Configuration/Databases/Hive.mdx deleted file mode 100644 index 435e9f544345a..0000000000000 --- a/docs/content/Configuration/Databases/Hive.mdx +++ 
/dev/null @@ -1,49 +0,0 @@ ---- -title: Hive/SparkSQL -permalink: /config/databases/hive-sparksql ---- - - - The driver for Hive/SparkSQL is{' '} - community-supported and is not - supported by Cube or the vendor. - - -## Prerequisites - -- The hostname for the [Hive][hive] database server -- The username/password for the [Hive][hive] database server - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=hive -CUBEJS_DB_HOST=my.hive.host -CUBEJS_DB_NAME=my_hive_database -CUBEJS_DB_USER=hive_user -CUBEJS_DB_PASS=********** -``` - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| --------------------------- | ----------------------------------------------------------------------------------- | ------------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_HOST` | The host URL for a database | A valid database host URL | ✅ | ✅ | -| `CUBEJS_DB_PORT` | The port for the database connection | A valid port number | ❌ | ✅ | -| `CUBEJS_DB_NAME` | The name of the database to connect to | A valid database name | ✅ | ✅ | -| `CUBEJS_DB_USER` | The username used to connect to the database | A valid database username | ✅ | ✅ | -| `CUBEJS_DB_PASS` | The password used to connect to the database | A valid database password | ✅ | ✅ | -| `CUBEJS_DB_HIVE_TYPE` | | | ❌ | ✅ | -| `CUBEJS_DB_HIVE_VER` | | | ❌ | ✅ | -| `CUBEJS_DB_HIVE_THRIFT_VER` | | | ❌ | ✅ | -| `CUBEJS_DB_HIVE_CDH_VER` | | | ❌ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. Default is `2` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. 
Default is `8` | A valid number | ❌ | ✅ | - -[hive]: https://hive.apache.org/ -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables diff --git a/docs/content/Configuration/Databases/MS-SQL.mdx b/docs/content/Configuration/Databases/MS-SQL.mdx deleted file mode 100644 index 9a494560a8b56..0000000000000 --- a/docs/content/Configuration/Databases/MS-SQL.mdx +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: MS SQL -permalink: /config/databases/mssql ---- - -## Prerequisites - -- The hostname for the [MS SQL][mssql] database server -- The username/password for the [MS SQL][mssql] database server -- The name of the database to use within the [MS SQL][mssql] database server - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=mssql -CUBEJS_DB_HOST=my.mssql.host -CUBEJS_DB_NAME=my_mssql_database -CUBEJS_DB_USER=mssql_user -CUBEJS_DB_PASS=********** -``` - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| -------------------- | ----------------------------------------------------------------------------------- | ------------------------------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_HOST` | The host URL for a database | A valid database host URL | ✅ | ✅ | -| `CUBEJS_DB_PORT` | The port for the database connection | A valid port number | ❌ | ✅ | -| `CUBEJS_DB_NAME` | The name of the database to connect to | A valid database name | ✅ | ✅ | -| `CUBEJS_DB_USER` | The username used to connect to the database | A valid database username | ✅ | ✅ | -| `CUBEJS_DB_PASS` | The password used to connect to the database | A valid database password | ✅ | ✅ | -| `CUBEJS_DB_DOMAIN` | A domain name within the 
database to connect to | A valid domain name within a MSSQL database | ❌ | ✅ | -| `CUBEJS_DB_SSL` | If `true`, enables SSL encryption for database connections from Cube | `true`, `false` | ❌ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. Default is `2` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. Default is `8` | A valid number | ❌ | ✅ | - -## Pre-Aggregation Feature Support - -### count_distinct_approx - -Measures of type -[`count_distinct_approx`][ref-schema-ref-types-formats-countdistinctapprox] can -not be used in pre-aggregations when using MSSQL as a source database. - -## Pre-Aggregation Build Strategies - - - -To learn more about pre-aggregation build strategies, [head -here][ref-caching-using-preaggs-build-strats]. - - - -| Feature | Works with read-only mode? | Is default? | -| ------------- | :------------------------: | :---------: | -| Simple | ✅ | ✅ | -| Batching | - | - | -| Export Bucket | - | - | - -By default, MSSQL uses a [simple][self-preaggs-simple] strategy to build -pre-aggregations. - -### Simple - -No extra configuration is required to configure simple pre-aggregation builds -for MSSQL. - -### Batching - -MSSQL does not support batching. - -### Export Bucket - -MSSQL does not support export buckets. - -## SSL - -To enable SSL-encrypted connections between Cube and MS SQL, set the -`CUBEJS_DB_SSL` environment variable to `true`. For more information on how to -configure custom certificates, please check out [Enable SSL Connections to the -Database][ref-recipe-enable-ssl]. 
- -## Additional Configuration - -### <--{"id" : "Additional Configuration"}--> Windows Authentication - -To connect to a MSSQL database using Windows Authentication (also sometimes -known as `trustedConnection`), instantiate the driver with -`trustedConnection: true` in your `cube.js` configuration file: - -```javascript -const MssqlDriver = require('@cubejs-backend/mssql-driver'); -module.exports = { - driverFactory: ({ dataSource }) => - new MssqlDriver({ database: dataSource, trustedConnection: true }), -}; -``` - -[mssql]: https://www.microsoft.com/en-gb/sql-server/sql-server-2019 -[ref-caching-using-preaggs-build-strats]: - /caching/using-pre-aggregations#pre-aggregation-build-strategies -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables -[ref-recipe-enable-ssl]: /recipes/enable-ssl-connections-to-database -[ref-schema-ref-types-formats-countdistinctapprox]: - /schema/reference/types-and-formats#count-distinct-approx -[self-preaggs-simple]: #simple diff --git a/docs/content/Configuration/Databases/Materialize.mdx b/docs/content/Configuration/Databases/Materialize.mdx deleted file mode 100644 index aeb6eeecb9ba2..0000000000000 --- a/docs/content/Configuration/Databases/Materialize.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Materialize -permalink: /config/databases/materialize ---- - - - -The driver for Materialize is supported by its vendor. Please report any issues -to their [Slack][materialize-slack]. 
- - - -## Prerequisites - -- The hostname for the [Materialize][materialize] database server - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=materialize -CUBEJS_DB_HOST=my.materialize.host -CUBEJS_DB_PORT=6875 -CUBEJS_DB_NAME=materialize -CUBEJS_DB_USER=materialize -CUBEJS_DB_PASS=materialize -``` - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| -------------------- | ----------------------------------------------------------------------------------- | ------------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_HOST` | The host URL for a database | A valid database host URL | ✅ | ✅ | -| `CUBEJS_DB_PORT` | The port for the database connection | A valid port number | ✅ | ✅ | -| `CUBEJS_DB_NAME` | The name of the database to connect to | A valid database name | ✅ | ✅ | -| `CUBEJS_DB_USER` | The username used to connect to the database | A valid database username | ✅ | ✅ | -| `CUBEJS_DB_PASS` | The password used to connect to the database | A valid database password | ✅ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. Default is `2` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. Default is `8` | A valid number | ❌ | ✅ | - -## SSL - -To enable SSL-encrypted connections between Cube and Materialize, set the -`CUBEJS_DB_SSL` environment variable to `true`. For more information on how to -configure custom certificates, please check out [Enable SSL Connections to the -Database][ref-recipe-enable-ssl]. 
- -[materialize]: https://materialize.com/docs/ -[materialize-slack]: https://materialize.com/s/chat -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables -[ref-recipe-enable-ssl]: /recipes/enable-ssl-connections-to-database diff --git a/docs/content/Configuration/Databases/MongoDB.mdx b/docs/content/Configuration/Databases/MongoDB.mdx deleted file mode 100644 index edfd006c67146..0000000000000 --- a/docs/content/Configuration/Databases/MongoDB.mdx +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: MongoDB -permalink: /config/databases/mongodb ---- - - - The driver for MongoDB is{' '} - community-supported and is not - supported by Cube or the vendor. - - -## Prerequisites - - - -To use Cube with MongoDB you need to install the [MongoDB Connector for -BI][mongobi-download]. [Learn more about setup for MongoDB -here][cube-blog-mongodb]. - - - -- [MongoDB Connector for BI][mongobi-download] -- The hostname for the [MongoDB][mongodb] database server -- The username/password for the [MongoDB][mongodb] database server - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=mongobi -# The MongoBI connector host. If using on local machine, it should be either `localhost` or `127.0.0.1`: -CUBEJS_DB_HOST=my.mongobi.host -# The default port of the MongoBI connector service -CUBEJS_DB_PORT=3307 -CUBEJS_DB_NAME=my_mongodb_database -CUBEJS_DB_USER=mongodb_server_user -CUBEJS_DB_PASS=mongodb_server_password -# MongoBI requires SSL connections, so set the following to `true`: -CUBEJS_DB_SSL=true -``` - -If you are connecting to a local MongoBI Connector, which is pointing to a local -MongoDB instance, If MongoBI Connector and MongoDB are both running locally, -then the above should work. To connect to a remote MongoDB instance, first -configure `mongosqld` appropriately. 
See [here for an example config -file][mongobi-with-remote-db]. - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| -------------------- | ----------------------------------------------------------------------------------- | ------------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_HOST` | The host URL for a database | A valid database host URL | ✅ | ✅ | -| `CUBEJS_DB_PORT` | The port for the database connection | A valid port number | ❌ | ✅ | -| `CUBEJS_DB_NAME` | The name of the database to connect to | A valid database name | ✅ | ✅ | -| `CUBEJS_DB_USER` | The username used to connect to the database | A valid database username | ✅ | ✅ | -| `CUBEJS_DB_PASS` | The password used to connect to the database | A valid database password | ✅ | ✅ | -| `CUBEJS_DB_SSL` | If `true`, enables SSL encryption for database connections from Cube | `true`, `false` | ✅ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. Default is `2` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. Default is `8` | A valid number | ❌ | ✅ | - -## Pre-Aggregation Feature Support - -### count_distinct_approx - -Measures of type -[`count_distinct_approx`][ref-schema-ref-types-formats-countdistinctapprox] can -not be used in pre-aggregations when using MongoDB as a source database. - -## Pre-Aggregation Build Strategies - - - -To learn more about pre-aggregation build strategies, [head -here][ref-caching-using-preaggs-build-strats]. - - - -| Feature | Works with read-only mode? | Is default? | -| ------------- | :------------------------: | :---------: | -| Batching | ✅ | ✅ | -| Export Bucket | - | - | - -By default, MongoDB uses [batching][self-preaggs-batching] to build -pre-aggregations. 
- -### Batching - -No extra configuration is required to configure batching for MongoDB. - -### Export Bucket - -MongoDB does not support export buckets. - -## SSL - -To enable SSL-encrypted connections between Cube and MongoDB, set the -`CUBEJS_DB_SSL` environment variable to `true`. For more information on how to -configure custom certificates, please check out [Enable SSL Connections to the -Database][ref-recipe-enable-ssl]. - -[mongodb]: https://www.mongodb.com/ -[mongobi-with-remote-db]: - https://docs.mongodb.com/bi-connector/current/reference/mongosqld/#example-configuration-file -[cube-blog-mongodb]: - https://cube.dev/blog/building-mongodb-dashboard-using-nodejs -[mongobi-download]: https://www.mongodb.com/download-center/bi-connector -[nodejs-docs-tls-ciphers]: - https://nodejs.org/docs/latest/api/tls.html#tls_modifying_the_default_tls_cipher_suite -[ref-caching-using-preaggs-build-strats]: - /caching/using-pre-aggregations#pre-aggregation-build-strategies -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables -[ref-recipe-enable-ssl]: /recipes/enable-ssl-connections-to-database -[ref-schema-ref-types-formats-countdistinctapprox]: - /schema/reference/types-and-formats#count-distinct-approx -[self-preaggs-batching]: #batching diff --git a/docs/content/Configuration/Databases/MySQL.mdx b/docs/content/Configuration/Databases/MySQL.mdx deleted file mode 100644 index bb8973632199e..0000000000000 --- a/docs/content/Configuration/Databases/MySQL.mdx +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: MySQL -permalink: /config/databases/mysql ---- - -## Prerequisites - -- The hostname for the [MySQL][mysql] database server -- The username/password for the [MySQL][mysql] database server -- The name of the database to use within the [MySQL][mysql] database server - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - 
-```dotenv -CUBEJS_DB_TYPE=mysql -CUBEJS_DB_HOST=my.mysql.host -CUBEJS_DB_NAME=my_mysql_database -CUBEJS_DB_USER=mysql_user -CUBEJS_DB_PASS=********** -``` - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| -------------------- | ----------------------------------------------------------------------------------- | ------------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_HOST` | The host URL for a database | A valid database host URL | ✅ | ✅ | -| `CUBEJS_DB_PORT` | The port for the database connection | A valid port number | ❌ | ✅ | -| `CUBEJS_DB_NAME` | The name of the database to connect to | A valid database name | ✅ | ✅ | -| `CUBEJS_DB_USER` | The username used to connect to the database | A valid database username | ✅ | ✅ | -| `CUBEJS_DB_PASS` | The password used to connect to the database | A valid database password | ✅ | ✅ | -| `CUBEJS_DB_SSL` | If `true`, enables SSL encryption for database connections from Cube | `true`, `false` | ❌ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. Default is `2` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. Default is `8` | A valid number | ❌ | ✅ | - -## Pre-Aggregation Feature Support - -### count_distinct_approx - -Measures of type -[`count_distinct_approx`][ref-schema-ref-types-formats-countdistinctapprox] can -not be used in pre-aggregations when using MySQL as a source database. - -## Pre-Aggregation Build Strategies - - - -To learn more about pre-aggregation build strategies, [head -here][ref-caching-using-preaggs-build-strats]. - - - -| Feature | Works with read-only mode? | Is default? 
| -| ------------- | :------------------------: | :---------: | -| Batching | ✅ | ✅ | -| Export Bucket | - | - | - -By default, MySQL uses [batching][self-preaggs-batching] to build -pre-aggregations. - -### Batching - -No extra configuration is required to configure batching for MySQL. - -### Export Bucket - -MySQL does not support export buckets. - -## SSL - -To enable SSL-encrypted connections between Cube and MySQL, set the -`CUBEJS_DB_SSL` environment variable to `true`. For more information on how to -configure custom certificates, please check out [Enable SSL Connections to the -Database][ref-recipe-enable-ssl]. - -## Additional Configuration - -### <--{"id" : "Additional Configuration"}--> Local/Docker - -To connect to a local MySQL database using a Unix socket, use -`CUBEJS_DB_SOCKET_PATH`. When doing so, `CUBEJS_DB_HOST` will be ignored. - -You can connect to an SSL-enabled MySQL database by setting `CUBEJS_DB_SSL` to -`true`. All other SSL-related environment variables can be left unset. See [the -SSL section][self-ssl] above for more details. 
- -[mysql]: https://www.mysql.com/ -[ref-caching-using-preaggs-build-strats]: - /caching/using-pre-aggregations#pre-aggregation-build-strategies -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables -[ref-recipe-enable-ssl]: /recipes/enable-ssl-connections-to-database -[ref-schema-ref-types-formats-countdistinctapprox]: - /schema/reference/types-and-formats#count-distinct-approx -[self-preaggs-batching]: #batching -[self-ssl]: #ssl diff --git a/docs/content/Configuration/Databases/Oracle.mdx b/docs/content/Configuration/Databases/Oracle.mdx deleted file mode 100644 index 105af4ce51326..0000000000000 --- a/docs/content/Configuration/Databases/Oracle.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Oracle -permalink: /config/databases/oracle ---- - - - The driver for Oracle is{' '} - community-supported and is not - supported by Cube or the vendor. - - -## Prerequisites - -- The hostname for the [Oracle][oracle] database server -- The username/password for the [Oracle][oracle] database server -- The name of the database to use within the [Oracle][oracle] database server - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=oracle -CUBEJS_DB_HOST=my.oracle.host -CUBEJS_DB_NAME=my_oracle_database -CUBEJS_DB_USER=oracle_user -CUBEJS_DB_PASS=********** -``` - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| -------------------- | ----------------------------------------------------------------------------------- | ------------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_HOST` | The host URL for a database | A valid database host URL | ✅ | ✅ | -| `CUBEJS_DB_PORT` | The port for the database 
connection | A valid port number | ❌ | ✅ | -| `CUBEJS_DB_NAME` | The name of the database to connect to | A valid database name | ✅ | ✅ | -| `CUBEJS_DB_USER` | The username used to connect to the database | A valid database username | ✅ | ✅ | -| `CUBEJS_DB_PASS` | The password used to connect to the database | A valid database password | ✅ | ✅ | -| `CUBEJS_DB_SSL` | If `true`, enables SSL encryption for database connections from Cube | `true`, `false` | ❌ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. Default is `2` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. Default is `8` | A valid number | ❌ | ✅ | - -## SSL - -To enable SSL-encrypted connections between Cube and Oracle, set the -`CUBEJS_DB_SSL` environment variable to `true`. For more information on how to -configure custom certificates, please check out [Enable SSL Connections to the -Database][ref-recipe-enable-ssl]. 
- -[oracle]: https://www.oracle.com/uk/index.html -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables -[ref-recipe-enable-ssl]: /recipes/enable-ssl-connections-to-database diff --git a/docs/content/Configuration/Databases/Postgres.mdx b/docs/content/Configuration/Databases/Postgres.mdx deleted file mode 100644 index 8488478912d43..0000000000000 --- a/docs/content/Configuration/Databases/Postgres.mdx +++ /dev/null @@ -1,133 +0,0 @@ ---- -title: Postgres -permalink: /config/databases/postgres ---- - -## Prerequisites - -- The hostname for the [Postgres][postgres] database server -- The username/password for the [Postgres][postgres] database server -- The name of the database to use within the [Postgres][postgres] database - server - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=postgres -CUBEJS_DB_HOST=my.postgres.host -CUBEJS_DB_NAME=my_postgres_database -CUBEJS_DB_USER=postgres_user -CUBEJS_DB_PASS=********** -``` - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| -------------------- | ----------------------------------------------------------------------------------- | ------------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_HOST` | The host URL for a database | A valid database host URL | ✅ | ✅ | -| `CUBEJS_DB_PORT` | The port for the database connection | A valid port number | ❌ | ✅ | -| `CUBEJS_DB_NAME` | The name of the database to connect to | A valid database name | ✅ | ✅ | -| `CUBEJS_DB_USER` | The username used to connect to the database | A valid database username | ✅ | ✅ | -| `CUBEJS_DB_PASS` | The password used to connect to the database | A valid database password | 
✅ | ✅ | -| `CUBEJS_DB_SSL` | If `true`, enables SSL encryption for database connections from Cube | `true`, `false` | ❌ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. Default is `2` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. Default is `8` | A valid number | ❌ | ✅ | - -## Pre-Aggregation Feature Support - -### count_distinct_approx - -Measures of type -[`count_distinct_approx`][ref-schema-ref-types-formats-countdistinctapprox] can -not be used in pre-aggregations when using Postgres as a source database. - -## Pre-Aggregation Build Strategies - - - -To learn more about pre-aggregation build strategies, [head -here][ref-caching-using-preaggs-build-strats]. - - - -| Feature | Works with read-only mode? | Is default? | -| ------------- | :------------------------: | :---------: | -| Batching | ✅ | ✅ | -| Export Bucket | - | - | - -By default, Postgres uses [batching][self-preaggs-batching] to build -pre-aggregations. - -### Batching - -No extra configuration is required to configure batching for Postgres. - -### Export Bucket - -Postgres does not support export buckets. - -## SSL - -To enable SSL-encrypted connections between Cube and Postgres, set the -`CUBEJS_DB_SSL` environment variable to `true`. For more information on how to -configure custom certificates, please check out [Enable SSL Connections to the -Database][ref-recipe-enable-ssl]. - -## Additional Configuration - -### <--{"id" : "Additional Configuration"}--> AWS RDS - -Use `CUBEJS_DB_SSL=true` to enable SSL if you have SSL enabled for your RDS -cluster. Download the new certificate [here][aws-rds-pem], and provide the -contents of the downloaded file to `CUBEJS_DB_SSL_CA`. All other SSL-related -environment variables can be left unset. See [the SSL section][self-ssl] for -more details. More info on AWS RDS SSL can be found [here][aws-docs-rds-ssl]. 
- -### <--{"id" : "Additional Configuration"}--> Google Cloud SQL - -You can connect to an SSL-enabled MySQL database by setting `CUBEJS_DB_SSL` to -`true`. You may also need to set `CUBEJS_DB_SSL_SERVERNAME`, depending on how -you are [connecting to Cloud SQL][gcp-docs-sql-connect]. - -### <--{"id" : "Additional Configuration"}--> Heroku - -Unless you're using a Private or Shield Heroku Postgres database, Heroku -Postgres does not currently support verifiable certificates. [Here is the -description of the issue from Heroku][heroku-postgres-issue]. - -As a workaround, you can set `rejectUnauthorized` option to `false` in the Cube -Postgres driver: - -```javascript -const PostgresDriver = require('@cubejs-backend/postgres-driver'); -module.exports = { - driverFactory: () => - new PostgresDriver({ - ssl: { - rejectUnauthorized: false, - }, - }), -}; -``` - -[aws-docs-rds-ssl]: - https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html -[aws-rds-pem]: https://s3.amazonaws.com/rds-downloads/rds-ca-2019-root.pem -[gcp-docs-sql-connect]: - https://cloud.google.com/sql/docs/postgres/connect-functions#connecting_to -[heroku-postgres-issue]: - https://help.heroku.com/3DELT3RK/why-can-t-my-third-party-utility-connect-to-heroku-postgres-with-ssl -[postgres]: https://www.postgresql.org/ -[ref-caching-using-preaggs-build-strats]: - /caching/using-pre-aggregations#pre-aggregation-build-strategies -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables -[ref-recipe-enable-ssl]: /recipes/enable-ssl-connections-to-database -[ref-schema-ref-types-formats-countdistinctapprox]: - /schema/reference/types-and-formats#count-distinct-approx -[self-preaggs-batching]: #batching -[self-ssl]: #ssl diff --git a/docs/content/Configuration/Databases/Presto.mdx b/docs/content/Configuration/Databases/Presto.mdx deleted file mode 100644 index 3669885d576a1..0000000000000 --- 
a/docs/content/Configuration/Databases/Presto.mdx +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Presto -permalink: /config/databases/presto -redirect_from: - - /config/databases/prestodb ---- - -## Prerequisites - -- The hostname for the [Presto][presto] database server -- The username/password for the [Presto][presto] database server -- The name of the database to use within the [Presto][presto] database server - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=prestodb -CUBEJS_DB_HOST=my.presto.host -CUBEJS_DB_USER=presto_user -CUBEJS_DB_PASS=********** -CUBEJS_DB_PRESTO_CATALOG=my_presto_catalog -CUBEJS_DB_SCHEMA=my_presto_schema -``` - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| -------------------------- | ----------------------------------------------------------------------------------- | --------------------------------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_HOST` | The host URL for a database | A valid database host URL | ✅ | ✅ | -| `CUBEJS_DB_PORT` | The port for the database connection | A valid port number | ❌ | ✅ | -| `CUBEJS_DB_USER` | The username used to connect to the database | A valid database username | ✅ | ✅ | -| `CUBEJS_DB_PASS` | The password used to connect to the database | A valid database password | ✅ | ✅ | -| `CUBEJS_DB_PRESTO_CATALOG` | The catalog within Presto to connect to | A valid catalog name within a Presto database | ✅ | ✅ | -| `CUBEJS_DB_SCHEMA` | The schema within the database to connect to | A valid schema name within a Presto database | ✅ | ✅ | -| `CUBEJS_DB_SSL` | If `true`, enables SSL encryption for database connections from Cube | `true`, `false` | ❌ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has 
to the database. Default is `2` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. Default is `8` | A valid number | ❌ | ✅ | - -## Pre-Aggregation Feature Support - -### count_distinct_approx - -Measures of type -[`count_distinct_approx`][ref-schema-ref-types-formats-countdistinctapprox] can be -used in pre-aggregations when using Presto as a source database. To learn more -about Presto support for approximate aggregate functions, [click -here][presto-docs-approx-agg-fns]. - -## SSL - -To enable SSL-encrypted connections between Cube and Presto, set the -`CUBEJS_DB_SSL` environment variable to `true`. For more information on how to -configure custom certificates, please check out [Enable SSL Connections to the -Database][ref-recipe-enable-ssl]. - -[presto]: https://prestodb.io/ -[presto-docs-approx-agg-fns]: - https://prestodb.io/docs/current/functions/aggregate.html -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables -[ref-recipe-enable-ssl]: /recipes/enable-ssl-connections-to-database -[ref-schema-ref-types-formats-countdistinctapprox]: - /schema/reference/types-and-formats#count-distinct-approx diff --git a/docs/content/Configuration/Databases/QuestDB.mdx b/docs/content/Configuration/Databases/QuestDB.mdx deleted file mode 100644 index 02ebc7f7b087d..0000000000000 --- a/docs/content/Configuration/Databases/QuestDB.mdx +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: QuestDB -permalink: /config/databases/questdb ---- - - - -The driver for QuestDB is supported by its vendor. Please report any issues to -their [Slack][questdb-slack]. 
- - - -## Prerequisites - -- The hostname for the [QuestDB][questdb] database server - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=questdb -CUBEJS_DB_HOST=my.questdb.host -CUBEJS_DB_PORT=8812 -CUBEJS_DB_NAME=qdb -CUBEJS_DB_USER=admin -CUBEJS_DB_PASS=quest -``` - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| -------------------- | ----------------------------------------------------------------------------------- | ------------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_HOST` | The host URL for a database | A valid database host URL | ✅ | ✅ | -| `CUBEJS_DB_PORT` | The port for the database connection | A valid port number | ✅ | ✅ | -| `CUBEJS_DB_NAME` | The name of the database to connect to | A valid database name | ✅ | ✅ | -| `CUBEJS_DB_USER` | The username used to connect to the database | A valid database username | ✅ | ✅ | -| `CUBEJS_DB_PASS` | The password used to connect to the database | A valid database password | ✅ | ✅ | -| `CUBEJS_DB_SSL` | If `true`, enables SSL encryption for database connections from Cube | `true`, `false` | ❌ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. Default is `2` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. 
Default is `8` | A valid number | ❌ | ✅ | - -[questdb]: https://questdb.io/ -[questdb-slack]: https://slack.questdb.io/ -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables diff --git a/docs/content/Configuration/Databases/SQLite.mdx b/docs/content/Configuration/Databases/SQLite.mdx deleted file mode 100644 index 33e598f19bee9..0000000000000 --- a/docs/content/Configuration/Databases/SQLite.mdx +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: SQLite -permalink: /config/databases/sqlite ---- - - - The driver for SQLite is{' '} - community-supported and is not - supported by Cube or the vendor. - - -## Prerequisites - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=sqlite -CUBEJS_DB_NAME=my_sqlite_database -``` - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| -------------------- | ----------------------------------------------------------------------------------- | --------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_NAME` | The name of the database to connect to | A valid database name | ✅ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. Default is `2` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. Default is `8` | A valid number | ❌ | ✅ | - -## SSL - -SQLite does not support SSL connections. 
- -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables diff --git a/docs/content/Configuration/Databases/Snowflake.mdx b/docs/content/Configuration/Databases/Snowflake.mdx deleted file mode 100644 index ad605df76cb84..0000000000000 --- a/docs/content/Configuration/Databases/Snowflake.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Snowflake -permalink: /config/databases/snowflake ---- - - - -[Join our upcoming Office Hours on July 26 at 9am PST](https://cube.dev/events/unlock-data-cube-snowflake) on Getting Started with Cube Cloud and Snowflake. -Learn how to easily connect Cube Cloud and Snowflake, load your data, and integrate your BI tools. - - - -## Prerequisites - -- [The account ID][snowflake-docs-account-id] for [Snowflake][snowflake] -- The warehouse name in the [Snowflake][snowflake] account -- [The region][snowflake-docs-regions] for the [Snowflake][snowflake] warehouse -- The username/password for the [Snowflake][snowflake] account - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=snowflake -CUBEJS_DB_SNOWFLAKE_ACCOUNT=XXXXXXXXX.us-east-1 -CUBEJS_DB_SNOWFLAKE_WAREHOUSE=MY_SNOWFLAKE_WAREHOUSE -CUBEJS_DB_NAME=my_snowflake_database -CUBEJS_DB_USER=snowflake_user -CUBEJS_DB_PASS=********** -``` - -### <--{"id" : "Setup"}--> Cube Cloud - - - -In some cases you'll need to allow connections from your Cube Cloud deployment IP -address to your database. You can copy the IP address from either the Database -Setup step in deployment creation, or from Settings → -Configuration in your deployment. - - - -The following fields are required when creating a Snowflake connection: - - - -Cube Cloud also supports connecting to data sources within private VPCs. 
If you already have VPCs enabled in -your account, check out the [VPC documentation][ref-cloud-conf-vpc] to learn how -to get started. - - - -VPC connectivity is available in Cube Cloud on [Premium](https://cube.dev/pricing) tier. [Contact us][cube-contact] for details. - - - -[cube-pricing]: https://cube.dev/pricing/ -[cube-contact]: https://cube.dev/contact/ -[ref-cloud-conf-vpc]: /cloud/configuration/connecting-with-a-vpc - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| ----------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_SNOWFLAKE_ACCOUNT` | The Snowflake account identifier to use when connecting to the database | [A valid Snowflake account ID][snowflake-docs-account-id] | ✅ | ✅ | -| `CUBEJS_DB_SNOWFLAKE_REGION` | The Snowflake region to use when connecting to the database | [A valid Snowflake region][snowflake-docs-regions] | ❌ | ✅ | -| `CUBEJS_DB_SNOWFLAKE_WAREHOUSE` | The Snowflake warehouse to use when connecting to the database | [A valid Snowflake warehouse][snowflake-docs-warehouse] in the account | ✅ | ✅ | -| `CUBEJS_DB_SNOWFLAKE_ROLE` | The Snowflake role to use when connecting to the database | [A valid Snowflake role][snowflake-docs-roles] in the account | ❌ | ✅ | -| `CUBEJS_DB_SNOWFLAKE_CLIENT_SESSION_KEEP_ALIVE` | If `true`, [keep the Snowflake connection alive indefinitely][snowflake-docs-connection-options] | `true`, `false` | ❌ | ✅ | -| `CUBEJS_DB_NAME` | The name of the database to connect to | A valid database name | ✅ | ✅ | -| `CUBEJS_DB_USER` | The username used to connect to the database | A valid 
database username | ✅ | ✅ | -| `CUBEJS_DB_PASS` | The password used to connect to the database | A valid database password | ✅ | ✅ | -| `CUBEJS_DB_SNOWFLAKE_AUTHENTICATOR` | The type of authenticator to use with Snowflake. Use `SNOWFLAKE` with username/password, or `SNOWFLAKE_JWT` with key pairs. Defaults to `SNOWFLAKE` | `SNOWFLAKE`, `SNOWFLAKE_JWT` | ❌ | ✅ | -| `CUBEJS_DB_SNOWFLAKE_PRIVATE_KEY_PATH` | The path to the private RSA key folder | A valid path to the private RSA key | ❌ | ✅ | -| `CUBEJS_DB_SNOWFLAKE_PRIVATE_KEY_PASS` | The password for the private RSA key. Only required for encrypted keys | A valid password for the encrypted private RSA key | ❌ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. Default is `5` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. Default is `20` | A valid number | ❌ | ✅ | - -## Pre-Aggregation Feature Support - -### count_distinct_approx - -Measures of type -[`count_distinct_approx`][ref-schema-ref-types-formats-countdistinctapprox] can be -used in pre-aggregations when using Snowflake as a source database. To learn -more about Snowflake's support for approximate aggregate functions, [click -here][snowflake-docs-approx-agg-fns]. - -## Pre-Aggregation Build Strategies - - - -To learn more about pre-aggregation build strategies, [head -here][ref-caching-using-preaggs-build-strats]. - - - -| Feature | Works with read-only mode? | Is default? | -| ------------- | :------------------------: | :---------: | -| Batching | ❌ | ✅ | -| Export Bucket | ❌ | ❌ | - -By default, Snowflake uses [batching][self-preaggs-batching] to build -pre-aggregations. - -### Batching - -No extra configuration is required to configure batching for Snowflake. - -### Export Bucket - -Snowflake supports using both [AWS S3][aws-s3] and [Google Cloud -Storage][google-cloud-storage] for export bucket functionality. 
- -#### AWS S3 - - - -Ensure the AWS credentials are correctly configured in IAM to allow reads and -writes to the export bucket in S3. - - - -```dotenv -CUBEJS_DB_EXPORT_BUCKET_TYPE=s3 -CUBEJS_DB_EXPORT_BUCKET=my.bucket.on.s3 -CUBEJS_DB_EXPORT_BUCKET_AWS_KEY= -CUBEJS_DB_EXPORT_BUCKET_AWS_SECRET= -CUBEJS_DB_EXPORT_BUCKET_AWS_REGION= -``` - -#### Google Cloud Storage - - - -When using an export bucket, remember to assign the **Storage Object Admin** -role to your BigQuery credentials (`CUBEJS_DB_EXPORT_GCS_CREDENTIALS`). - - - -Before configuring Cube, an [integration must be created and configured in -Snowflake][snowflake-docs-gcs-integration]. Take note of the integration name -(`gcs_int` from the example link) as you'll need it to configure Cube. - -Once the Snowflake integration is set up, configure Cube using the following: - -```dotenv -CUBEJS_DB_EXPORT_BUCKET=snowflake-export-bucket -CUBEJS_DB_EXPORT_BUCKET_TYPE=gcp -CUBEJS_DB_EXPORT_GCS_CREDENTIALS= -CUBEJS_DB_EXPORT_INTEGRATION=gcs_int -``` - -## SSL - -Cube does not require any additional configuration to enable SSL as Snowflake -connections are made over HTTPS. 
- -[aws-s3]: https://aws.amazon.com/s3/ -[google-cloud-storage]: https://cloud.google.com/storage -[ref-caching-using-preaggs-build-strats]: - /caching/using-pre-aggregations#pre-aggregation-build-strategies -[ref-schema-ref-types-formats-countdistinctapprox]: - /schema/reference/types-and-formats#count-distinct-approx -[self-preaggs-batching]: #batching -[snowflake]: https://www.snowflake.com/ -[snowflake-docs-account-id]: - https://docs.snowflake.com/en/user-guide/admin-account-identifier.html -[snowflake-docs-connection-options]: - https://docs.snowflake.com/en/user-guide/nodejs-driver-use.html#additional-connection-options -[snowflake-docs-gcs-integration]: - https://docs.snowflake.com/en/user-guide/data-load-gcs-config.html -[snowflake-docs-regions]: - https://docs.snowflake.com/en/user-guide/intro-regions.html -[snowflake-docs-roles]: - https://docs.snowflake.com/en/user-guide/security-access-control-overview.html#roles -[snowflake-docs-approx-agg-fns]: - https://docs.snowflake.com/en/sql-reference/functions/approx_count_distinct.html -[snowflake-docs-warehouse]: - https://docs.snowflake.com/en/user-guide/warehouses.html diff --git a/docs/content/Configuration/Databases/Trino.mdx b/docs/content/Configuration/Databases/Trino.mdx deleted file mode 100644 index 5b6bf4275ed60..0000000000000 --- a/docs/content/Configuration/Databases/Trino.mdx +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Trino -permalink: /config/databases/trino ---- - -## Prerequisites - -- The hostname for the [Trino][trino] database server -- The username/password for the [Trino][trino] database server -- The name of the database to use within the [Trino][trino] database server - -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=trino -CUBEJS_DB_HOST=my.trino.host -CUBEJS_DB_USER=trino_user -CUBEJS_DB_PASS=********** -CUBEJS_DB_PRESTO_CATALOG=my_trino_catalog -CUBEJS_DB_SCHEMA=my_trino_schema -``` - -## 
Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| -------------------------- | ----------------------------------------------------------------------------------- | --------------------------------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_HOST` | The host URL for a database | A valid database host URL | ✅ | ✅ | -| `CUBEJS_DB_PORT` | The port for the database connection | A valid port number | ❌ | ✅ | -| `CUBEJS_DB_USER` | The username used to connect to the database | A valid database username | ✅ | ✅ | -| `CUBEJS_DB_PASS` | The password used to connect to the database | A valid database password | ✅ | ✅ | -| `CUBEJS_DB_PRESTO_CATALOG` | The catalog within Presto to connect to | A valid catalog name within a Presto database | ✅ | ✅ | -| `CUBEJS_DB_SCHEMA` | The schema within the database to connect to | A valid schema name within a Presto database | ✅ | ✅ | -| `CUBEJS_DB_SSL` | If `true`, enables SSL encryption for database connections from Cube | `true`, `false` | ❌ | ✅ | -| `CUBEJS_CONCURRENCY` | The number of concurrent connections each queue has to the database. Default is `2` | A valid number | ❌ | ❌ | -| `CUBEJS_DB_MAX_POOL` | The maximum number of concurrent database connections to pool. Default is `8` | A valid number | ❌ | ✅ | - -## Pre-Aggregation Feature Support - -### count_distinct_approx - -Measures of type -[`count_distinct_approx`][ref-schema-ref-types-formats-countdistinctapprox] can be -used in pre-aggregations when using Trino as a source database. To learn more -about Trino support for approximate aggregate functions, [click -here][trino-docs-approx-agg-fns]. - -## SSL - -To enable SSL-encrypted connections between Cube and Trino, set the -`CUBEJS_DB_SSL` environment variable to `true`. 
For more information on how to -configure custom certificates, please check out [Enable SSL Connections to the -Database][ref-recipe-enable-ssl]. - -[trino]: https://trino.io/ -[trino-docs-approx-agg-fns]: - https://trino.io/docs/current/functions/aggregate.html#approximate-aggregate-functions -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables -[ref-recipe-enable-ssl]: /recipes/enable-ssl-connections-to-database -[ref-schema-ref-types-formats-countdistinctapprox]: - /schema/reference/types-and-formats#count-distinct-approx diff --git a/docs/content/Configuration/Databases/ksqlDB.mdx b/docs/content/Configuration/Databases/ksqlDB.mdx deleted file mode 100644 index 62e4b88657f6a..0000000000000 --- a/docs/content/Configuration/Databases/ksqlDB.mdx +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: ksqlDB -permalink: /config/databases/ksqldb ---- - - - ksqlDB driver is in preview. Please{' '} - contact us if you need help running it - in production. - - -## Prerequisites - -- Hostname for the ksqlDB server -- Username and password to connect to ksqlDB server - -If you are using Confluent Cloud, you need to generate API key and use **key as -username** and **secret as password**. 
- -## Setup - -### <--{"id" : "Setup"}--> Manual - -Add the following to a `.env` file in your Cube project: - -```dotenv -CUBEJS_DB_TYPE=ksql -CUBEJS_DB_URL=https://xxxxxx-xxxxx.us-west4.gcp.confluent.cloud:443 -CUBEJS_DB_USER=username -CUBEJS_DB_PASS=password -``` - -## Environment Variables - -| Environment Variable | Description | Possible Values | Required | [Supports multiple data sources?][ref-config-multiple-ds-decorating-env] | -| -------------------- | --------------------------------------------------------------------------- | ------------------------- | :------: | :----------------------------------------------------------------------: | -| `CUBEJS_DB_URL` | The host URL for ksqlDB with port | A valid database host URL | ✅ | ✅ | -| `CUBEJS_DB_USER` | The username used to connect to the ksqlDB. API key for Confluent Cloud. | A valid port number | ✅ | ✅ | -| `CUBEJS_DB_PASS` | The password used to connect to the ksqlDB. API secret for Confluent Cloud. | A valid database name | ✅ | ✅ | - -## Pre-Aggregations Support - -ksqlDB supports only -[streaming pre-aggregations](/caching/using-pre-aggregations#streaming-pre-aggregations). - -[ref-config-multiple-ds-decorating-env]: - /config/multiple-data-sources#configuring-data-sources-with-environment-variables-decorated-environment-variables diff --git a/docs/content/Configuration/Downstream/Appsmith.mdx b/docs/content/Configuration/Downstream/Appsmith.mdx deleted file mode 100644 index 66aea2ae8054b..0000000000000 --- a/docs/content/Configuration/Downstream/Appsmith.mdx +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Connecting to Appsmith -permalink: /config/downstream/appsmith ---- - -[Appsmith](https://www.appsmith.com) is an open-source framework for building -internal tools. You can connect a Cube deployment to Appsmith using Cube’s -[REST API](https://cube.dev/docs/rest-api). - -Here's a short video guide on how to connect Appsmith to Cube. - - - -## Use REST API in Cube - -> Don't have a Cube project yet? 
-> [Learn how to get started here](https://cube.dev/docs/getting-started/cloud/overview). - -### <--{"id" : "Use REST API in Cube"}--> Cube Cloud - -Click the “How to connect” link on the Overview page, navigate to the REST API -tab. You should see the screen below with your connection credentials (the REST -API URL and the authorization token): - - - -### <--{"id" : "Use REST API in Cube"}--> Self-hosted Cube - -For a Cube instance publicly available at a specific `HOST`, the REST API URL -would be `HOST/cubejs-api/v1`. Please refer to the -[REST API page](https://cube.dev/docs/rest-api) for details. - -You will also need to generate a JSON Web Token that would be used to -authenticate requests to Cube. - -Please check the -[Security page](https://cube.dev/docs/security#generating-json-web-tokens-jwt) -to learn how to generate a token. We suggest generating a long-lived JWT that -won't expire soon. - -## Create a new Data Source in Appsmith - -Copy and paste the REST API URL and the Authorization token to create a new data -source in Appsmith. - - - -## Create a POST request in Appsmith - -Get your Cube query in the JSON -[query format](https://cube.dev/docs/query-format) ready. You can copy it from -Cube’s Playground or compose manually: - - - -Create a POST request, paste the JSON query in the **Body**. Make sure to add a -`query` parameter for your JSON query. - -Because you saved the data source as `HOST/cubejs-api/v1`, don't forget to add a -`/load` endpoint to the end of the data source API. - -Next, hit Run. - - - -## Display the Data in Appsmith - -You have many options to display the data in Appsmith. For instance, you can -display the data in a table widget. Also, you can create a chart widget and map -the values to _x_ and _y_ coordinates accordingly, give a _title_ and _names_ to -the _axis_. 
- - diff --git a/docs/content/Configuration/Downstream/Bubble.mdx b/docs/content/Configuration/Downstream/Bubble.mdx deleted file mode 100644 index c13652301e759..0000000000000 --- a/docs/content/Configuration/Downstream/Bubble.mdx +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: Connecting to Bubble -permalink: /config/downstream/bubble ---- - -[Bubble](https://bubble.io/) is one of the oldest and most comprehensive -low-code app-building platforms on the market. You can connect a Cube deployment -to Bubble using Cube’s [REST API](https://cube.dev/docs/rest-api). - -Here's a short video guide on how to connect Bubble to Cube. - - - -## Use REST API in Cube - -> Don't have a Cube project yet? -> [Learn how to get started here](https://cube.dev/docs/getting-started/cloud/overview). - -### <--{"id" : "Use REST API in Cube"}--> Cube Cloud - -Click the "How to connect" link on the Overview page, navigate to the REST API -tab. You will see your connection credentials, the REST API URL and the -authorization token: - - - -### <--{"id" : "Use REST API in Cube"}--> Self-hosted Cube - -For a Cube instance publicly available at a specific `HOST`, the REST API URL -would be `HOST/cubejs-api/v1`. Please refer to the -[REST API page](https://cube.dev/docs/rest-api) for details. - -You will also need to generate a JSON Web Token that would be used to -authenticate requests to Cube. - -Please check the -[Security page](https://cube.dev/docs/security#generating-json-web-tokens-jwt) -to learn how to generate a token. We suggest generating a long-lived JWT that -won't expire soon. - -## Add an API connector plugin to your Bubble app - -Navigate to the plugins. Select and install the API connector plugin. - -## Create a new API in Bubble - -Click "Add another API". - - - -Next, set the "Authentication" as "Private key in header", and copy and paste -Cube's Authorization token. Additionally, make sure to add a shared header for -all calls by setting `content-type` to `application/json`. 
- - - -## Create a POST request in Bubble - -Click "Add another call". - -Copy and paste the Cube REST API, including the `/load` path, in the API field. -Make sure to select "Use as: Action", and use the `JSON` "data type" and "body -type". - - - -Get your Cube query in the JSON -[query format](https://cube.dev/docs/query-format) ready. You can copy it from -Cube’s Playground or compose manually. - -Paste the JSON query in the **Body**. Make sure to add a `query` parameter for -your JSON query. - - - -Next, hit "Initialize / Reinitialize call". - -## Create a workflow in Bubble - -First, add a "Page is loaded" event. Add an action that uses the API connector -plugin. Next, add another action to set the state of an element. Select the -index element, and create a new custom state. In this example I named the custom -state `OrdersByMonth` and selected the `data` field of the API response. Make -sure to toggle the "This state is a list" checkbox as well. While setting the -state's value you will need to select "Result of step 1" and the `data` field. - - - -## Display the data in Bubble - -Bubble has an amazing feature where you can drag and drop UI components into the -design dashboard. - -Once you added a chart, you configure the data points. More specifically, the -series categories and series data. For the series categories and series data you -need to select the `index` and `OrdersByMonth` state followed by the data point -you want to use in order to populate the chart. - - - -Once you open the "Preview" you'll see your nicely rendered charts. 
- - diff --git a/docs/content/Configuration/Downstream/Budibase.mdx b/docs/content/Configuration/Downstream/Budibase.mdx deleted file mode 100644 index 4f90cadffa1ab..0000000000000 --- a/docs/content/Configuration/Downstream/Budibase.mdx +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Connecting to Budibase -permalink: /config/downstream/budibase ---- - -Budibase is a low-code app-building platform that helps developers create -responsive web applications with access methods to databases, APIs, and much -more. You can connect a Cube deployment to Budibase using Cube's -[REST API](https://cube.dev/docs/rest-api). - -Here's a short video guide on how to connect Budibase to Cube. - - - -## Use REST API in Cube - -> Don't have a Cube project yet? -> [Learn how to get started here](https://cube.dev/docs/getting-started/cloud/overview). - -### <--{"id" : "Use REST API in Cube"}--> Cube Cloud - -Click the "How to connect" link on the Overview page, navigate to the REST API -tab. You will see your connection credentials, the REST API URL and the -authorization token: - - - -### <--{"id" : "Use REST API in Cube"}--> Self-hosted Cube - -For a Cube instance publicly available at a specific `HOST`, the REST API URL -would be `HOST/cubejs-api/v1`. Please refer to the -[REST API page](https://cube.dev/docs/rest-api) for details. - -You will also need to generate a JSON Web Token that would be used to -authenticate requests to Cube. - -Please check the -[Security page](https://cube.dev/docs/security#generating-json-web-tokens-jwt) -to learn how to generate a token. We suggest generating a long-lived JWT that -won't expire soon. - -## Create a new data source in Budibase - -Create a new "REST API" data source in Budibase. - - - -Next, add a "Authorization" header for the REST API. Copy and paste Cube's -Authorization token. - - - -## Add a query in Budibase - -Click "+ Add Query". - -Copy and paste the Cube REST API, including the `/load` path, in the API field. 
-Make sure to select "POST" as the request. - - - -Get your Cube query in the JSON -[query format](https://cube.dev/docs/query-format) ready. You can copy it from -Cube's Playground or compose manually. - -Paste the JSON query in the **Body** as "raw(JSON)". Make sure to add a `query` -parameter for your JSON query. - - - -Edit the **Transform** to return the data of the request. - -```javascript -return data.data -``` - - - -You can also give the query a name. In this sample it's called `OrdersByMonth`. - -Lastly, click the "save" button and hit "send" to test the API and get a -response back. - -## Create a data provider in Budibase - -Move to the **Design** section and open the **Components**. - -First, add a Data Provider. Select the query from above. In this sample it's -called `OrdersByMonth`. - - - -## Display the data in Budibase - -Add a chart into the data provider. Next, configure the data provider for the -chart. Make sure it's set to the data provider you just created. - -Then, you set the "label column" and "data columns". In this sample you can set -`Orders.createdAt` as the label column and `Orders.count` as the data columns. - - - -You'll see a nicely rendered chart show up. - - diff --git a/docs/content/Configuration/Downstream/Deepnote.mdx b/docs/content/Configuration/Downstream/Deepnote.mdx deleted file mode 100644 index 505bd41dee585..0000000000000 --- a/docs/content/Configuration/Downstream/Deepnote.mdx +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Connecting to Deepnote -permalink: /config/downstream/deepnote ---- - -You can connect to Cube from Deepnote, a new kind of data notebook that's built -for collaboration and is Jupyter compatible, using the [Cube SQL -API][ref-sql-api]. - -Here's a short video guide on how to connect Deepnote to Cube. - - - -## Enable Cube SQL API - - - -Don't have a Cube project yet? [Learn how to get started -here][ref-getting-started]. 
- - - -### <--{"id" : "Enable Cube SQL API"}--> Cube Cloud - -Click **Deploy SQL API** and then the **How to connect your BI tool** link on -the Overview page of your Cube deployment. Navigate to the **BIs and -Visualization Tools** tab. You should see the screen like the one below with -your connection credentials: - - - -### <--{"id" : "Enable Cube SQL API"}--> Self-hosted Cube - -You need to set the following environment variables to enable the Cube SQL API. -These credentials will be required to connect to Cube from Deepnote later. - -```dotenv -CUBEJS_PG_SQL_PORT=5432 -CUBEJS_SQL_USER=myusername -CUBEJS_SQL_PASSWORD=mypassword -``` - -## Connecting from Deepnote - -Deepnote connects to Cube as to a Postgres database. - - - -## Querying data - -Your cubes will be exposed as tables, where both your measures and dimensions -are columns. - -You can write SQL in Deepnote that will be executed in Cube. Learn more about -Cube SQL syntax on the [reference page][ref-sql-api]. - -
- -
- -You can also create a visualization of the executed SQL query. - -
- -
- -[ref-getting-started]: /getting-started/cloud/overview -[ref-sql-api]: /backend/sql diff --git a/docs/content/Configuration/Downstream/Delphi.mdx b/docs/content/Configuration/Downstream/Delphi.mdx deleted file mode 100644 index 34108d2276483..0000000000000 --- a/docs/content/Configuration/Downstream/Delphi.mdx +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Connecting to Delphi -permalink: /config/downstream/delphi ---- - -[Delphi][delphi-website] is a conversational interface for the semantic layer, -powered by the large language model (LLM) technology by OpenAI. - -It provides a Slack bot that takes questions in natural language, translates -them into queries to Cube, and delivers the results back to Slack. You can learn -more about Delphi from the [blog post][delphi-blog-post] or see it in action in -this video: - - - -To start using Delphi, you need to install its bot (application) to your Slack -workspace, configure it, add to necessary channels, and start asking questions. - -## Installing Delphi to Slack - -Go to the [Slack application][delphi-slack-install] page; this will start the -installation. - - - -You may need to be a Slack workspace administrator to install a new application. - - - -## Configuring the connection to Cube - -In Slack, navigate to the Delphi app on the left sidebar. - -In the Home tab, choose Cube as the connection type: - - - -Then, enter your credentials: - -- Delphi Client ID and API Key -- Cube [REST API][ref-rest-api] URL and a [JSON Web Token][ref-jwt] - - - - - -If you don’t have Delphi Client ID and API Key yet, get them by -[email][delphi-email] from the Delphi team. - - - -## Asking questions - -Send Delphi a direct message or add it to relevant channels. - -Start with `@Delphi` and ask anything about your Cube [data -model][ref-data-model]. 
- - - -[delphi-website]: https://www.delphihq.com/ -[delphi-blog-post]: - https://cube.dev/blog/conversational-interface-for-semantic-layer -[delphi-slack-install]: https://delphi-prod.onrender.com/slack/install -[delphi-email]: - mailto:michael@delphihq.com?subject=Delphi%20credentials%20to%20use%20with%20Cube -[ref-rest-api]: /http-api/rest -[ref-jwt]: /security#generating-json-web-tokens-jwt -[ref-data-model]: /schema/getting-started diff --git a/docs/content/Configuration/Downstream/Hex.mdx b/docs/content/Configuration/Downstream/Hex.mdx deleted file mode 100644 index e3a0debc62bc0..0000000000000 --- a/docs/content/Configuration/Downstream/Hex.mdx +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Connecting to Hex -permalink: /config/downstream/hex ---- - -You can connect to Cube from Hex, a collaborative data platform, using the [Cube -SQL API][ref-sql-api]. - -Here's a short video guide on how to connect Hex to Cube. - - - -## Enable Cube SQL API - - - -Don't have a Cube project yet? [Learn how to get started -here][ref-getting-started]. - - - -### <--{"id" : "Enable Cube SQL API"}--> Cube Cloud - -Click **How to connect your BI tool** link on the Overview page, navigate to the -SQL API tab and enable it. Once enabled, you should see the screen like the one -below with your connection credentials: - - - -### <--{"id" : "Enable Cube SQL API"}--> Self-hosted Cube - -You need to set the following environment variables to enable the Cube SQL API. -These credentials will be required to connect to Cube from Hex later. - -```dotenv -CUBEJS_PG_SQL_PORT=5432 -CUBEJS_SQL_USER=myusername -CUBEJS_SQL_PASSWORD=mypassword -``` - -## Connecting from Hex - -Hex connects to Cube as to a Postgres database. - -## Querying data - -Your cubes will be exposed as tables, where both your measures and dimensions -are columns. - -You can write SQL in Hex that will be executed in Cube. Learn more about Cube -SQL syntax on the [reference page][ref-sql-api]. - -
- -
- -You can also create a visualization of the executed SQL query. - -
- -
- -[ref-getting-started]: /getting-started/cloud/overview -[ref-sql-api]: /backend/sql diff --git a/docs/content/Configuration/Downstream/Jupyter.mdx b/docs/content/Configuration/Downstream/Jupyter.mdx deleted file mode 100644 index 9debfac1ea0ac..0000000000000 --- a/docs/content/Configuration/Downstream/Jupyter.mdx +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: Connecting to Jupyter -permalink: /config/downstream/jupyter ---- - -You can connect to Cube from Jupyter using the [Cube SQL API][ref-sql-api]. The -Jupyter Notebook is a web application for creating and sharing computational -documents. - -Here's a short video guide on how to connect Jupyter to Cube. - - - -## Enable Cube SQL API - - - -Don't have a Cube project yet? [Learn how to get started -here][ref-getting-started]. - - - -### <--{"id" : "Enable Cube SQL API"}--> Cube Cloud - -Click **Deploy SQL API** and then the **How to connect your BI tool** link on -the Overview page of your Cube deployment. Navigate to the **BIs and -Visualization Tools** tab. You should see the screen like the one below with -your connection credentials: - - - -### <--{"id" : "Enable Cube SQL API"}--> Self-hosted Cube - -You need to set the following environment variables to enable the Cube SQL API. -These credentials will be required to connect to Cube from Jupyter later. - -```dotenv -CUBEJS_PG_SQL_PORT=5432 -CUBEJS_SQL_USER=myusername -CUBEJS_SQL_PASSWORD=mypassword -``` - -## Connecting from Jupyter - -Jupyter connects to Cube as to a Postgres database. - -Make sure to install the `sqlalchemy` and `pandas` modules. - -```bash{promptUser: user} -pip install sqlalchemy -pip install pandas -``` - -Then you can use `sqlalchemy.create_engine` to connect to Cube's SQL API. 
- -```python -import sqlalchemy -import pandas - -engine = sqlalchemy.create_engine( - sqlalchemy.engine.url.URL( - drivername="postgresql", - username="cube", - password="9943f670fd019692f58d66b64e375213", - host="thirsty-raccoon.sql.aws-eu-central-1.cubecloudapp.dev", - port="5432", - database="db@thirsty-raccoon", - ), - echo_pool=True, -) -print("connecting with engine " + str(engine)) -connection = engine.connect() - -# ... -``` - -## Querying data - -Your cubes will be exposed as tables, where both your measures and dimensions -are columns. - -You can write SQL in Jupyter that will be executed in Cube. Learn more about -Cube SQL syntax on the [reference page][ref-sql-api]. - -```python -# ... - -query = "SELECT SUM(count), status FROM orders GROUP BY status;" -df = pandas.read_sql_query(query, connection) -``` - -In your Jupyter notebook it'll look like this. - -
- -
- -You can also create a visualization of the executed SQL query. - -
- -
- -[ref-getting-started]: /getting-started/cloud/overview -[ref-sql-api]: /backend/sql diff --git a/docs/content/Configuration/Downstream/Metabase.mdx b/docs/content/Configuration/Downstream/Metabase.mdx deleted file mode 100644 index d4d52e76bfd14..0000000000000 --- a/docs/content/Configuration/Downstream/Metabase.mdx +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Connecting to Metabase -permalink: /config/downstream/metabase ---- - -[Metabase][metabase-oss] is an open-source way to help everyone in your -company to ask questions and learn from data. There's also a fully-managed -[cloud service][metabase] for Metabase. - -## Semantic Layer Sync - -It is recommended to use [Semantic Layer Sync][ref-sls] to connect Cube to -Metabase. It automatically synchronizes the [data model][ref-data-model] -between Cube and Metabase. - -## SQL API - -You can also use the [SQL API][ref-sql-api] to connect Cube to Metabase. - - - -[Semantic Layer Sync][ref-sls] is the recommended way to connect Metabase to -Cube. If you're using the SQL API, consider migrating to Semantic Layer Sync. - - - -Here's a short video guide: - - - -### <--{"id" : "Enable Cube SQL API"}--> Cube Cloud - -Click **Deploy SQL API** and then the **How to connect your BI tool** link on -the Overview page of your Cube deployment. Navigate to the **BIs and -Visualization Tools** tab. You should see the screen like the one below with -your connection credentials: - - - -### <--{"id" : "Enable Cube SQL API"}--> Self-hosted Cube - -You need to set the following environment variables to enable the Cube SQL API. -These credentials will be required to connect to Cube from Metabase later. - -```dotenv -CUBEJS_PG_SQL_PORT=5432 -CUBEJS_SQL_USER=myusername -CUBEJS_SQL_PASSWORD=mypassword -``` - -## Connecting from Metabase - -Metabase connects to Cube as to a Postgres database. - -
- -
- -## Querying data - -Your cubes will be exposed as tables, where both your measures and dimensions -are columns. - -
- -
- -You can write SQL in Metabase that will be executed in Cube. Learn more about -Cube SQL syntax on the [reference page][ref-sql-api]. You can also create a -visualization of the executed SQL query. - -
- -
- -If you prefer using the UI interface to "Ask a question", you can do that as -well. - -
- -
- -[ref-getting-started]: /getting-started/cloud/overview -[ref-sql-api]: /backend/sql -[metabase-oss]: https://github.com/metabase/metabase -[metabase]: https://www.metabase.com -[ref-sls]: /semantic-layer-sync -[ref-sql-api]: /backend/sql -[ref-data-model]: /schema/getting-started \ No newline at end of file diff --git a/docs/content/Configuration/Downstream/Observable.mdx b/docs/content/Configuration/Downstream/Observable.mdx deleted file mode 100644 index 4c5bfe70f399e..0000000000000 --- a/docs/content/Configuration/Downstream/Observable.mdx +++ /dev/null @@ -1,213 +0,0 @@ ---- -title: Connecting to Observable -permalink: /config/downstream/observable ---- - -You can connect to Cube from Observable, a new kind of collaborative data -notebook that's built to uncover new insights, answer more questions, and make -better decisions, using the [Cube SQL API][ref-sql-api] or [Cube REST -API][ref-rest-api]. - -Here's a short video guide on how to connect Observable to Cube. - - - -## Use SQL API in Cube - - - -Don't have a Cube project yet? [Learn how to get started -here][ref-getting-started]. - - - -### <--{"id" : "Use SQL API in Cube"}--> Cube Cloud - -Click **Deploy SQL API** and then the **How to connect your BI tool** link on -the Overview page of your Cube deployment. Navigate to the **BIs and -Visualization Tools** tab. You should see the screen like the one below with -your connection credentials: - - - -### <--{"id" : "Use SQL API in Cube"}--> Self-hosted Cube - -You need to set the following environment variables to enable the Cube SQL API. -These credentials will be required to connect to Cube from Observable later. - -```dotenv -CUBEJS_PG_SQL_PORT=5432 -CUBEJS_SQL_USER=myusername -CUBEJS_SQL_PASSWORD=mypassword -``` - -## Connecting Cube SQL API from Observable - -Observable connects to Cube as to a Postgres database. 
- - - -### <--{"id" : "Connecting Cube SQL API from Observable"}--> Querying data with SQL API - -Your cubes will be exposed as tables, where both your measures and dimensions -are columns. - -Make sure to add a database to your notebook, and select **Database query** when -adding a new block. - -
- -
- -You can write SQL in Observable that will be executed in Cube. Learn more about -Cube SQL syntax on the [reference page][ref-sql-api]. - -
- -
- -You can also create a visualization of the executed SQL query. - -
- -
- -## Use REST API in Cube - -> Don't have a Cube project yet? -> [Learn how to get started here](https://cube.dev/docs/getting-started/cloud/overview). - -### <--{"id" : "Use REST API in Cube"}--> Cube Cloud - -Click the "How to connect" link on the Overview page, navigate to the REST API -tab. You will see your connection credentials, the REST API URL and the -authorization token: - - - -### <--{"id" : "Use REST API in Cube"}--> Self-hosted Cube - -For a Cube instance publicly available at a specific `HOST`, the REST API URL -would be `HOST/cubejs-api/v1`. Please refer to the -[REST API page](https://cube.dev/docs/rest-api) for details. - -You will also need to generate a JSON Web Token that would be used to -authenticate requests to Cube. - -Please check the -[Security page](https://cube.dev/docs/security#generating-json-web-tokens-jwt) -to learn how to generate a token. We suggest generating a long-lived JWT that -won't expire soon. - -## Connecting Cube REST API from Observable - -Observable connects to Cube through the REST API. - -### <--{"id" : "Connecting Cube REST API from Observable"}--> Querying data with REST API - -First, add two generic **JavaScript** cells: - -
- -
- -Next, copy Cube's REST API URL and the Authorization token and paste them into -their respective cells. - -```javascript -cubeRestApi = - 'https://thirsty-raccoon.aws-eu-central-1.cubecloudapp.dev/cubejs-api/v1/load'; -``` - -Because the Cube REST API has the format of `HOST/cubejs-api/v1`, don't forget -to add the `/load` endpoint to the end of the data source API. - -```javascript -cubeRestApiJwtToken = - 'Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOjE2NTgzMzM3OTZ9.gUOoDgo_RJka_ZANdwSw3v8GkM4ZzH9LjxrxKxkGAk0'; -``` - -Also make sure to add the token next to the Bearer part of the Authorization -header. - -Get your Cube query in the JSON -[query format](https://cube.dev/docs/query-format) ready. You can copy it from -Cube's Playground or compose manually. - -Paste the JSON query in another JavaScript cell as an object literal and give it -a name, I chose `jsonBody` for simplicity. Make sure to add a `query` parameter -for your JSON query. - -```javascript -jsonQuery = { - query: { - measures: ['orders.count'], - timeDimensions: [ - { - dimension: 'orders.created_at', - granularity: 'month', - }, - ], - order: { - 'orders.created_at': 'asc', - }, - }, -}; -``` - -Next, create another JavaScript cell with a POST request. Paste this POST -request in the cell. Don't forget to put the `jsonBody` object inside the -`JSON.stringify` call. - -```javascript -orders_over_time = fetch(cubeRestApi, { - method: 'POST', - headers: { - Authorization: cubeRestApiJwtToken, - 'Content-Type': 'application/json', - }, - body: JSON.stringify(jsonQuery), -}) - .then((response) => response.json()) - .then((json) => json.data); -``` - -Next, click the play button on the top right of the cell. - -
- -
- -You can also create a visualization of the executed REST API request. - -
- -
- -[ref-getting-started]: /getting-started/cloud/overview -[ref-sql-api]: /backend/sql -[ref-rest-api]: /backend/rest-api diff --git a/docs/content/Configuration/Downstream/PowerBI.mdx b/docs/content/Configuration/Downstream/PowerBI.mdx deleted file mode 100644 index 87d5ed5d8f052..0000000000000 --- a/docs/content/Configuration/Downstream/PowerBI.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Connecting from Power BI -permalink: /config/downstream/powerbi ---- - - - -Power BI support is in preview, not all features and requests may work at this -point. - - - -You can connect to Cube from Power BI, interactive data visualization software -product developed by Microsoft, using the [Cube SQL API][ref-sql-api]. - -## Enable Cube SQL API - - - -Don't have a Cube project yet? [Learn how to get started -here][ref-getting-started]. - - - -### <--{"id" : "Enable Cube SQL API"}--> Cube Cloud - -Click **How to connect your BI tool** link on the Overview page, navigate to the -SQL API tab and enable it. Once enabled, you should see the screen like the one -below with your connection credentials: - - - -### <--{"id" : "Enable Cube SQL API"}--> Self-hosted Cube - -You need to set the following environment variables to enable the Cube SQL API. -These credentials will be required to connect to Cube from PowerBI later. - -```dotenv -CUBEJS_PG_SQL_PORT=5432 -CUBEJS_SQL_USER=myusername -CUBEJS_SQL_PASSWORD=mypassword -``` - -## Connecting from Power BI - -Power BI connects to Cube as to a Postgres database. - -## Querying data - -Your cubes will be exposed as tables, where both your measures and dimensions -are columns. 
- -[ref-getting-started]: /getting-started/cloud/overview -[ref-sql-api]: /backend/sql diff --git a/docs/content/Configuration/Downstream/Retool.mdx b/docs/content/Configuration/Downstream/Retool.mdx deleted file mode 100644 index a5f801b4cba31..0000000000000 --- a/docs/content/Configuration/Downstream/Retool.mdx +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: Connecting to Retool -permalink: /config/downstream/retool ---- - -[Retool](https://retool.com/) is the fast way to build internal tools. It lets -you visually design apps that interface with any database or API. You can -connect a Cube deployment to Retool using Cube’s -[REST API](https://cube.dev/docs/rest-api). - -Here's a short video guide on how to connect Retool to Cube. - - - -## Use REST API in Cube - -> Don't have a Cube project yet? -> [Learn how to get started here](https://cube.dev/docs/getting-started/cloud/overview). - -### <--{"id" : "Use REST API in Cube"}--> Cube Cloud - -Click the "How to connect" link on the Overview page, navigate to the REST API -tab. You will see your connection credentials, the REST API URL and the -authorization token: - - - -### <--{"id" : "Use REST API in Cube"}--> Self-hosted Cube - -For a Cube instance publicly available at a specific `HOST`, the REST API URL -would be `HOST/cubejs-api/v1`. Please refer to the -[REST API page](https://cube.dev/docs/rest-api) for details. - -You will also need to generate a JSON Web Token that would be used to -authenticate requests to Cube. - -Please check the -[Security page](https://cube.dev/docs/security#generating-json-web-tokens-jwt) -to learn how to generate a token. We suggest generating a long-lived JWT that -won't expire soon. - -## Create a new resource in Retool - -Create a new "REST API" resource in Retool. - - - -Next, copy and paste Cube's REST API URL and the Authorization token. - - - -## Create a POST request in Retool - -Get your Cube query in the JSON -[query format](https://cube.dev/docs/query-format) ready. 
You can copy it from -Cube’s Playground or compose manually. - -Create a POST request, paste the JSON query in the **Body**. Make sure to add a -`query` parameter for your JSON query. - -Because the Cube REST API has the format of `HOST/cubejs-api/v1`, don't forget -to add the `/load` endpoint to the end of the data source API. - -Next, hit Run. - - - -## Display the data in Retool - -Retool has an amazing feature where you can drag and drop UI components into the -dashboard. You can use this to add a tables, bar charts, and much more. - -Because the name of the Retool query in the example is `OrdersByMonth`, using -the data binding curly brackets will populate the charts with data from the REST -API. - -```handlebars -{{ OrdersByMonth.data.data }} -``` - -Reference the name of the query in your Retool charts. - - - -Get nicely rendered charts. - - diff --git a/docs/content/Configuration/Downstream/Streamlit.mdx b/docs/content/Configuration/Downstream/Streamlit.mdx deleted file mode 100644 index 4907342d53d82..0000000000000 --- a/docs/content/Configuration/Downstream/Streamlit.mdx +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: Connecting to Streamlit -permalink: /config/downstream/streamlit ---- - -You can connect to Cube from Streamlit using the [Cube SQL API][ref-sql-api]. -Streamlit turns data scripts into shareable web apps in minutes. - -Here's a short video guide on how to connect Streamlit to Cube. - - - -## Enable Cube SQL API - - - -Don't have a Cube project yet? [Learn how to get started -here][ref-getting-started]. - - - -### <--{"id" : "Enable Cube SQL API"}--> Cube Cloud - -Click **Deploy SQL API** and then the **How to connect your BI tool** link on -the Overview page of your Cube deployment. Navigate to the **BIs and -Visualization Tools** tab. 
You should see the screen like the one below with -your connection credentials: - - - -### <--{"id" : "Enable Cube SQL API"}--> Self-hosted Cube - -You need to set the following environment variables to enable the Cube SQL API. -These credentials will be required to connect to Cube from Streamlit later. - -```dotenv -CUBEJS_PG_SQL_PORT=5432 -CUBEJS_SQL_USER=myusername -CUBEJS_SQL_PASSWORD=mypassword -``` - -## Connecting from Jupyter - -Jupyter connects to Cube as to a Postgres database. - -Make sure to install the `streamlit`, `sqlalchemy` and `pandas` modules. - -```bash{promptUser: user} -pip install streamlit -pip install sqlalchemy -pip install pandas -``` - -Then you can use `sqlalchemy.create_engine` to connect to Cube's SQL API. - -```python -import streamlit -import sqlalchemy -import pandas - -engine = sqlalchemy.create_engine( - sqlalchemy.engine.url.URL( - drivername="postgresql", - username="cube", - password="9943f670fd019692f58d66b64e375213", - host="thirsty-raccoon.sql.aws-eu-central-1.cubecloudapp.dev", - port="5432", - database="db@thirsty-raccoon", - ), - echo_pool=True, -) -print("connecting with engine " + str(engine)) -connection = engine.connect() - -# ... -``` - -## Querying data - -Your cubes will be exposed as tables, where both your measures and dimensions -are columns. - -You can write SQL in Streamlit that will be executed in Cube. Learn more about -Cube SQL syntax on the [reference page][ref-sql-api]. - -```python -# ... - -with streamlit.echo(): - query = "SELECT sum(count) AS orders_count, status FROM orders GROUP BY status;" -df = pandas.read_sql_query(query, connection) -streamlit.dataframe(df) -``` - -In your Streamlit notebook it'll look like this. You can create a visualization -of the executed SQL query by using `streamlit.dataframe(df)`. - -
- -
- -[ref-getting-started]: /getting-started/cloud/overview -[ref-sql-api]: /backend/sql diff --git a/docs/content/Configuration/Downstream/Superset.mdx b/docs/content/Configuration/Downstream/Superset.mdx deleted file mode 100644 index 1bcc594ecb330..0000000000000 --- a/docs/content/Configuration/Downstream/Superset.mdx +++ /dev/null @@ -1,167 +0,0 @@ ---- -title: Connecting to Superset/Preset -permalink: /config/downstream/superset -redirect_from: - - /recipes/using-apache-superset-with-cube-sql ---- - - - -Tune into [our webinar on June 21st](https://cube.dev/events/introducing-semantic-layer-sync-with-preset) -to see the easiest way to integrate semantics layers with the most popular open source BI tool, [Preset](https://preset.io). - - - -[Apache Superset][superset] is a popular open-source data exploration -and visualization platform. [Preset][preset] is a fully-managed service for -Superset. - -## Semantic Layer Sync - -It is recommended to use [Semantic Layer Sync][ref-sls] to connect Cube to -Superset or Preset. It automatically synchronizes the -[data model][ref-data-model] between Cube and Superset or Preset. - -## SQL API - -You can also use the [SQL API][ref-sql-api] to connect Cube to Superset or Preset. - - - -[Semantic Layer Sync][ref-sls] is the recommended way to connect Superset or -Preset to Cube. If you're using the SQL API, consider migrating to Semantic -Layer Sync. - - - -Here's a short video guide: - - - -### <--{"id" : "Enable Cube SQL API"}--> Cube Cloud - -Click **How to connect your BI tool** link on the Overview page, navigate to the -SQL API tab and enable it. Once enabled, you should see the screen like the one -below with your connection credentials: - - - -### <--{"id" : "Enable Cube SQL API"}--> Self-hosted Cube - -You need to set the following environment variables to enable the Cube SQL API. -These credentials will be required to connect to Cube from Superset later. 
- -```dotenv -CUBEJS_PG_SQL_PORT=5432 -CUBEJS_SQL_USER=myusername -CUBEJS_SQL_PASSWORD=mypassword -``` - -### <--{"id" : "Enable Cube SQL API"}--> Connecting from Superset - -Apache Superset connects to Cube as to a Postgres database. - -In Apache Superset, go to **Data > Databases**, then click **+ Database** to add -a new database: - - - -## Querying data - -Your cubes will be exposed as tables, where both your measures and dimensions -are columns. - -Let's use the following Cube data model: - - - -```yaml -cubes: - - name: orders - sql_table: orders - - measures: - - name: count - type: count - - dimensions: - - name: status - sql: status - type: string - - - name: created - sql: created_at - type: time -``` - -```javascript -cube(`orders`, { - sql_table: `orders`, - - measures: { - count: { - type: `count`, - }, - }, - - dimensions: { - status: { - sql: `status`, - type: `string`, - }, - - created_at: { - sql: `created_at`, - type: `time`, - }, - }, -}); -``` - - - -Using the SQL API, `orders` will be exposed as a table. In Superset, we can -create datasets based on tables. Let's create one from `orders` table: - - - -Now, we can explore this dataset. Let's create a new chart of type line with -"Orders" dataset. - - - -We can select the `COUNT(*)` as a metric and `created_at` as the time column with -a time grain of `month`. - -The `COUNT(*)` aggregate function is being mapped to a measure of type -[count](/schema/reference/types-and-formats#measures-types-count) in Cube's -**Orders** data model file. - -## Additional Configuration - -### <--{"id" : "Additional Configuration"}--> Pre-Aggregations - -To allow queries from Superset to match pre-aggregations in Cube, [the -`allow_non_strict_date_range_match` property][ref-schema-ref-preagg-allownonstrict] -must be set to `true` in the pre-aggregation definition. This is because -Superset uses loose date ranges when generating SQL queries. 
- -[ref-getting-started]: /getting-started/cloud/overview -[ref-schema-ref-preagg-allownonstrict]: - /schema/reference/pre-aggregations#allow-non-strict-date-range-match -[superset]: https://superset.apache.org/ -[superset-docs-installation-docker]: - https://superset.apache.org/docs/installation/installing-superset-using-docker-compose -[preset]: https://preset.io -[ref-sls]: /semantic-layer-sync -[ref-sql-api]: /backend/sql -[ref-data-model]: /schema/getting-started diff --git a/docs/content/Configuration/Downstream/Tableau.mdx b/docs/content/Configuration/Downstream/Tableau.mdx deleted file mode 100644 index c6d80942f7499..0000000000000 --- a/docs/content/Configuration/Downstream/Tableau.mdx +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: Connecting from Tableau -permalink: /config/downstream/tableau ---- - -You can connect to Cube from Tableau, a visual analytics platform, using the -[Cube SQL API][ref-sql-api]. - -## Enable Cube SQL API - - - -Don't have a Cube project yet? [Learn how to get started -here][ref-getting-started]. - - - -### <--{"id" : "Enable Cube SQL API"}--> Cube Cloud - -Click **How to connect your BI tool** link on the Overview page, navigate to the -SQL API tab and enable it. Once enabled, you should see the screen like the one -below with your connection credentials: - - - -### <--{"id" : "Enable Cube SQL API"}--> Self-hosted Cube - -You need to set the following environment variables to enable the Cube SQL API. -These credentials will be required to connect to Cube from Tableau later. - -```dotenv -CUBEJS_PG_SQL_PORT=5432 -CUBEJS_SQL_USER=myusername -CUBEJS_SQL_PASSWORD=mypassword -``` - -## Connecting from Tableau - -Tableau connects to Cube as to a Postgres database. - -In Tableau, select PostgreSQL connector and enter credentials from the above -step. - - - -## Querying data - -Your cubes will be exposed as tables, where both your measures and dimensions -are columns. 
- - - -[ref-getting-started]: /getting-started/cloud/overview -[ref-sql-api]: /backend/sql diff --git a/docs/content/Configuration/Downstream/Thoughtspot.mdx b/docs/content/Configuration/Downstream/Thoughtspot.mdx deleted file mode 100644 index a325f05492f96..0000000000000 --- a/docs/content/Configuration/Downstream/Thoughtspot.mdx +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: Connecting from Thoughtspot -permalink: /config/downstream/thoughtspot ---- - -You can connect to Cube from [Thoughtspot][thoughtspot] using the [Cube SQL -API][ref-sql-api]. - -## Enable Cube SQL API - - - -Don't have a Cube project yet? [Learn how to get started -here][ref-getting-started]. - - - -### <--{"id" : "Enable Cube SQL API"}--> Cube Cloud - -Click **How to connect your BI tool** link on the Overview page, navigate to the -SQL API tab and enable it. Once enabled, you should see the screen like the one -below with your connection credentials: - - - -### <--{"id" : "Enable Cube SQL API"}--> Self-hosted Cube - -You need to set the following environment variables to enable the Cube SQL API. -These credentials will be required to connect to Cube from Tableau later. - -```dotenv -CUBEJS_PG_SQL_PORT=5432 -CUBEJS_SQL_USER=myusername -CUBEJS_SQL_PASSWORD=mypassword -``` - -## Connecting from Thoughtspot - -Thoughtspot connects to Cube as a Redshift database. - -In Thoughtspot, go to the Setup tab, then click **Connect now** to add a new -data source: - - - -Enter a name for the data source, choose **Amazon Redshift** as the data -warehouse and click **Continue** in the header: - - - -Enter credentials from the previous step and click **Continue**: - - - -Select columns from the desired cubes and click **Create Connection** on the -next screen: - - - -## Querying data - -Your cubes will be exposed as tables, where both your measures and dimensions -are columns. 
- -[ref-getting-started]: /getting-started/cloud/overview -[ref-sql-api]: /backend/sql -[ref-connecting-from-tableau]: - https://cubedev-blog-images.s3.us-east-2.amazonaws.com/dc025b24-674f-4f32-ac44-421d546ee676.GIF -[ref-querying-from-tableau]: - https://cubedev-blog-images.s3.us-east-2.amazonaws.com/ea73a998-e2ce-4814-863e-425b4d35860c.gif -[thoughtspot]: https://www.thoughtspot.com/ diff --git a/docs/content/Configuration/Overview.mdx b/docs/content/Configuration/Overview.mdx deleted file mode 100644 index 28e42c5e835df..0000000000000 --- a/docs/content/Configuration/Overview.mdx +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Configuration Overview -menuTitle: Overview -permalink: /configuration/overview -category: Configuration -menuOrder: 1 ---- - -Cube can be configured in two ways. The first is through [options][link-config] -in a configuration file, commonly known as the `cube.js` file, and secondly, -through [environment variables][link-env-vars]. Values specified in `cube.js` -will take precedence over environment variables. - - - -When using Docker, ensure that the `cube.js` configuration file and your -`model/` [folder][ref-folder-structure] are mounted to `/cube/conf` within the -Docker container. - - - -## Cube Cloud - -You can set environment variables in Settings → Configuration: - - - -## Development Mode - -Cube can be run in an insecure, development mode by setting the -`CUBEJS_DEV_MODE` environment variable to `true`. 
Putting Cube in development -mode does the following: - -- Disables authentication checks -- Enables Cube Store in single instance mode -- Enables background refresh for in-memory cache and [scheduled - pre-aggregations][link-scheduled-refresh] -- Allows another log level to be set (`trace`) -- Enables [Developer Playground][link-dev-playground] on `http://localhost:4000` -- Uses `memory` instead of `cubestore` as the default cache/queue engine -- Logs incorrect/invalid configuration for `externalRefresh` /`waitForRenew` - instead of throwing errors - -## Concurrency and pooling - - - -All Cube database drivers come with presets for concurrency and pooling that -work out-of-the-box. The following information is included as a reference. - - - -For increased performance, Cube uses multiple concurrent connections to -configured data sources. The `CUBEJS_CONCURRENCY` environment variable controls -concurrency settings for query queues and the refresh scheduler as well as the -maximum concurrent connections. For databases that support connection pooling, -the maximum number of concurrent connections to the database can also be set by -using the `CUBEJS_DB_MAX_POOL` environment variable; if changing this from the -default, you must ensure that the new value is greater than the number of -concurrent connections used by Cube's query queues and refresh scheduler. 
- -[ref-folder-structure]: /data-modeling/syntax#folder-structure -[link-config]: /config -[link-dev-playground]: /dev-tools/dev-playground -[link-env-vars]: /reference/environment-variables -[link-scheduled-refresh]: /schema/reference/pre-aggregations#scheduled-refresh diff --git a/docs/content/Configuration/VPC/Connecting-with-a-VPC-AWS.mdx b/docs/content/Configuration/VPC/Connecting-with-a-VPC-AWS.mdx deleted file mode 100644 index 96dcf0c0e616d..0000000000000 --- a/docs/content/Configuration/VPC/Connecting-with-a-VPC-AWS.mdx +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: Connecting with a VPC on AWS -permalink: /cloud/configuration/connecting-with-a-vpc/aws ---- - -To connect with a VPC on AWS, you need to collect the necessary information and -hand it over to your Cube Cloud representative. Next, you'll have to accept a -VPC peering request sent by Cube Cloud. Finally, you'll need to configure -security groups and route tables to ensure Cube Cloud can connect to your data -source. - -## Prerequisites - -To allow Cube Cloud to connect to a [VPC on AWS][aws-docs-vpc], the following -information is required: - -- **AWS Account ID:** The AWS account ID of the VPC owner. This can be found in - the top-right corner of [the AWS Console][aws-console]. -- **AWS Region:** [The AWS region][aws-docs-regions] that the VPC resides in. - Ensure that the region is available in [Supported Regions](#supported-regions) - to see if Cube Cloud VPC connectivity is available in your region. -- **AWS VPC ID:** The ID of the VPC that Cube Cloud will connect to, for - example, `vpc-0099aazz` -- **AWS VPC CIDR:** The [CIDR block][wiki-cidr-block] of the VPC that Cube Cloud - will connect to, for example, `10.0.0.0/16` - -After receiving the above information, a Customer Success Manager will provide -you with the **AWS account ID**, **region**, **VPC ID** and the [**CIDR -block**][wiki-cidr-block] used by Cube Cloud to connect to your VPC. 
- -## Setup - -### <--{"id" : "Setup"}--> VPC Peering Request - -After receiving the information above, Cube Cloud will send a [VPC peering -request][aws-docs-vpc-peering] that must be accepted. This can be done either -through the [AWS Web Console][aws-console] or through an infrastructure-as-code -tool. - -To [accept the VPC peering request][aws-docs-vpc-peering-accept] through the AWS -Web Console, follow the instructions below: - -1. Open the [Amazon VPC console](https://console.aws.amazon.com/vpc/). - - - - Ensure you have the necessary permissions to accept a VPC peering request. If - you are unsure, please contact your AWS administrator. - - - -2. Use the Region selector to choose the Region of the accepter VPC. - -3. In the navigation pane, choose Peering connections. - -4. Select the pending VPC peering connection (the status should be - `pending-acceptance`), then choose Actions, followed by  - ​Accept request. - - - - Ensure the peering request is from Cube Cloud by checking that the **AWS account - ID**, **region** and **VPC IDs** match those provided by your CSM. - - - -5. When prompted for confirmation, choose Accept request. - -6. Choose Modify my route tables now to add a route to the VPC route - table so that you can send and receive traffic across the peering connection. - - - -For more information about peering connection lifecycle statuses, check out the -[VPC peering connection lifecycle on AWS][aws-docs-vpc-peering-lifecycle]. - - - -### <--{"id" : "Setup"}--> Updating security groups - -The initial VPC setup will not allow traffic from Cube Cloud; this is because -[the security group][aws-docs-vpc-security-group] for the database will need to -allow access from the Cube Cloud CIDR block. - -This can be achieved by adding a new security group rule: - -| Protocol | Port Range | Source/Destination | -| -------- | ---------- | --------------------------------------------- | -| TCP | 3306 | The Cube Cloud CIDR block for the AWS region. 
| - -### <--{"id" : "Setup"}--> Update route tables - -The final step is to update route tables in your VPC to allow traffic from Cube -Cloud to reach your database. The Cube Cloud CIDR block must be added to the -route tables of all subnets that connect to the database. To do this, follow the -instructions on [the AWS documentation][aws-docs-vpc-peering-routing]. - -## Troubleshooting - -Database connection issues with misconfigured VPCs often manifest as connection -timeouts. If you are experiencing connection issues, please check the following: - -- Verify that - [all security groups allow traffic](#setup-updating-security-groups) from the - Cube Cloud provided CIDR block. -- Verify that - [a route exists to the Cube Cloud provided CIDR block](#setup-update-route-tables) - from the subnets that connect to the database. - -[aws-console]: https://console.aws.amazon.com/ -[aws-docs-regions]: - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions -[aws-docs-vpc]: - https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html -[aws-docs-vpc-peering-accept]: - https://docs.aws.amazon.com/vpc/latest/peering/create-vpc-peering-connection.html#different-account-different-region -[aws-docs-vpc-peering-lifecycle]: - https://docs.aws.amazon.com/vpc/latest/peering/vpc-peering-basics.html#vpc-peering-lifecycle -[aws-docs-vpc-peering-routing]: - https://docs.aws.amazon.com/vpc/latest/peering/vpc-peering-routing.html -[aws-docs-vpc-peering]: - https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html -[aws-docs-vpc-security-group]: - https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#SecurityGroupRules -[wiki-cidr-block]: - https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_blocks diff --git a/docs/content/Configuration/VPC/Connecting-with-a-VPC-Azure.mdx b/docs/content/Configuration/VPC/Connecting-with-a-VPC-Azure.mdx deleted file mode 100644 index 
edfa486684787..0000000000000 --- a/docs/content/Configuration/VPC/Connecting-with-a-VPC-Azure.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: Connecting with a VPC on Azure -permalink: /cloud/configuration/connecting-with-a-vpc/azure ---- - -## Prerequisites - -To allow Cube Cloud to connect to a Virtual Network on Azure, the following -information is required: - -- **Virtual Network Name:** This can be found in the Virtual Networks section of - the [Azure Portal][azure-console]. -- **Tenant ID:** This can be found under `Azure Active Directory` > `Properties` > - `Tenant ID` in the [Azure Portal][azure-console]. - -## Setup - -For cross-tenant peering in Azure, you are supposed to assign the peering role to -the service principal of the peering party. - -Using the steps outlined below, you would register Cube Cloud tenant at your organization -and grant peering access to Cube Cloud service principal. - -### Add Cube tenant to your organization - -First you should add the Cube Cloud tenant to your organization. To do this, open -the [Azure Portal][azure-console] and go to `Azure Active Directory` > `External -Identities` > `Cross-tenant access settings` > `Organizational Settings` > `Add -Organization`. - -For Tenant ID, enter `197e5263-87f4-4ce1-96c4-351b0c0c714a`. - -Make sure that `B2B Collaboration` > `Inbound Access` > `Applications` is set to `Allows access`. - -### Register Cube Cloud service principal at your organization - -To register the Cube Cloud service principal for your organization, follow these steps: - -1. Log in with an account that has permissions to register Enterprise applications. -2. Open a browser tab and go to the following URL, replacing `` with your tenant ID: `https://login.microsoftonline.com//oauth2/authorize?client_id=0c5d0d4b-6cee-402e-9a08-e5b79f199481&response_type=code&redirect_uri=https%3A%2F%2Fwww.microsoft.com%2F` -3. The Cube Cloud service principal has specific credentials. 
Check that the following details match exactly what you see on the dialog box that pops up: -- Client ID: `0c5d0d4b-6cee-402e-9a08-e5b79f199481` -- Name: `cube-dedicated-infra-sp` - -Once you have confirmed that all the information is correct, select "Consent on behalf of your organization" and click "Accept". - -### Grant peering permissions to Cube Cloud service principal on your `Virtual Network` - -As `peering role` you can use built-in `Network Contributor` or create custom role (e.g. `cube-peering-role`) with the following permissions: - -- `Microsoft.Network/virtualNetworks/virtualNetworkPeerings/write` -- `Microsoft.Network/virtualNetworks/peer/action` -- `Microsoft.ClassicNetwork/virtualNetworks/peer/action` -- `Microsoft.Network/virtualNetworks/virtualNetworkPeerings/read` -- `Microsoft.Network/virtualNetworks/virtualNetworkPeerings/delete` - -On the [Azure Portal][azure-console], go to `Virtual networks` > *Virtual -Network Name* > `Access Control (IAM)` > `Add` > `Add role assignment` and fill in the following details: -- Role = `Network Contributor` or `cube-peering-role` -- Members: `cube-dedicated-infra-sp` - -### Firewall - -Make sure that your firewall rules allow inbound and outbound traffic to IP/port your database is listening at. - -## Information required by Cube Cloud support - -When you are reaching out Cube Cloud support please provide following -information: - -- **Virtual Network ID:** You can find it at `Virtual Networks` > *Virtual - Network Name* > `Overview` > `JSON view` > `Resource ID` on [Azure - Portal][azure-console]. -- **Virtual Network Address Spaces:** You can find it at `Virtual Networks` > - *Virtual Network Name* > `Overview` > `JSON view` > `properties` > - `addressSpace` on [Azure Portal][azure-console]. -- **Tenant ID:** You can find it in `Azure Active Directory` > `Properties` > - `Tenant ID` section of [Azure Portal][azure-console]. - -## Supported Regions - -We support all general-purpose regions. 
Cube Store is currently located only in `US Central` -so pre-aggregations performance might depend on geographical proximity to it. - -[azure-console]: https://portal.azure.com diff --git a/docs/content/Configuration/VPC/Connecting-with-a-VPC-GCP.mdx b/docs/content/Configuration/VPC/Connecting-with-a-VPC-GCP.mdx deleted file mode 100644 index e279c6e4d1a76..0000000000000 --- a/docs/content/Configuration/VPC/Connecting-with-a-VPC-GCP.mdx +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Connecting with a VPC on Google Cloud -permalink: /cloud/configuration/connecting-with-a-vpc/gcp ---- - -## Prerequisites - -- [Google Cloud Project ID][gcp-docs-projects] -- Google Cloud VPC Network Name - -## Setup - -### VPC Peering - -After receiving the information above, create a [VPC peering -request][gcp-docs-vpc-peering], either through the [GCP Web -Console][gcp-console] or an infrastructure-as-code tool. To send a VPC peering -request through the Google Cloud Console, follow [the instructions -here][gcp-docs-create-vpc-peering], with the following amendments: - -- In Step 6, use the project name `XXXXX` and network name(s) provided by Cube - Cloud. -- In Step 7, ensure **Import custom routes** and **Export custom routes** are - selected so that the necessary routes are created. - -## Supported Regions - -- `northeast1` -- `europe-west-2` -- `europe-west-3` -- `us-central-1` - -## Notes - -### Cloud SQL - -Google Cloud SQL databases [can only be peered to a VPC within the same GCP -project][gcp-docs-vpc-peering-restrictions]. To work around this limitation, we -recommend that customers provision a micro VM in their Google Cloud account to -run the [Cloud SQL Auth Proxy][gcp-cloudsql-auth-proxy]. 
- -[gcp-cloudsql-auth-proxy]: - https://cloud.google.com/sql/docs/mysql/connect-admin-proxy -[gcp-docs-vpc-peering-restrictions]: - https://cloud.google.com/vpc/docs/vpc-peering#restrictions -[gcp-console]: https://console.cloud.google.com/ -[gcp-docs-create-vpc-peering]: - https://cloud.google.com/vpc/docs/using-vpc-peering#creating_a_peering_configuration -[gcp-docs-projects]: - https://cloud.google.com/resource-manager/docs/creating-managing-projects#before_you_begin -[gcp-docs-vpc-peering]: https://cloud.google.com/vpc/docs/vpc-peering diff --git a/docs/content/Configuration/VPC/Connecting-with-a-VPC.mdx b/docs/content/Configuration/VPC/Connecting-with-a-VPC.mdx deleted file mode 100644 index f26b4436aff77..0000000000000 --- a/docs/content/Configuration/VPC/Connecting-with-a-VPC.mdx +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Connecting with a VPC -permalink: /cloud/configuration/connecting-with-a-vpc -category: Configuration -menuOrder: 4 ---- - -For improved stability and security, Cube Cloud supports connecting to one or -more VPCs (virtual private clouds) in your Azure, AWS, or GCP accounts. - - - -VPC connectivity is available in Cube Cloud on -[Enterprise](https://cube.dev/pricing) tier. -[Contact us](https://cube.dev/contact) for details. - - - -VPC connection improves stability through dedicated infrastructure for a -deployment and improves security by preventing your database traffic from being -routed through the public internet. 
- - - - - - diff --git a/docs/content/Cube.js-Introduction.mdx b/docs/content/Cube.js-Introduction.mdx deleted file mode 100644 index 78942072d31a8..0000000000000 --- a/docs/content/Cube.js-Introduction.mdx +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: Introduction -permalink: /introduction -category: Cube.js Introduction -redirect_from: - - /cubejs-introduction ---- - -**Cube is the Semantic Layer for building data apps.** It helps data engineers -and application developers access data from modern data stores, organize it into -consistent definitions, and deliver it to every application. - - - -Cube was designed to work with all SQL-enabled data sources, including cloud -data warehouses like Snowflake or Google BigQuery, query engines like Presto or -Amazon Athena, and application databases like Postgres. Cube has a built-in -caching engine to provide sub-second latency and high concurrency for API -requests. - -With Cube, you can build a data model, manage access control and caching, and -expose your data to every application via REST, GraphQL, and SQL APIs. Cube is -headless, API-first, and decoupled from visualizations. You can use any charting -library to build custom UI, or connect existing dashboarding and reporting tools -to Cube. - -## Why Cube? - -If you are building a data application—such as a business intelligence tool or a -customer-facing analytics feature—you’ll probably face the following problems: - -1. **SQL code organization.** Sooner or later, modeling even a dozen metrics - with a dozen dimensions using pure SQL queries becomes a maintenance - nightmare, which leads to building a modeling framework. -2. **Performance.** Most of the time and effort in modern analytics software - development is spent providing adequate time to insight. In a world where - every company’s data is big data, writing just SQL queries to get insight - isn’t enough anymore. -3. 
**Access Control.** It is important to secure and govern access to data for - all downstream data consuming applications. - -Cube has the necessary infrastructure and features to implement efficient data -modeling, access control, and performance optimizations so that every -application can access consistent data via REST, SQL, and GraphQL APIs. Achieve -insights from raw data within minutes, and get an API with sub-second response -times on up to a trillion data points. - -
- -
- -## Architecture - -**Cube acts as a data access layer**, translating API requests into SQL, -managing caching, queuing, and database connection. - -Cube accepts queries via REST, GraphQL or SQL interfaces. Based on the data -model and an incoming query, Cube generates a SQL query and executes it in your -database. Cube fully manages query orchestration, database connections, as well -as caching and access control layers. The result is then sent back to the -client. - -
- -
- -## Why Cube Cloud? - -[Cube Cloud][cube-cloud] offers a managed Cube service with extra features for -reliability and performance. It enhances Cube with features that substantially -improve reliability, performance, and security: - -- **Performance:** Cube Store in-memory caching, Cube Store autoscaling, - autoscaling for multi-tenancy. - -- **Security:** Cube Store encryption at rest, SQL API connection via SSL, - secured access to Cube Playground, security audits and updates to Docker - images. - -- **Reliability:** Cube Store replication for high availability, distributed - file storage failovers and consistency checking. - -- **Integrations:** Optimized Databricks driver, optimized Elasticsearch driver, - support for Azure blob storage in Cube Store, and integration with monitoring - solutions. - -Cube Cloud is available as [managed service](https://cubecloud.dev/auth/signup) -and through "bring your own cloud" model. -[Please contact us](https://cube.dev/contact) if you'd like to install Cube -Cloud within your AWS, GCP, or Azure account. - -[cube-cloud]: https://cube.dev/cloud diff --git a/docs/content/Deployment/Cloud/Auto-Suspension.mdx b/docs/content/Deployment/Cloud/Auto-Suspension.mdx deleted file mode 100644 index 0a566f2550a41..0000000000000 --- a/docs/content/Deployment/Cloud/Auto-Suspension.mdx +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: Auto-suspension -permalink: /deployment/cloud/auto-suspension -category: Deployment -subCategory: Cube Cloud -menuOrder: 3 ---- - - - -Auto-suspension is available in Cube Cloud on -[Starter and above tiers](https://cube.dev/pricing). - - - -Cube Cloud can automatically suspend deployments when not in use to prevent -[resource consumption][ref-deployment-pricing] when infrastructure is not being -actively used, which helps manage spend and preventing unnecessary quota use. - -This is useful for deployments that are not used 24/7, such as staging -deployments. 
Auto-suspension will hibernate the deployment when **no** API -requests are received after a period of time, and automatically resume the -deployment when API requests start coming in again: - -
- Cube Cloud auto-suspend flowchart -
- -[Development Instances][ref-deployment-dev-instance] are auto-suspended -automatically when not in use for 10 minutes, whereas [Production -Clusters][ref-deployment-prod-cluster] and [Production -Multi-Clusters][ref-deployment-prod-multi-cluster] can auto-suspend after no API -requests were received within a configurable time period. While suspended, -[pre-aggregation][ref-caching-preaggs-gs] builds will also be paused to prevent -unnecessary resource consumption. - -## Configuration - -To configure auto-suspension settings, navigate to the Settings -screen in your deployment and click the Configuration tab, then -ensure Enable Auto-suspend is turned on: - - - -To configure how long Cube Cloud should wait before suspending the deployment, -adjust Auto-suspend threshold (minutes) to the desired value and -click Apply: - - - -The Cube API instances will temporarily become unavailable while they are -configured; this usually takes less than a minute. - -## Resuming a suspended deployment - -To resume a suspended deployment, send a query to Cube using the API or by -navigating to the deployment in Cube Cloud. - - - -Currently, Cube Cloud's auto-suspension feature cannot guarantee a 100% resume -rate on the first query or a specific time frame for resume. While in most -cases, deployment resumes within several seconds of the first query, there is -still a possibility that it may take longer to resume your deployment. This can -potentially lead to an error response code for the initial query. - - - -Deployments typically resume in under 30 seconds, but can take significantly -longer in certain situations depending on two major factors: - -- **Data model:** How many cubes and views are defined. -- **Query complexity:** How complicated the queries being sent to the API are - -Complex data models take more time to compile, and complex queries can cause -response times to be significantly longer than usual. 
- -[ref-caching-preaggs-gs]: /caching/pre-aggregations/getting-started -[ref-deployment-dev-instance]: - /cloud/configuration/deployment-types#development-instance -[ref-deployment-prod-cluster]: - /cloud/configuration/deployment-types#production-cluster -[ref-deployment-prod-multi-cluster]: - /cloud/configuration/deployment-types#production-multi-cluster -[ref-deployment-pricing]: /cloud/pricing -[ref-workspace-dev-api]: /cloud/workspace/development-api diff --git a/docs/content/Deployment/Cloud/Continuous-Deployment.mdx b/docs/content/Deployment/Cloud/Continuous-Deployment.mdx deleted file mode 100644 index 6cf3a054510e6..0000000000000 --- a/docs/content/Deployment/Cloud/Continuous-Deployment.mdx +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: Continuous deployment -permalink: /cloud/deploys -category: Deployment -subCategory: Cube Cloud -menuOrder: 3 ---- - -This guide covers features and tools you can use to deploy your Cube project to -Cube Cloud. - -## Deploy with Git - -Continuous deployment works by connecting a Git repository to a Cube Cloud -deployment and keeping the two in sync. - -First, go to the Build & Deploy tab on the Settings screen -to make sure your deployment is configured to deploy with Git. Then -click Generate Git credentials to obtain Git credentials: - - - -The instructions to set up Cube Cloud as a Git remote are also available on the -same screen: - -```bash{promptUser: user} -git config credential.helper store -git remote add cubecloud -git push cubecloud master -``` - -## Deploy with GitHub - -First, ensure your deployment is configured to deploy with Git. Then connect -your GitHub repository to your deployment by clicking the Connect to -GitHub button, and selecting your repository. - -
- -
- -Cube Cloud will automatically deploy from the specified production branch -(`master` by default). - -## Deploy with CLI - - - -Enabling this option will cause the Data Model page to display the -last known state of a Git-based codebase (if available), instead of reflecting -the latest modifications made. It is important to note that the logic will still -be updated in both the API and the Playground. - - - -You can use [the CLI][ref-workspace-cli] to set up continuous deployment for a -Git repository. You can also use it to manually deploy changes without -continuous deployment. - -### <--{"id" : "Deploy with CLI"}--> Manual Deploys - -You can deploy your Cube project manually. This method uploads data models and -configuration files directly from your local project directory. - -You can obtain a Cube Cloud deploy token from your -deployment's Settings screen. - -```bash{promptUser: user} -npx cubejs-cli deploy --token TOKEN -``` - -### <--{"id" : "Deploy with CLI"}--> Continuous Deployment - -You can use Cube CLI with your continuous integration tool. - - - -You can use the `CUBE_CLOUD_DEPLOY_AUTH` environment variable to pass the Cube -Cloud deploy token to Cube CLI. 
- - - -Below is an example configuration for GitHub Actions: - -```yaml -name: My Cube App -on: - push: - paths: - - "**" - branches: - - "master" -jobs: - deploy: - name: Deploy My Cube App - runs-on: ubuntu-latest - timeout-minutes: 30 - steps: - - name: Checkout - uses: actions/checkout@v2 - - name: Use Node.js 14.x - uses: actions/setup-node@v1 - with: - node-version: 14.x - - name: Deploy to Cube Cloud - run: npx cubejs-cli deploy - env: - CUBE_CLOUD_DEPLOY_AUTH: ${{ secrets.CUBE_CLOUD_DEPLOY_AUTH }} -``` - -[ref-workspace-cli]: /using-the-cubejs-cli diff --git a/docs/content/Deployment/Cloud/Custom-Domains.mdx b/docs/content/Deployment/Cloud/Custom-Domains.mdx deleted file mode 100644 index 360f471590cbb..0000000000000 --- a/docs/content/Deployment/Cloud/Custom-Domains.mdx +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Custom domains -permalink: /cloud/configuration/custom-domains -category: Deployment -subCategory: Cube Cloud -menuOrder: 4 ---- - -By default, Cube Cloud deployments and their API endpoints use auto-generated -anonymized domain names, e.g., `emerald-llama.gcp-us-central1.cubecloudapp.dev`. -You can also assign a custom domain to your deployment. - - - -Custom domains are available in Cube Cloud on -[Premium](https://cube.dev/pricing) tier. -[Contact us](https://cube.dev/contact) for details. - - - -To set up a custom domain, go to your deployment's settings page. 
Under -the Custom Domain section, type in your custom domain and -click Add: - - - -After doing this, copy the provided domain and add the following `CNAME` records -to your DNS provider: - -| Name | Value | -| ------------- | ------------------------------------------- | -| `YOUR_DOMAIN` | The copied value from Cube Cloud's settings | - -Using the example subdomain from the screenshot, a `CNAME` record for `acme.dev` -would look like: - -| Name | Value | -| ---------- | ----------------------------------------------------- | -| `insights` | `colossal-crowville.gcp-us-central1.cubecloudapp.dev` | - -DNS changes can sometimes take up to 15 minutes to propagate, please wait at -least 15 minutes and/or try using another DNS provider to verify the `CNAME` -record correctly before raising a new support ticket. - -[cube-contact]: https://cube.dev/contact diff --git a/docs/content/Deployment/Cloud/Deployment-Types.mdx b/docs/content/Deployment/Cloud/Deployment-Types.mdx deleted file mode 100644 index d618047264d54..0000000000000 --- a/docs/content/Deployment/Cloud/Deployment-Types.mdx +++ /dev/null @@ -1,163 +0,0 @@ ---- -title: Deployment types -permalink: /cloud/configuration/deployment-types -category: Deployment -subCategory: Cube Cloud -menuOrder: 5 ---- - -## Development Instance - - - -Development instance is available in Cube Cloud for free, no credit card -required. Your free trial is limited to 2 development instances and only 1,000 -queries per day. Upgrade to [any tier](https://cube.dev/pricing) to unlock all -features. - - - -
- High-level architecture diagram of a Cube Cloud Development Instance -
- -Development Instances are designed for development use-cases only. This makes it -easy to get started with Cube Cloud quickly, and also allows you to build and -query pre-aggregations on-demand. - -Development Instances do not provide high-availability nor do they guarantee -fast response times. Development Instances also hibernate after 10 minutes of -inactivity, which can cause the first request after hibernation to take -additional time to process. They also have [limits][ref-limits] on the maximum -number of queries per day and the maximum number of Cube Store Workers. We -**strongly** advise not using a development instance in a production environment, -it is for testing and learning about Cube only and will not deliver a -production-level experience for your users. - -You can try a Cube Cloud development instance by -[signing up for Cube Cloud](https://cubecloud.dev/auth/signup) to try it free -(no credit card required). - -## Production Cluster - - - -Production cluster is available in Cube Cloud on -[all tiers](https://cube.dev/pricing). - - - -
- High-level architecture diagram of a Cube Cloud Production Cluster -
- -Production Clusters are designed to support high-availability production -workloads. It consists of several key components, including starting with 2 Cube -API instances, 1 Cube Refresh Worker and 2 Cube Store Routers - all of which run -on dedicated infrastructure. The cluster can automatically scale to meet the -needs of your workload by adding more components as necessary; check the -[Scalability section](#scalability) below. - -## Production multi-cluster - -You can deploy a production multi-cluster in Cube Cloud, which allows for larger -amounts of data as well as load balancing for multi-tenant deployments with over -1,000 tenants. - - - -Production multi-cluster is available in Cube Cloud on -[Enterprise Premier](https://cube.dev/pricing) tier. -[Contact us](https://cube.dev/contact) for details. - - - -
- High-level architecture diagram of a Cube Cloud Production Multi-Cluster -
- -Cube Cloud routes traffic between clusters based on -[`contextToAppId()`][ref-conf-ref-ctx-to-app-id]. - -Each cluster is billed separately, and all clusters can use auto-scaling to -match demand. - -## Scalability - -Cube Cloud also allows adding additional infrastructure to your deployment to -increase scalability and performance beyond what is available with each -Production Deployment. - -### <--{"id" : "Scalability"}--> Cube Store Worker - -Cube Store Workers are used to build and persist pre-aggregations. Each Worker -has a **maximum of 150GB** of storage; [additional Cube Store workers][ref-limits] can be -added to your deployment to both increase storage space and improve -pre-aggregation performance. A **minimum of 2** Cube Store Workers is required -for pre-aggregations; this can be adjusted. For a rough estimate, it will take -approximately 2 Cube Store Workers per 4 GB of pre-aggregated data per day. - - - -Idle workers will automatically hibernate after 10 minutes of inactivity, and -will not consume CCUs until they are resumed. Workers are resumed automatically -when Cube receives a query that should be accelerated by a pre-aggregation, or -when a scheduled refresh is triggered. - - - -To change the number of Cube Store Workers in a deployment, go to the -deployment’s Settings screen, and open the Configuration -tab. From this screen, you can set the number of Cube Store Workers from the -dropdown: - - - -### <--{"id" : "Scalability"}--> Cube API Instance - -With a Production Deployment, 2 Cube API Instances are included. That said, it -is very common to use more, and [additional API instances][ref-limits] can be added to -your deployment to increase the throughput of your queries. A rough estimate is -that 1 Cube API Instance is needed for every 5-10 requests-per-second served. -Cube API Instances can also auto-scale as needed. 
- -To change how many Cube API instances are available in the Production Cluster, -go to the deployment’s Settings screen, and open -the Configuration tab. From this screen, you can set the minimum -and maximum number of Cube API instances for a deployment: - - - -## Switching between deployment types - -To switch a deployment's type, go to the deployment's Settings screen -and select from the available options: - - - -[ref-conf-ref-ctx-to-app-id]: /config#options-reference-context-to-app-id -[ref-limits]: /cloud/limits#resources diff --git a/docs/content/Deployment/Cloud/Limits.mdx b/docs/content/Deployment/Cloud/Limits.mdx deleted file mode 100644 index 152de59d1ed3b..0000000000000 --- a/docs/content/Deployment/Cloud/Limits.mdx +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Limits -permalink: /cloud/limits -category: Deployment -subCategory: Cube Cloud -menuOrder: 7 ---- - -Cube Cloud implements limits on resource usage on account and deployment -levels to ensure the best experience for all Cube Cloud users. - - - -Limits apply to Cube Cloud accounts on [all tiers](https://cube.dev/pricing). - - - -## Limit types - -Each limit can be of one of the following types: - -- *Hard limit* either has a threshold that can't be exceeded or prevents -further use of a resource when a threshhold is hit until a cool-down period -passes (e.g., until the next day starts) or the limit is increased (e.g., -when a Cube Cloud account upgrades to another tier). -- *Soft limit* may allow further use of a resource after a threshold is hit. 
- -## Resources - -The following resources are subject to limits, depending on [deployment types][ref-deployment-types] and -[pricing tiers][ref-pricing]: - -| Resource | Free Tier | Starter | Premium | Enterprise | Enterprise Premier | -| --------------------------------------------------------------- | :-------: | :-------: | :-------: | :--------: | :---------------------------: | -| Number of deployments | 2 | Unlimited | Unlimited | Unlimited | Unlimited | -| Number of API instances | 1 | 20 | 20 | 20 | [Contact us][cube-contact-us] | -| Number of Cube Store workers | 2 | 16 | 16 | 16 | [Contact us][cube-contact-us] | -| Queries per day ([development instance][ref-dev-instance]) | 1,000 | 10,000 | Unlimited | Unlimited | Unlimited | -| Queries per day ([production cluster][ref-prod-cluster]) | — | 50,000 | Unlimited | Unlimited | Unlimited | -| Queries processed by [Query History][ref-query-history] per day | 1,000 | 50,000 | 100,000 | 200,000 | 300,000 | - -### <--{"id" : "Resources"}--> Number of deployments - -This is a hard limit. Consider upgrading to [another tier][ref-pricing]. - -### <--{"id" : "Resources"}--> Number of API instances - -This is a hard limit. Consider upgrading to [another tier][ref-pricing]. - -### <--{"id" : "Resources"}--> Number of Cube Store workers - -This is a hard limit. Consider upgrading to [another tier][ref-pricing]. - -### <--{"id" : "Resources"}--> Queries per day - -This is a hard limit. Usage is calculated per Cube Cloud account, i.e., -in total for all deployments within an account. - -When a threshold is hit, further queries will not be processed. In that -case, consider upgrading a development instance to a production cluster. -Alternatively, consider upgrading to [another tier][ref-pricing]. - -### <--{"id" : "Resources"}--> Queries processed by Query History per day - -This is a soft limit. Usage is calculated per Cube Cloud account, i.e., -in total for all deployments within an account. 
- -When a threshold is hit, query processing will be stopped. Please -[contact support][cube-contact-us] for further assistance. - -[ref-deployment-types]: /cloud/configuration/deployment-types -[ref-pricing]: /cloud/pricing -[ref-query-history]: /cloud/inspecting-queries -[ref-dev-instance]: /cloud/configuration/deployment-types#development-instance -[ref-prod-cluster]: /cloud/configuration/deployment-types#production-cluster -[cube-contact-us]: https://cube.dev/contact diff --git a/docs/content/Deployment/Cloud/Overview.mdx b/docs/content/Deployment/Cloud/Overview.mdx deleted file mode 100644 index 30eb723706b2c..0000000000000 --- a/docs/content/Deployment/Cloud/Overview.mdx +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Overview -permalink: /deployment/platforms/cube-cloud -category: Deployment -subCategory: Cube Cloud -menuOrder: 2 ---- - -This guide walks you through deploying Cube with [Cube Cloud][link-cube-cloud], -a purpose-built platform to run Cube applications in production. It is made by -the creators of Cube and incorporates all the best practices of running and -scaling Cube applications. - - - -Are you moving from a self-hosted installation to Cube Cloud? [Click -here][blog-migrate-to-cube-cloud] to learn more. - - - -
- -
- -## Prerequisites - -- A Cube Cloud account - -## Configuration - -Learn more about [deployment with Cube Cloud here][ref-cubecloud-getstart]. - -[blog-migrate-to-cube-cloud]: - https://cube.dev/blog/migrating-from-self-hosted-to-cube-cloud/ -[link-cube-cloud]: https://cubecloud.dev -[link-cube-cloud-waitlist]: https://cube.dev/cloud -[ref-cubecloud-getstart]: /getting-started/cloud/overview diff --git a/docs/content/Deployment/Cloud/Pricing.mdx b/docs/content/Deployment/Cloud/Pricing.mdx deleted file mode 100644 index 3494a186762b1..0000000000000 --- a/docs/content/Deployment/Cloud/Pricing.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Pricing -permalink: /cloud/pricing -category: Deployment -subCategory: Cube Cloud -menuOrder: 6 ---- - -Cube Cloud pricing is based on consumption of compute power which we measure -using Cube Compute Units. The units are priced based on which subscription plan -you have chosen. Each plan has different features and functionality that you -should review as you think about what is right for your business. - - - -A **Cube Compute Unit** is a way to measure compute power used to run Cube Cloud -infrastructure. The price of a compute unit is determined by the Cube Cloud plan -you're subscribed to. - - - -| Node Type | Compute Unit per hour | -| ------------------------- | :-------------------: | -| Cube Production Cluster | `4` | -| Cube Development Instance | `1` | -| Cube Store Worker | `1` | -| Cube API Instance | `1` | - -Cube Cloud has four subscription plans that you can review -[here](https://cube.dev/pricing). The subscription plan you choose will -determine the price per Cube Compute Unit (CCU). Each subscription plan -determines the features, scalability, availability, as well as the speed and -scope of support you may receive for your deployment. - -## Free - -The Free plan is designed for development and testing purposes. It is not -intended for production use. 
- -It offers up to two [Development Instances][ref-cloud-deployment-dev-instance]. -The Free plan also has limits which are described [here][ref-cloud-limits]. - -### Support - -The Free plan offers support via the community Slack channel. - -## Starter - -The Starter plan starts at a minimum of $99/month and targets low-scale -production that is not business-critical. - -It offers a [Production Cluster][ref-cloud-deployment-prod-cluster], the ability -to use third-party packages from the npm registry, AWS and GCP support in select -regions, pre-aggregations of up to 150GB in size, [alerts][ref-cloud-alerts], -auto-suspend controls, and [semantic layer sync][ref-workspace-semantic-layer] -with a single BI tool (such as Preset or Metabase). The Starter plan also has -limits which are described [here][ref-cloud-limits]. - -### Support - -The Starter plan includes support via the community Slack channel and in-product -chat support with three questions per week. - -| Priority | Response Time | -| -------- | ---------------- | -| P0 | 8 business hours | -| P1 | 8 business hours | -| P2 | 8 business hours | -| P3 | 2 business days | - -## Premium - -The Premium plan starts at a minimum of $10,000/year, and is designed for basic -small-scale production deployments. - -It offers everything in the [Starter plan](#starter) as well as support for -[custom domains][ref-cloud-custom-domains], AWS and GCP support in all regions, -and [semantic layer sync][ref-workspace-semantic-layer] with unlimited BI tools. -The Premium plan also has limits which are described [here][ref-cloud-limits]. - -### Support - -The Premium plan includes support via the community Slack channel and unlimited -in-product chat support on weekdays. Cube Cloud provides a 99.95% uptime SLA for -this plan. 
- -| Priority | Response Time | -| -------- | ---------------- | -| P0 | 60 minutes | -| P1 | 4 hours | -| P2 | 8 business hours | -| P3 | 2 business days | - -## Enterprise - -The Enterprise plan starts at a minimum of $20,000/year, and is suitable for -high-scale or mission-critical production deployments with more significant -security and compliance needs. - -It offers everything in the [Premium plan](#premium) as well as SAML 2.0 support -for single sign-on, Azure support for all regions, a dedicated VPC for your Cube -Cloud infrastructure, [VPC peering][ref-cloud-vpc-peering], [monitoring -integrations][ref-cloud-monitoring], and [role-based access -control][ref-cloud-acl]. The Enterprise plan also has limits which are described -[here][ref-cloud-limits]. - -### Support - -The Enterprise plan offers improved support response times as compared to the -[Premium plan](#premium-support) with the addition of a dedicated CSM. Cube -Cloud provides a 99.99% uptime SLA for this plan. - -| Priority | Response Time | -| -------- | ---------------- | -| P0 | 30 minutes | -| P1 | 2 hours | -| P2 | 8 business hours | -| P3 | 2 business days | - -## Enterprise Premier - -The Enterprise Premier plan caters to high-scale, high-availability -mission-critical production deployments with security and compliance needs. - -It offers everything in the [Enterprise plan](#enterprise) as well as enabling -the use of [Production Multi-Clusters][ref-cloud-deployment-prod-multicluster], -unlimited pre-aggregation sizes, and support for kSQL/Elasticsearch. The -Enterprise Premier plan also has limits which are described -[here][ref-cloud-limits]. - -### Support - -The Enterprise Premier plan includes the same -[support plan as the Enterprise plan](#enterprise-support). Cube Cloud provides -a 99.995% uptime SLA for this plan. 
- -| Priority | Response Time | -| -------- | ---------------- | -| P0 | 30 minutes | -| P1 | 2 hours | -| P2 | 8 business hours | -| P3 | 2 business days | - -## Support Priority Definitions - -We prioritize support requests based on their severity, as follows: - -- **P0**: The platform is severely impacted or completely shut down. We will - assign specialists to work continuously to fix the issue, provide ongoing - updates, and start working on a temporary workaround or fix. - -- **P1**: The platform is functioning with limited capabilities or facing - critical issues preventing a production deployment. We will assign specialists - to fix the issue, provide ongoing updates, and start working on a temporary - workaround or fix. - -- **P2**: There are issues with workaround solutions or non-critical functions. - We will use resources during local business hours until the issue is resolved - or a workaround is in place. - -- **P3**: There is a need for clarification in the documentation, or a - suggestion for product enhancement. We will triage the request, provide - clarification when possible, and may include a resolution in a future update. - -## Payment Terms - -Future purchases and upgrades are subject to the pricing that is in effect at -the time of the order. Payments are non-refundable. If your usage of resources -exceeds the balance of CCUs in your account, services may degrade or be -suspended until new CCUs are purchased. You may upgrade your CCUs to a -higher-level subscription plan at any time by paying the difference in per-Cube -Compute Unit pricing, or by asking to convert the price paid for the remaining -CCUs into CCUs for the higher subscription plan at the CCU pricing for that plan -(resulting in a lower number of available CCUs but upgraded to the higher -subscription plan). Future purchases and upgrades are subject to the pricing -that is in effect at the time of the order. 
No credit is allowed for downgrading -CCUs to a lower subscription plan level. - -[ref-cloud-deployment-dev-instance]: /cloud/configuration/deployment-types#development-instance -[ref-cloud-deployment-prod-cluster]: - /cloud/configuration/deployment-types#production-cluster -[ref-cloud-alerts]: /cloud/workspace/alerts -[ref-cloud-limits]: /cloud/limits -[ref-cloud-monitoring]: /monitoring/integrations -[ref-cloud-acl]: /cloud/access-control/ -[ref-cloud-deployment-prod-multicluster]: - /cloud/configuration/deployment-types#production-multi-cluster -[ref-cloud-custom-domains]: /cloud/configuration/custom-domains -[ref-cloud-vpc-peering]: /cloud/configuration/connecting-with-a-vpc -[ref-workspace-semantic-layer]: /semantic-layer-sync diff --git a/docs/content/Deployment/Core/Overview.mdx b/docs/content/Deployment/Core/Overview.mdx deleted file mode 100644 index 5b0d5e9e706c1..0000000000000 --- a/docs/content/Deployment/Core/Overview.mdx +++ /dev/null @@ -1,352 +0,0 @@ ---- -title: Overview -permalink: /deployment/platforms/docker -category: Deployment -subCategory: Cube Core -menuOrder: 11 ---- - -This guide walks you through deploying Cube with Docker. - - - -This is an example of a production-ready deployment, but real-world deployments -can vary significantly depending on desired performance and scale. - - - -## Prerequisites - -- [Docker Desktop][link-docker-app] - -## Configuration - -Create a Docker Compose stack by creating a `docker-compose.yml`. A -production-ready stack would at minimum consist of: - -- One or more Cube API instance -- A Cube Refresh Worker -- A Cube Store Router node -- One or more Cube Store Worker nodes - -An example stack using BigQuery as a data source is provided below: - - - -Using macOS or Windows? Use `CUBEJS_DB_HOST=host.docker.internal` instead of -`localhost` if your database is on the same machine. 
- - - -```yaml -version: '2.2' - -services: - cube_api: - restart: always - image: cubejs/cube:v%CURRENT_VERSION - ports: - - 4000:4000 - environment: - - CUBEJS_DB_TYPE=bigquery - - CUBEJS_DB_BQ_PROJECT_ID=cubejs-bq-cluster - - CUBEJS_DB_BQ_CREDENTIALS= - - CUBEJS_DB_EXPORT_BUCKET=cubestore - - CUBEJS_CUBESTORE_HOST=cubestore_router - - CUBEJS_API_SECRET=secret - volumes: - - .:/cube/conf - depends_on: - - cube_refresh_worker - - cubestore_router - - cubestore_worker_1 - - cubestore_worker_2 - - cube_refresh_worker: - restart: always - image: cubejs/cube:v%CURRENT_VERSION - environment: - - CUBEJS_DB_TYPE=bigquery - - CUBEJS_DB_BQ_PROJECT_ID=cubejs-bq-cluster - - CUBEJS_DB_BQ_CREDENTIALS= - - CUBEJS_DB_EXPORT_BUCKET=cubestore - - CUBEJS_CUBESTORE_HOST=cubestore_router - - CUBEJS_API_SECRET=secret - - CUBEJS_REFRESH_WORKER=true - volumes: - - .:/cube/conf - - cubestore_router: - restart: always - image: cubejs/cubestore:v%CURRENT_VERSION - environment: - - CUBESTORE_WORKERS=cubestore_worker_1:10001,cubestore_worker_2:10002 - - CUBESTORE_REMOTE_DIR=/cube/data - - CUBESTORE_META_PORT=9999 - - CUBESTORE_SERVER_NAME=cubestore_router:9999 - volumes: - - .cubestore:/cube/data - - cubestore_worker_1: - restart: always - image: cubejs/cubestore:v%CURRENT_VERSION - environment: - - CUBESTORE_WORKERS=cubestore_worker_1:10001,cubestore_worker_2:10002 - - CUBESTORE_SERVER_NAME=cubestore_worker_1:10001 - - CUBESTORE_WORKER_PORT=10001 - - CUBESTORE_REMOTE_DIR=/cube/data - - CUBESTORE_META_ADDR=cubestore_router:9999 - volumes: - - .cubestore:/cube/data - depends_on: - - cubestore_router - - cubestore_worker_2: - restart: always - image: cubejs/cubestore:v%CURRENT_VERSION - environment: - - CUBESTORE_WORKERS=cubestore_worker_1:10001,cubestore_worker_2:10002 - - CUBESTORE_SERVER_NAME=cubestore_worker_2:10002 - - CUBESTORE_WORKER_PORT=10002 - - CUBESTORE_REMOTE_DIR=/cube/data - - CUBESTORE_META_ADDR=cubestore_router:9999 - volumes: - - .cubestore:/cube/data - depends_on: - - 
cubestore_router -``` - -## Set up reverse proxy - -In production, the Cube API should be served over an HTTPS connection to -ensure security of the data in-transit. We recommend using a reverse proxy; as -an example, let's use [NGINX][link-nginx]. - - - -You can also use a reverse proxy to enable HTTP 2.0 and GZIP compression - - - -First we'll create a new server configuration file called `nginx/cube.conf`: - -```nginx -server { - listen 443 ssl; - server_name cube.my-domain.com; - - ssl_protocols TLSv1 TLSv1.1 TLSv1.2; - ssl_ecdh_curve secp384r1; - # Replace the ciphers with the appropriate values - ssl_ciphers "ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384 OLD_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 OLD_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"; - ssl_prefer_server_ciphers on; - ssl_certificate /etc/ssl/private/cert.pem; - ssl_certificate_key /etc/ssl/private/key.pem; - ssl_session_timeout 10m; - ssl_session_cache shared:SSL:10m; - ssl_session_tickets off; - ssl_stapling on; - ssl_stapling_verify on; - - location / { - proxy_pass http://cube:4000/; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; - } -} -``` - -Then we'll add a new service to our Docker Compose stack: - -```yaml -services: - ... - nginx: - image: nginx - ports: - - 443:443 - volumes: - - ./nginx:/etc/nginx/conf.d - - ./ssl:/etc/ssl/private -``` - -Don't forget to create a `ssl` directory with the `cert.pem` and `key.pem` files -inside so the Nginx service can find them. - -For automatically provisioning SSL certificates with LetsEncrypt, [this blog -post][medium-letsencrypt-nginx] may be useful. - -## Security - -### <--{"id" : "Security"}--> Use JSON Web Tokens - -Cube can be configured to use industry-standard JSON Web Key Sets for -securing its API and limiting access to data. 
To do this, we'll define the -relevant options on our Cube API instance: - - - -If you're using [`queryRewrite`][ref-config-queryrewrite] for access control, -then you must also configure -[`scheduledRefreshContexts`][ref-config-sched-ref-ctx] so the refresh workers -can correctly create pre-aggregations. - - - -```yaml -services: - cube_api: - image: cubejs/cube:v%CURRENT_VERSION - ports: - - 4000:4000 - environment: - - CUBEJS_DB_TYPE=bigquery - - CUBEJS_DB_BQ_PROJECT_ID=cubejs-bq-cluster - - CUBEJS_DB_BQ_CREDENTIALS= - - CUBEJS_DB_EXPORT_BUCKET=cubestore - - CUBEJS_CUBESTORE_HOST=cubestore_router - - CUBEJS_API_SECRET=secret - - CUBEJS_JWK_URL=https://cognito-idp..amazonaws.com//.well-known/jwks.json - - CUBEJS_JWT_AUDIENCE= - - CUBEJS_JWT_ISSUER=https://cognito-idp..amazonaws.com/ - - CUBEJS_JWT_ALGS=RS256 - - CUBEJS_JWT_CLAIMS_NAMESPACE= - volumes: - - .:/cube/conf - depends_on: - - cubestore_worker_1 - - cubestore_worker_2 - - cube_refresh_worker -``` - -### <--{"id" : "Security"}--> Securing Cube Store - -All Cube Store nodes (both router and workers) should only be accessible to -Cube API instances and refresh workers. To do this with Docker Compose, we -simply need to make sure that none of the Cube Store services have any exposed - -## Monitoring - -All Cube logs can be found by through the Docker Compose CLI: - -```bash{outputLines: 2-9,11-18} -docker-compose ps - - Name Command State Ports ---------------------------------------------------------------------------------------------------------------------------------- -cluster_cube_1 docker-entrypoint.sh cubej ... 
Up 0.0.0.0:4000->4000/tcp,:::4000->4000/tcp -cluster_cubestore_router_1 ./cubestored Up 3030/tcp, 3306/tcp -cluster_cubestore_worker_1_1 ./cubestored Up 3306/tcp, 9001/tcp -cluster_cubestore_worker_2_1 ./cubestored Up 3306/tcp, 9001/tcp - -docker-compose logs - -cubestore_router_1 | 2021-06-02 15:03:20,915 INFO [cubestore::metastore] Creating metastore from scratch in /cube/.cubestore/data/metastore -cubestore_router_1 | 2021-06-02 15:03:20,950 INFO [cubestore::cluster] Meta store port open on 0.0.0.0:9999 -cubestore_router_1 | 2021-06-02 15:03:20,951 INFO [cubestore::mysql] MySQL port open on 0.0.0.0:3306 -cubestore_router_1 | 2021-06-02 15:03:20,952 INFO [cubestore::http] Http Server is listening on 0.0.0.0:3030 -cube_1 | 🚀 Cube API server (%CURRENT_VERSION) is listening on 4000 -cubestore_worker_2_1 | 2021-06-02 15:03:24,945 INFO [cubestore::cluster] Worker port open on 0.0.0.0:9001 -cubestore_worker_1_1 | 2021-06-02 15:03:24,830 INFO [cubestore::cluster] Worker port open on 0.0.0.0:9001 -``` - -## Update to the latest version - -Find the latest stable release version (currently `v%CURRENT_VERSION`) [from -Docker Hub][link-cubejs-docker]. Then update your `docker-compose.yml` to use -the tag: - -```yaml -version: '2.2' - -services: - cube_api: - image: cubejs/cube:v%CURRENT_VERSION - ports: - - 4000:4000 - environment: - - CUBEJS_DB_TYPE=bigquery - - CUBEJS_DB_BQ_PROJECT_ID=cubejs-bq-cluster - - CUBEJS_DB_BQ_CREDENTIALS= - - CUBEJS_DB_EXPORT_BUCKET=cubestore - - CUBEJS_CUBESTORE_HOST=cubestore_router - - CUBEJS_API_SECRET=secret - volumes: - - .:/cube/conf - depends_on: - - cubestore_router - - cube_refresh_worker -``` - -## Extend the Docker image - -If you need to use npm packages with native extensions inside [the `cube.js` -configuration file][ref-config-js], you'll need to build your own Docker image. 
-You can do this by first creating a `Dockerfile` and a corresponding -`.dockerignore`: - -```bash{promptUser: user} -touch Dockerfile -touch .dockerignore -``` - -Add this to the `Dockerfile`: - -```dockerfile -FROM cubejs/cube:latest - -COPY . . -RUN npm install -``` - -And this to the `.dockerignore`: - -```gitignore -node_modules -npm-debug.log -schema -cube.js -.env -``` - -Then start the build process by running the following command: - -```bash{promptUser: user} -docker build -t /cubejs-custom-build . -``` - -Finally, update your `docker-compose.yml` to use your newly-built image: - -```yaml -version: '2.2' - -services: - cube_api: - image: /cubejs-custom-build - ports: - - 4000:4000 - environment: - - CUBEJS_DB_TYPE=bigquery - - CUBEJS_DB_BQ_PROJECT_ID=cubejs-bq-cluster - - CUBEJS_DB_BQ_CREDENTIALS= - - CUBEJS_DB_EXPORT_BUCKET=cubestore - - CUBEJS_CUBESTORE_HOST=cubestore_router - - CUBEJS_API_SECRET=secret - volumes: - - .:/cube/conf - # Prevent dev dependencies leaking - - .empty:/cube/conf/node_modules/@cubejs-backend/ - depends_on: - - cubestore_router - - cube_refresh_worker -``` - -[medium-letsencrypt-nginx]: - https://pentacent.medium.com/nginx-and-lets-encrypt-with-docker-in-less-than-5-minutes-b4b8a60d3a71 -[link-cubejs-docker]: https://hub.docker.com/r/cubejs/cube -[link-docker-app]: https://www.docker.com/products/docker-app -[link-nginx]: https://www.nginx.com/ -[ref-config-js]: /config -[ref-config-queryrewrite]: /config#query-rewrite -[ref-config-sched-ref-ctx]: /config#scheduled-refresh-contexts diff --git a/docs/content/Deployment/Overview.mdx b/docs/content/Deployment/Overview.mdx deleted file mode 100644 index f257cd6c606ae..0000000000000 --- a/docs/content/Deployment/Overview.mdx +++ /dev/null @@ -1,285 +0,0 @@ ---- -title: Deployment Overview -menuTitle: Overview -permalink: /deployment/overview -category: Deployment -menuOrder: 1 -redirect_from: - - /deployment - - /deployment/guide ---- - -This section contains a general overview of 
deploying a Cube cluster in -production. You can also check platform-specific guides for [Cube -Cloud][ref-deploy-cubecloud] and [Docker][ref-deploy-docker]. - -If you are moving Cube to production, check out the [Production -Checklist][ref-deploy-prod-list]. - -## Components - -As shown in the diagram below, a typical production deployment of Cube includes -the following components: - -- One or multiple API instances -- A Refresh Worker -- A Cube Store cluster - -
- Deployment Overview -
- -**API Instances** process incoming API requests and query either Cube Store for -pre-aggregated data or connected database(s) for raw data. The **Refresh -Worker** builds and refreshes pre-aggregations in the background. **Cube Store** -ingests pre-aggregations built by Refresh Worker and responds to queries from -API instances. - -API instances and Refresh Workers can be configured via [environment -variables][ref-config-env] or the [`cube.js` configuration file][ref-config-js]. -They also need access to the data model files. Cube Store clusters can be -configured via environment variables. - -You can find an example Docker Compose configuration for a Cube deployment in -the platform-specific guide for [Docker][ref-deploy-docker]. - -## API instances - -API instances process incoming API requests and query either Cube Store for -pre-aggregated data or connected data sources for raw data. It is possible to -horizontally scale API instances and use a load balancer to balance incoming -requests between multiple API instances. - -The [Cube Docker image][dh-cubejs] is used for API Instance. - -API instances can be configured via environment variables or the `cube.js` -configuration file, and **must** have access to the data model files (as -specified by [`schemaPath`][ref-conf-ref-schemapath]). - -## Refresh Worker - -A Refresh Worker updates pre-aggregations and invalidates the in-memory cache in -the background. It also keeps the refresh keys up-to-date for all data models -and pre-aggregations. Please note that the in-memory cache is just invalidated -but not populated by Refresh Worker. In-memory cache is populated lazily during -querying. On the other hand, pre-aggregations are eagerly populated and kept -up-to-date by Refresh Worker. - -The [Cube Docker image][dh-cubejs] can be used for creating Refresh Workers; to -make the service act as a Refresh Worker, `CUBEJS_REFRESH_WORKER=true` should be -set in the environment variables. 
- -## Cube Store - -Cube Store is the purpose-built pre-aggregations storage for Cube. - -Cube Store uses a distributed query engine architecture. In every Cube Store -cluster: - -- a one or many [router nodes](#cube-store-cube-store-router) handle incoming - connections, manages database metadata, builds query plans, and orchestrates - their execution -- multiple [worker nodes](#cube-store-cube-store-worker) ingest warmed up data - and execute queries in parallel -- a local or cloud-based blob storage keeps pre-aggregated data in columnar - format - -
- Cube Store Router with two Cube Store Workers -
- -By default, Cube Store listens on the port `3030` for queries coming from Cube. -The port could be changed by setting `CUBESTORE_HTTP_PORT` environment variable. -In a case of using custom port, please make sure to change -`CUBEJS_CUBESTORE_PORT` environment variable for Cube API Instances and Refresh -Worker. - -Both the router and worker use the [Cube Store Docker image][dh-cubestore]. The -following environment variables should be used to manage the roles: - -| Environment Variable | Specify on Router? | Specify on Worker? | -| ----------------------- | ------------------ | ------------------ | -| `CUBESTORE_SERVER_NAME` | Yes | Yes | -| `CUBESTORE_META_PORT` | Yes | - | -| `CUBESTORE_WORKERS` | Yes | Yes | -| `CUBESTORE_WORKER_PORT` | - | Yes | -| `CUBESTORE_META_ADDR` | - | Yes | - - - -Looking for a deeper dive on Cube Store architecture? Check out -[this presentation](https://docs.google.com/presentation/d/1oQ-koloag0UcL-bUHOpBXK4txpqiGl41rxhgDVrw7gw/) -by our CTO, [Pavel][gh-pavel]. - - - -### <--{"id" : "Cube Store"}--> Cube Store Router - -
- Cube Store Router with two Cube Store Workers -
- -The Router in a Cube Store cluster is responsible for receiving queries from -Cube, managing metadata for the Cube Store cluster, and query planning and -distribution for the Workers. It also [provides a MySQL-compatible -interface][ref-caching-inspect-sql] that can be used to query pre-aggregations -from Cube Store directly. Cube **only** communicates with the Router, and does -not interact with Workers directly. - -[ref-caching-inspect-sql]: - /caching/using-pre-aggregations#inspecting-pre-aggregations - -### <--{"id" : "Cube Store"}--> Cube Store Worker - -
- Cube Store Router with two Cube Store Workers -
- -Workers in a Cube Store cluster receive and execute subqueries from the Router, -and directly interact with the underlying distributed storage for insertions, -selections and pre-aggregation warmup. Workers **do not** interact with each -other directly, and instead rely on the Router to distribute queries and manage -any associated metadata. - -### <--{"id" : "Cube Store"}--> Scaling - -Although Cube Store _can_ be run in single-instance mode, this is often -unsuitable for production deployments. For high concurrency and data throughput, -we **strongly** recommend running Cube Store as a cluster of multiple instances -instead. Because the storage layer is decoupled from the query processing -engine, you can horizontally scale your Cube Store cluster for as much -concurrency as you require. - -A sample Docker Compose stack setting Cube Store cluster up might look like: - -```yaml -version: '2.2' - -services: - cubestore_router: - image: cubejs/cubestore:latest - environment: - - CUBESTORE_WORKERS=cubestore_worker_1:10001,cubestore_worker_2:10002 - - CUBESTORE_REMOTE_DIR=/cube/data - - CUBESTORE_META_PORT=9999 - - CUBESTORE_SERVER_NAME=cubestore_router:9999 - volumes: - - .cubestore:/cube/data - depends_on: - - cubestore_worker_1 - - cubestore_worker_2 - - cubestore_worker_1: - image: cubejs/cubestore:latest - environment: - - CUBESTORE_WORKERS=cubestore_worker_1:10001,cubestore_worker_2:10002 - - CUBESTORE_SERVER_NAME=cubestore_worker_1:10001 - - CUBESTORE_WORKER_PORT=10001 - - CUBESTORE_REMOTE_DIR=/cube/data - - CUBESTORE_META_ADDR=cubestore_router:9999 - volumes: - - .cubestore:/cube/data - - cubestore_worker_2: - image: cubejs/cubestore:latest - environment: - - CUBESTORE_WORKERS=cubestore_worker_1:10001,cubestore_worker_2:10002 - - CUBESTORE_SERVER_NAME=cubestore_worker_2:10002 - - CUBESTORE_WORKER_PORT=10002 - - CUBESTORE_REMOTE_DIR=/cube/data - - CUBESTORE_META_ADDR=cubestore_router:9999 - volumes: - - .cubestore:/cube/data -``` - -### <--{"id" : "Cube 
Store"}--> Storage - -Cube Store makes use of a separate storage layer for storing metadata as well as -for persisting pre-aggregations as Parquet files. Cube Store can use both AWS S3 -and Google Cloud, or if desired, a local path on the server if all nodes of a -cluster run on a single machine. - -A simplified example using AWS S3 might look like: - -```yaml -version: '2.2' -services: - cubestore_router: - image: cubejs/cubestore:latest - environment: - - CUBESTORE_SERVER_NAME=cubestore_router:9999 - - CUBESTORE_META_PORT=9999 - - CUBESTORE_WORKERS=cubestore_worker_1:9001 - - CUBESTORE_S3_BUCKET= - - CUBESTORE_S3_REGION= - - CUBESTORE_AWS_ACCESS_KEY_ID= - - CUBESTORE_AWS_SECRET_ACCESS_KEY= - cubestore_worker_1: - image: cubejs/cubestore:latest - environment: - - CUBESTORE_SERVER_NAME=cubestore_worker_1:9001 - - CUBESTORE_WORKER_PORT=9001 - - CUBESTORE_META_ADDR=cubestore_router:9999 - - CUBESTORE_WORKERS=cubestore_worker_1:9001 - - CUBESTORE_S3_BUCKET= - - CUBESTORE_S3_REGION= - - CUBESTORE_AWS_ACCESS_KEY_ID= - - CUBESTORE_AWS_SECRET_ACCESS_KEY= - depends_on: - - cubestore_router -``` - -## Redis - -Earlier, [Redis][redis] was used in production deployments as storage for -in-memory cache and query queue. Since version v0.32.0, Cube Store is used for -that purpose. It is still possible to [configure][ref-config-redis] Cube to use -Redis; however, it is strongly not recommended. Please check the [blog -post][blog-details] for details. - - - -Redis support is deprecated and will be removed from Cube in the future. Upgrade -to v0.32.0 or later to use Cube Store instead of Redis. See the [migration -guide][blog-migration-guide]. 
- - - -[dh-cubejs]: https://hub.docker.com/r/cubejs/cube -[dh-cubestore]: https://hub.docker.com/r/cubejs/cubestore -[gh-cube-examples-k8s]: - https://github.com/cube-js/cube/tree/master/examples/kubernetes -[gh-cube-examples-k8s-helm]: - https://github.com/cube-js/cube/tree/master/examples/helm-charts -[ref-deploy-prod-list]: /deployment/production-checklist -[ref-deploy-cubecloud]: /deployment/platforms/cube-cloud -[ref-deploy-docker]: /deployment/platforms/docker -[ref-config-env]: /reference/environment-variables -[ref-config-js]: /config -[ref-conf-ref-schemapath]: /config#options-reference-schema-path -[redis]: https://redis.io -[ref-config-redis]: /reference/environment-variables#cubejs-redis-password -[blog-details]: https://cube.dev/blog/how-you-win-by-using-cube-store-part-1 -[blog-migration-guide]: - https://cube.dev/blog/how-you-win-by-using-cube-store-part-1#how-to-migrate-to-cube-store -[gh-pavel]: https://github.com/paveltiunov diff --git a/docs/content/Deployment/Production-Checklist.mdx b/docs/content/Deployment/Production-Checklist.mdx deleted file mode 100644 index 7e64106e0ad65..0000000000000 --- a/docs/content/Deployment/Production-Checklist.mdx +++ /dev/null @@ -1,167 +0,0 @@ ---- -title: Production checklist -permalink: /deployment/production-checklist -category: Deployment -menuOrder: 2 ---- - - - -Thinking of migrating to the cloud instead? [Click -here][blog-migrate-to-cube-cloud] to learn more about migrating a self-hosted -installation to [Cube Cloud][link-cube-cloud]. - - - -This is a checklist for configuring and securing Cube for a production -deployment. - -## Disable Development Mode - -When running Cube in production environments, make sure development mode is -disabled both on API Instances and Refresh Worker. Running Cube in development -mode in a production environment can lead to security vulnerabilities. -Enabling Development Mode in Cube Cloud is not recommended. -Development Mode will expose your data to the internet. 
You can -read more on the differences between [production and development mode -here][link-cubejs-dev-vs-prod]. - - - -Development mode is disabled by default. - - - -```dotenv -# Set this to false or leave unset to disable development mode -CUBEJS_DEV_MODE=false -``` - -## Set up Refresh Worker - -To refresh in-memory cache and [pre-aggregations][ref-schema-ref-preaggs] in the -background, we recommend running a separate Cube Refresh Worker instance. This -allows your Cube API Instance to continue to serve requests with high -availability. - -```dotenv -# Set to true so a Cube instance acts as a refresh worker -CUBEJS_REFRESH_WORKER=true -``` - -## Set up Cube Store - - - -While Cube can operate with in-memory cache and queue storage, there're multiple -parts of Cube which require Cube Store in production mode. Replicating Cube -instances without Cube Store can lead to source database degraded performance, -various race conditions and cached data inconsistencies. - - - -Cube Store manages in-memory cache, queue and pre-aggregations for Cube. Follow -the [instructions here][ref-caching-cubestore] to set it up. - -Depending on your database, Cube may need to "stage" pre-aggregations inside -your database first before ingesting them into Cube Store. In this case, Cube -will require write access to the `prod_pre_aggregations` schema inside your -database. The schema name can be modified by the -[`CUBEJS_PRE_AGGREGATIONS_SCHEMA`][ref-conf-ref-env-preaggs-schema] environment -variable; see the [Environment Variables reference][ref-env-vars] for more -details. - - - -You may consider enabling an export bucket which allows Cube to build large -pre-aggregations in a much faster manner. It is currently supported for -BigQuery, Redshift and Snowflake. Check [the relevant documentation for your -configured database][ref-config-connect-db] to set it up. 
- - - -## Secure the deployment - -If you're using JWTs, you can configure Cube to correctly decode them and inject -their contents into the [Security Context][ref-sec-ctx]. Add your authentication -provider's configuration under [the `jwt` property of your `cube.js` -configuration file][ref-config-jwt], or if using environment variables, see -`CUBEJS_JWK_*`, `CUBEJS_JWT_*` in the [Environment Variables -reference][ref-env-vars]. - -## Set up health checks - -Cube provides [Kubernetes-API compatible][link-k8s-healthcheck-api] health check -(or probe) endpoints that indicate the status of the deployment. Configure your -monitoring service of choice to use the [`/readyz`][ref-api-readyz] and -[`/livez`][ref-api-livez] API endpoints so you can check on the Cube -deployment's health and be alerted to any issues. - -## Appropriate cluster sizing - -There's no one-size-fits-all when it comes to sizing a Cube cluster and its -resources. Resources required by Cube significantly depend on the amount of -traffic Cube needs to serve and the amount of data it needs to process. The -following sizing estimates are based on default settings and are very generic, -which may not fit your Cube use case, so you should always tweak resources based -on consumption patterns you see. - -### <--{"id" : "Appropriate cluster sizing"}--> Memory and CPU - -Each Cube cluster should contain at least 2 Cube API instances. Every Cube API -instance should have at least 3GB of RAM and 2 CPU cores allocated for it. - -Refresh workers tend to be much more CPU and memory intensive, so at least 6GB -of RAM is recommended. 
Please note that to take advantage of all available RAM, -the Node.js heap size should be adjusted accordingly by using the -[`--max-old-space-size` option][node-heap-size]: - -```sh -NODE_OPTIONS="--max-old-space-size=6144" -``` - -[node-heap-size]: - https://nodejs.org/api/cli.html#--max-old-space-sizesize-in-megabytes - -The Cube Store router node should have at least 6GB of RAM and 4 CPU cores -allocated for it. Every Cube Store worker node should have at least 8GB of RAM -and 4 CPU cores allocated for it. The Cube Store cluster should have at least -two worker nodes. - -### <--{"id" : "Appropriate cluster sizing"}--> RPS and data volume - -Depending on data model size, every Core Cube API instance can serve 1 to 10 -requests per second. Every Core Cube Store router node can serve 50-100 queries -per second. As a rule of thumb, you should provision 1 Cube Store worker node -per one Cube Store partition or 1M of rows scanned in a query. For example if -your queries scan 16M of rows per query, you should have at least 16 Cube Store -worker nodes provisioned. -Please note that the number of raw data rows doesn't usually equal the number of rows in pre-aggregation. -At the same time, queries don't usually scan all the data in pre-aggregations, as Cube Store uses partition pruning to optimize queries. -`EXPLAIN ANALYZE` can be used to see scanned partitions -involved in a Cube Store query. Cube Cloud ballpark performance numbers can -differ as it has different Cube runtime. 
- -[blog-migrate-to-cube-cloud]: - https://cube.dev/blog/migrating-from-self-hosted-to-cube-cloud/ -[link-caddy]: https://caddyserver.com/ -[link-cube-cloud]: https://cubecloud.dev -[link-cubejs-dev-vs-prod]: /configuration/overview#development-mode -[link-k8s-healthcheck-api]: - https://kubernetes.io/docs/reference/using-api/health-checks/ -[link-kong]: https://konghq.com/kong/ -[link-nginx]: https://www.nginx.com/ -[link-nginx-docs]: https://nginx.org/en/docs/http/configuring_https_servers.html -[ref-config-connect-db]: /connecting-to-the-database -[ref-caching-cubestore]: /caching/running-in-production -[ref-conf-ref-env-cachequeue-driver]: - /reference/environment-variables#cubejs-cache-and-queue-driver -[ref-conf-ref-env-preaggs-schema]: - /reference/environment-variables#cubejs-pre-aggregations-schema -[ref-env-vars]: /reference/environment-variables -[ref-schema-ref-preaggs]: /schema/reference/pre-aggregations -[ref-api-scheduled-refresh]: /rest-api#v-1-run-scheduled-refresh -[ref-sec-ctx]: /security/context -[ref-config-jwt]: /config#jwt -[ref-api-readyz]: /rest-api#readyz -[ref-api-livez]: /rest-api#livez diff --git a/docs/content/Examples-Tutorials-Recipes/Examples.mdx b/docs/content/Examples-Tutorials-Recipes/Examples.mdx deleted file mode 100644 index 3a768105ff28f..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Examples.mdx +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: Examples Overview -menuTitle: Overview -permalink: /examples -category: Examples & Tutorials -redirect_from: - - /tutorials ---- - -Below you can find tutorials to help you get started with Cube. - -If you're already building something with Cube, please explore -[recipes](/recipes) — short, self-contained, and runnable solutions to popular -use cases. 
- -## Tutorials - -These tutorials are great places to start learning Cube: - -- [Cube, the Open Source Dashboard Framework: Ultimate Guide](https://cube.dev/blog/cubejs-open-source-dashboard-framework-ultimate-guide) - — launch an API with Cube and build a dashboard with React - -- [React query builder](https://cube.dev/blog/react-query-builder-with-cubejs) - and [Vue query builder](https://cube.dev/blog/vue-query-builder-with-cubejs/) - — learn how to use Query Builder components for React and Vue to build - customizable query interfaces - -The following tutorials cover advanced concepts of Cube: - -- [Pre-Aggregations Tutorial](https://cube.dev/blog/high-performance-data-analytics-with-cubejs-pre-aggregations/) - — learn about pre-aggregations, one of the most powerful Cube features that - significantly speed up the performance of your applications such as dashboards - and reports - -- _Building an Open Source Mixpanel Alternative_ — a two-part series on building - a production-ready application with Cube: - [Part 1: Collecting and Displaying Events](https://cube.dev/blog/building-an-open-source-mixpanel-alternative-1), - [Part 2: Conversion Funnels](https://cube.dev/blog/building-open-source-mixpanel-alternative-2/) - -### <--{"id" : "Tutorials"}--> Features - -Learn more about prominent features of Cube: - -| Feature | Story | Demo | -| :-------------------------------------------------------------------------------------- | :---------------------------------------------------------------------------------------------------------------- | :------------------------------------------------ | -| [Drill downs](https://cube.dev/docs/schema/fundamentals/additional-concepts#drilldowns) | [Introducing a drill down table API](https://cube.dev/blog/introducing-a-drill-down-table-api-in-cubejs/) | [Demo](https://drill-downs-demo.cube.dev) | -| [Compare date range](https://cube.dev/docs/query-format#time-dimensions-format) | [Comparing data over different time 
periods](https://cube.dev/blog/comparing-data-over-different-time-periods/) | [Demo](https://compare-date-range-demo.cube.dev) | -| [Data blending](https://cube.dev/docs/recipes/data-blending) | [Introducing data blending API](https://cube.dev/blog/introducing-data-blending-api/) | [Demo](https://data-blending-demo.cube.dev) | -| [Real-time data fetch](https://cube.dev/docs/real-time-data-fetch) | [Real-time dashboard guide](https://real-time-dashboard.cube.dev) | [Demo](https://real-time-dashboard-demo.cube.dev) | -| [Dynamic data model](https://cube.dev/docs/dynamic-schema-creation) | [Using asyncModule to generate schemas](https://github.com/cube-js/cube/tree/master/examples/async-module-simple) | — | -| [Authentication](https://cube.dev/docs/security#using-json-web-key-sets-jwks) | [Auth0 integration](https://github.com/cube-js/cube/tree/master/examples/auth0) | — | -| [Authentication](https://cube.dev/docs/security#using-json-web-key-sets-jwks) | [AWS Cognito integration](https://github.com/cube-js/cube/tree/master/examples/cognito) | — | - -### <--{"id" : "Tutorials"}--> Front-end integrations - -Explore how to integrate Cube with data visualization tools: - -| Tool | Tutorial | Demo | -| :--------------------------------------------------------------------- | :----------------------------------------------------------------------------------------------------------------------- | :-------------------------------------------------------------------------------------- | -| 📈 [D3.js](https://awesome.cube.dev/tools/d3) | [D3 dashboard tutorial](https://d3-dashboard.cube.dev) | [Demo](https://d3-dashboard-demo.cube.dev) | -| 📈 [Google Charts](https://awesome.cube.dev/tools/google-charts) | [Google Charts dashboard](https://cube.dev/blog/google-charts-dashboard) | [Demo](https://google-charts-dashboard-demo.cube.dev) | -| 📈 [Highcharts](https://awesome.cube.dev/tools/highcharts) | [React Highcharts example](https://cube.dev/blog/react-highcharts-example/) | 
[Demo](https://highcharts-demo.cube.dev) | -| 📈 [Material UI](https://awesome.cube.dev/tools/material-ui-data-grid) | [Material UI Dashboard with React](https://material-ui-dashboard.cube.dev) | [Demo](https://material-ui-dashboard-demo.cube.dev) | -| 📈 [Material UI](https://awesome.cube.dev/tools/material-ui-data-grid) | [React data table with Material UI](https://dev.to/cubejs/react-data-table-with-material-ui-and-a-spark-of-joy-50o1) | [Demo](https://react-data-table-demo.cube.dev) | -| 📈 [Material](https://material.io) | [Angular dashboard with Material](https://angular-dashboard.cube.dev) | [Demo](https://angular-dashboard-demo.cube.dev) | -| 📊 [AG Grid](https://awesome.cube.dev/tools/ag-grid) | [React Pivot Table with AG Grid](https://react-pivot-table.cube.dev) | [Demo](https://react-pivot-table-demo.cube.dev) | -| 🗺 [Mapbox](https://awesome.cube.dev/tools/mapbox-gl) | [Building map-based data visualizations with Mapbox](https://mapbox-guide.cube.dev) | [Demo](https://mapbox-demo.cube.dev) | -| 📊 [Retool](https://retool.com/) | [Building an internal dashboard with Retool](https://cube.dev/blog/building-an-internal-dashboard-with-retool-and-cube/) | [Demo](https://cubedev.retool.com/embedded/public/945c174d-566e-42f6-b33c-73052847e483) | - -### <--{"id" : "Tutorials"}--> Demo applications - -Have a look at some demo applications you can build with Cube: - -| Application | Story | -| :--------------------------------------------------- | :----------------------------------------------------------------------------------------------------------------------------- | -| [Hacktoberfest 2020](https://hacktoberfest.cube.dev) | [Hacktoberfest 2020 in insights and statistics](https://dev.to/igorlukanin/hacktoberfest-2020-in-insights-and-statistics-3m57) | - -Cube integrates with all kinds of -[data sources](https://cube.dev/docs/config/databases) and data -[visualization tools](https://cube.dev/docs/config/downstream). 
You can
-[find](https://cube.dev/for) more step-by-step guides to get started with Cube.
tenants](/recipes/using-different-schemas-for-tenants) -- [Using dynamic measures](/recipes/referencing-dynamic-measures) -- [Using dynamic union tables](/recipes/dynamically-union-tables) - -### <--{"id" : "Recipes"}--> Queries - -- [Getting unique values for a field](/recipes/getting-unique-values-for-a-field) -- [Implementing pagination](/recipes/pagination) -- [Passing dynamic parameters in a query](/recipes/passing-dynamic-parameters-in-a-query) - -### <--{"id" : "Recipes"}--> Query acceleration - -- [Accelerating non-additive measures](/recipes/non-additivity) -- [Using originalSql and rollup pre-aggregations effectively](/recipes/using-originalsql-and-rollups-effectively) -- [Incrementally building pre-aggregations for a date range](/recipes/incrementally-building-pre-aggregations-for-a-date-range) -- [Refreshing select partitions of a pre-aggregation](/recipes/refreshing-select-partitions) - -### <--{"id" : "Recipes"}--> Code reusability - -- [Implementing schema generation](/recipes/schema-generation) - -### <--{"id" : "Recipes"}--> Upgrading Cube - -- [Migrating from Redis to Cube Store](https://cube.dev/blog/how-you-win-by-using-cube-store-part-1#how-to-migrate-to-cube-store) -- [Migrating from Express to Docker](/recipes/migrating-from-express-to-docker) diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Access-control/column-based-access.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Access-control/column-based-access.mdx deleted file mode 100644 index 2a9ecb9690499..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Access-control/column-based-access.mdx +++ /dev/null @@ -1,185 +0,0 @@ ---- -title: Enforcing Column-Based Access -permalink: /recipes/column-based-access -category: Examples & Tutorials -subCategory: Access control -menuOrder: 2 ---- - -## Use case - -We want to manage user access to different data depending on a database -relationship. 
In the recipe below, we will manage supplier access to their
-products. A supplier can't see other suppliers' products.
return query; - }, -}; -``` - -## Query - -To get the supplier's products, we will send two identical requests with -different emails inside JWTs. - -```json -{ - "iat": 1000000000, - "exp": 5000000000, - "email": "purus.accumsan@Proin.org" -} -``` - -```json -{ - "iat": 1000000000, - "exp": 5000000000, - "email": "gravida.sit.amet@risus.net" -} -``` - -## Result - -We have received different data depending on the supplier's email. - -```javascript -// purus.accumsan@Proin.org -[ - { - 'products.name': 'Awesome Soft Salad', - }, - { - 'products.name': 'Rustic Granite Gloves', - }, -]; -``` - -```javascript -// gravida.sit.amet@risus.net -[ - { - 'products.name': 'Incredible Granite Cheese', - }, -]; -``` - -## Source code - -Please feel free to check out the -[full source code](https://github.com/cube-js/cube/tree/master/examples/recipes/column-based-access) -or run it with the `docker-compose up` command. You'll see the result, including -queried data, in the console. diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Access-control/controlling-access-to-cubes-and-views.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Access-control/controlling-access-to-cubes-and-views.mdx deleted file mode 100644 index a6997775ccc6f..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Access-control/controlling-access-to-cubes-and-views.mdx +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: Controlling access to cubes and views -permalink: /recipes/controlling-access-to-cubes-and-views -category: Examples & Tutorials -subCategory: Access control -menuOrder: 1 ---- - -## Use case - -We want to manage user access to different cubes and/or views depending on some -sort of user property. In the recipe below, we will manage access to a view so -that only users with a `department` claim in their JWT can query it. 
- -## Configuration - -```javascript -module.exports = { - contextToAppId: ({ securityContext }) => { - return `CUBEJS_APP_${securityContext.company}`; - }, - extendContext: ({ securityContext }) => { - return { - isFinance: securityContext.department === 'finance', - }; - }, -}; -``` - -## Data modeling - - - -```yaml -# orders.yml -cubes: - - name: orders - sql_table: orders - public: false - # ... - -# users.yml -cubes: - - name: users - sql_table: users - public: false - # ... - -# total_revenue_per_customer.yml -views: - - name: total_revenue_per_customer - public: COMPILE_CONTEXT.security_context.isFinance - includes: - - orders.total_revenue - - users.company -``` - -```javascript -// orders.js -cube(`orders`, { - sql_table: `orders`, - public: false, - - // ... -}); - -// users.js -cube(`users`, { - sql_table: `users`, - public: false, - - // ... -}); - -// total_revenue_per_customer.js -view('total_revenue_per_customer', { - description: `Total revenue per customer`, - public: COMPILE_CONTEXT.security_context.isFinance, - - includes: [ - orders.total_revenue, - users.company, - ], -}); -``` - - - -## Query - -After generating a JWT with a `department` claim set to `finance`, we can send -it as part of a cURL command: - -```bash{outputLines: 2-3} -curl \ - -H "Authorization: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJkZXBhcnRtZW50IjoiZmluYW5jZSIsImV4cCI6MTY2NzMzNzI1MH0.njfL7GMDNlzKaJDZA0OQ_b2u2JhuSm-WjnS0yVfB8NA" \ - http://localhost:4000/cubejs-api/v1/meta -``` - -## Result - -The `/meta` endpoint shows the available cubes and views: - -```json -{ - "cubes": [ - { - "name": "total_revenue_per_customer", - "title": "Total Revenue Per Customer", - "description": "Total revenue per customer", - "measures": [ - { - "name": "total_revenue_per_customer.total_revenue", - "title": "Total Revenue Per Customer Total Revenue", - "shortTitle": "Total Revenue", - "cumulativeTotal": false, - "cumulative": false, - "type": "number", - "aggType": "number", - "drillMembers": 
[], - "drillMembersGrouped": { - "measures": [], - "dimensions": [] - }, - "isVisible": true - } - ], - "dimensions": [ - { - "name": "total_revenue_per_customer.company", - "title": "Total Revenue Per Customer Company", - "type": "string", - "shortTitle": "Company", - "suggestFilterValues": true, - "isVisible": true - } - ], - "segments": [] - } - ] -} -``` - -## Source code - -Please feel free to check out the -[full source code](https://github.com/cube-js/cube/tree/master/examples/recipes/changing-visibility-of-cubes-or-views) -or run it with the `docker-compose up` command. diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Access-control/role-based-access.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Access-control/role-based-access.mdx deleted file mode 100644 index 98a7ae8e837c2..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Access-control/role-based-access.mdx +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: Enforcing Role-Based Access -permalink: /recipes/role-based-access -category: Examples & Tutorials -subCategory: Access control -menuOrder: 2 ---- - -## Use case - -We want to manage user access to different data depending on their role. In the -recipe below, a user with the `operator` role can only view processing orders -from a shop and a `manager` can only view shipped and completed orders. - -## Configuration - -To implement role-based access, we will use a -[JSON Web Token](https://cube.dev/docs/security) with role information in the -payload, and the -[`queryRewrite`](https://cube.dev/docs/security/context#using-query-rewrite) -extension point to manage data access. - -Let's add the role verification in the `cube.js` file. 
- -```javascript -module.exports = { - queryRewrite: (query, { securityContext }) => { - if (!securityContext.role) { - throw new Error('No role found in Security Context!'); - } - - if (securityContext.role == 'manager') { - query.filters.push({ - member: 'orders.status', - operator: 'equals', - values: ['shipped', 'completed'], - }); - } - - if (securityContext.role == 'operator') { - query.filters.push({ - member: 'orders.status', - operator: 'equals', - values: ['processing'], - }); - } - - return query; - }, -}; -``` - -## Query - -To get the number of orders as a manager or operator, we will send two identical -requests with different JWTs: - -```json -{ - "iat": 1000000000, - "exp": 5000000000, - "role": "manager" -} -``` - -```json -{ - "iat": 1000000000, - "exp": 5000000000, - "role": "operator" -} -``` - -## Result - -We have received different data depending on the user's role. - -```json5 -// Manager -[ - { - "orders.status": "completed", - "orders.count": "3346" - }, - { - "orders.status": "shipped", - "orders.count": "3300" - } -] -``` - -```json5 -// Operator -[ - { - "orders.status": "processing", - "orders.count": "3354" - } -] -``` - -## Source code - -Please feel free to check out the -[full source code](https://github.com/cube-js/cube/tree/master/examples/recipes/role-based-access) -or run it with the `docker-compose up` command. You'll see the result, including -queried data, in the console. 
diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Access-control/using-different-schemas-for-tenants.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Access-control/using-different-schemas-for-tenants.mdx deleted file mode 100644 index 958bf983cc4b3..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Access-control/using-different-schemas-for-tenants.mdx +++ /dev/null @@ -1,184 +0,0 @@ ---- -title: Using Different Data Models for Tenants -permalink: /recipes/using-different-schemas-for-tenants -category: Examples & Tutorials -subCategory: Access control -menuOrder: 2 ---- - -## Use case - -We want to provide different data models to different tenants. In the recipe -below, we'll learn how to switch between multiple data models based on the -tenant. - -## Configuration - -We have a folder structure as follows: - -```tree -model/ -├── avocado/ -│ └── cubes -│ └── Products.js -└── mango/ - └── cubes - └── Products.js -``` - -Let's configure Cube to use a specific data model path for each tenant. We'll -pass the tenant name as a part of -[`securityContext`](https://cube.dev/docs/security/context#top) into the -[`repositoryFactory`](https://cube.dev/docs/config#repository-factory) function. - -We'll also need to override the -[`contextToAppId`](https://cube.dev/docs/config#context-to-app-id) function to -control how the data model compilation result is cached and provide the tenant -names via the -[`scheduledRefreshContexts`](https://cube.dev/docs/config#scheduled-refresh-contexts) -function so a refresh worker can find all existing data models and build -pre-aggregations for them, if needed. 
- -Our `cube.js` file will look like this: - -```javascript -const { FileRepository } = require('@cubejs-backend/server-core'); - -module.exports = { - contextToAppId: ({ securityContext }) => - `CUBEJS_APP_${securityContext.tenant}`, - - repositoryFactory: ({ securityContext }) => - new FileRepository(`model/${securityContext.tenant}`), - - scheduledRefreshContexts: () => [ - { securityContext: { tenant: 'avocado' } }, - { securityContext: { tenant: 'mango' } }, - ], -}; -``` - -## Data modeling - -In this example, we'd like to get products with odd `id` values for the -`avocado` tenant and with even `id` values the `mango` tenant: - -This is the `products` cube for the `avocado` tenant: - - - -```yaml -cubes: - - name: products - sql: > - SELECT * - FROM public.Products - WHERE MOD (id, 2) = 1 -``` - -```javascript - -cube(`products`, { - sql: `SELECT * - FROM public.Products - WHERE MOD (id, 2) = 1`, - - // ... -}); -``` - - - -This is the `products` cube for the `mango` tenant: - - - -```yaml -cubes: - - name: products - sql: > - SELECT * - FROM public.Products - WHERE MOD (id, 2) = 0 -``` - -```javascript -cube(`products`, { - sql: `SELECT * - FROM public.Products - WHERE MOD (id, 2) = 0`, - - // ... 
-}); -``` - - - -## Query - -To fetch the products, we will send two identical queries with different JWTs: - -```json -{ - "sub": "1234567890", - "tenant": "Avocado", - "iat": 1000000000, - "exp": 5000000000 -} -``` - -```json5 -{ - "sub": "1234567890", - "tenant": "Mango", - "iat": 1000000000, - "exp": 5000000000 -} -``` - -## Result - -We will receive different data for each tenant, as expected: - -```json5 -// Avocado products -[ - { - "products.id": 1, - "products.name": "Generic Fresh Keyboard", - }, - { - "products.id": 3, - "products.name": "Practical Wooden Keyboard", - }, - { - "products.id": 5, - "products.name": "Handcrafted Rubber Chicken", - } -] -``` - -```json5 -// Mango products: -[ - { - "products.id": 2, - "products.name": "Gorgeous Cotton Sausages", - }, - { - "products.id": 4, - "products.name": "Handmade Wooden Soap", - }, - { - "products.id": 6, - "products.name": "Handcrafted Plastic Chair", - } -] -``` - -## Source code - -Please feel free to check out the -[full source code](https://github.com/cube-js/cube/tree/master/examples/recipes/using-different-schemas-for-tenants) -or run it with the `docker-compose up` command. You'll see the result, including -queried data, in the console. diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Analytics/active-users.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Analytics/active-users.mdx deleted file mode 100644 index 52791a800fd6f..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Analytics/active-users.mdx +++ /dev/null @@ -1,171 +0,0 @@ ---- -title: Daily, Weekly, Monthly Active Users (DAU, WAU, MAU) -permalink: /recipes/active-users -category: Examples & Tutorials -subCategory: Analytics -menuOrder: 1 -redirect_from: - - /active-users ---- - -## Use case - -We want to know the customer engagement of our store. To do this, we need to use -an [Active Users metric](https://en.wikipedia.org/wiki/Active_users). 
- -## Data modeling - -Daily, weekly, and monthly active users are commonly referred to as DAU, WAU, -MAU. To get these metrics, we need to use a rolling time frame to calculate a -daily count of how many users interacted with the product or website in the -prior day, 7 days, or 30 days. Also, we can build other metrics on top of these -basic metrics. For example, the WAU to MAU ratio, which we can add by using -already defined `weekly_active_users` and `monthly_active_users`. - -To calculate daily, weekly, or monthly active users we’re going to use the -[`rolling_window`](https://cube.dev/docs/schema/reference/measures#parameters-rolling-window) -measure parameter. - - - -```yaml -cubes: - - name: active_users - sql: > - SELECT user_id, created_at - FROM public.orders - - measures: - - name: monthly_active_users - type: count_distinct - sql: user_id - rolling_window: - trailing: 30 day - offset: start - - - name: weekly_active_users - type: count_distinct - sql: user_id - rolling_window: - trailing: 7 day - offset: start - - - name: daily_active_users - type: count_distinct - sql: user_id - rolling_window: - trailing: 1 day - offset: start - - - name: wau_to_mau - title: WAU to MAU - type: number - sql: "100.000 * {weekly_active_users} / NULLIF({monthly_active_users}, 0)" - format: percent - - dimensions: - - name: created_at - type: time - sql: created_at -``` - -```javascript -cube(`active_users`, { - sql: `SELECT user_id, created_at - FROM public.orders`, - - measures: { - monthly_active_users: { - sql: `user_id`, - type: `count_distinct`, - rolling_window: { - trailing: `30 day`, - offset: `start`, - }, - }, - - weekly_active_users: { - sql: `user_id`, - type: `count_distinct`, - rolling_window: { - trailing: `7 day`, - offset: `start`, - }, - }, - - daily_active_users: { - sql: `user_id`, - type: `count_distinct`, - rolling_window: { - trailing: `1 day`, - offset: `start`, - }, - }, - - wau_to_mau: { - title: `WAU to MAU`, - sql: `100.000 * 
${weekly_active_users} / NULLIF(${monthly_active_users}, 0)`, - type: `number`, - format: `percent`, - }, - }, - - dimensions: { - created_at: { - sql: `created_at`, - type: `time`, - }, - }, -}); -``` - - - -## Query - -We should set a `timeDimensions` with the `dateRange`. - -```bash{outputLines: 2-18} -curl cube:4000/cubejs-api/v1/load \ - 'query={ - "measures": [ - "active_users.monthly_active_users", - "active_users.weekly_active_users", - "active_users.daily_active_users", - "active_users.wau_to_mau" - ], - "timeDimensions": [ - { - "dimension": "active_users.created_at", - "dateRange": [ - "2020-01-01", - "2020-12-31" - ] - } - ] - }' -``` - -## Result - -We got the data with our daily, weekly, and monthly active users. - -```json -{ - "data": [ - { - "active_users.monthly_active_users": "22", - "active_users.weekly_active_users": "4", - "active_users.daily_active_users": "0", - "active_users.wau_to_mau": "18.1818181818181818" - } - ] -} -``` - -## Source code - -Please feel free to check out the -[full source code](https://github.com/cube-js/cube/tree/master/examples/recipes/active-users) -or run it with the `docker-compose up` command. You'll see the result, including -queried data, in the console. diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Analytics/cohort-retention.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Analytics/cohort-retention.mdx deleted file mode 100644 index 7d54330ba0f06..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Analytics/cohort-retention.mdx +++ /dev/null @@ -1,241 +0,0 @@ ---- -title: Implementing Retention Analysis & Cohorts -permalink: /recipes/cohort-retention -category: Examples & Tutorials -subCategory: Analytics -menuOrder: 3 -redirect_from: - - /cohort-retention ---- - -This is an advanced topic that assumes good, pre-existing knowledge of SQL and -Cube. 
- -Whether you’re selling groceries, financial services, or gym memberships, -successful recruitment of new customers is only truly successful if they return -to buy from you again. The metric that reflects this is called **retention**, -and the approach we use is **customer retention analysis**. Retention analysis -is typically done using **cohort analysis**. - -Cohort analysis is a technique to see how variables change over in different -groups with different starting conditions. Retention is a simplified one, where -the **starting condition is usually the time of signup and the variable is -simply activity**. - -It’s usually visualized as a cohort grid or retention curves. - -
- -
- -Cohort retention analysis is pretty hard to do in SQL. **We need to have the -user-date combination**, which tells us about a user’s activity on that date, -including dates with no activity. To do this, we need to make a tricky join, -which gives us a dates list. Once we have it, we can “fill it” with users’ -activities. - -The example below shows monthly cohort retention. The same technique can be used -for daily or weekly retention. - - - -The SQL code in this guide is Postgres-compliant. The final SQL code may be -different depending on your database. Also, this technique requires at least 1 -user to be active during the month, otherwise this month will not be included -in the months' list. - - - - - -```yaml -cubes: - - name: monthly_retention - sql: > - SELECT - users.id as user_id, - date_trunc('month', users.created_at) as signup_month, - months_list.activity_month as activity_month, - data.monthly_pageviews - FROM users - LEFT JOIN - ( - SELECT - DISTINCT (date_trunc('month', pages.original_timestamp)) as activity_month - FROM pages - ) as months_list - ON months_list.activity_month >= date_trunc('month', users.created_at) - LEFT JOIN - ( - SELECT - p.user_id, - date_trunc('month', p.original_timestamp) as activity_month, - COUNT(DISTINCT p.id) as monthly_pageviews - FROM pages p - GROUP BY 1,2 - ) as data - ON data.activity_month = months_list.activity_month - AND data.user_id = users.id -``` - -```javascript -cube(`monthly_retention`, { - sql: `SELECT - users.id as user_id, - date_trunc('month', users.created_at) as signup_month, - months_list.activity_month as activity_month, - data.monthly_pageviews - FROM users - LEFT JOIN - ( - SELECT - DISTINCT (date_trunc('month', pages.original_timestamp)) as activity_month - FROM pages - ) as months_list - ON months_list.activity_month >= date_trunc('month', users.created_at) - LEFT JOIN - ( - SELECT - p.user_id, - date_trunc('month', p.original_timestamp) as activity_month, - COUNT(DISTINCT p.id) as 
monthly_pageviews - FROM pages p - GROUP BY 1,2 - ) as data - ON data.activity_month = months_list.activity_month - AND data.user_id = users.id`, -}); -``` - - - -The SQL above provides the base table for our retention cube. It would show -signup months and activity months with pageviews: - -| user_id | signup_month | activity_month | monthly_pageviews | -|---------|--------------|----------------|-------------------| -| 1 | 1/18 | 1/18 | 10 | -| 1 | 1/18 | 2/18 | 5 | -| 1 | 1/18 | 3/18 | 0 | -| 2 | 2/18 | 2/18 | 12 | -| 2 | 2/18 | 3/18 | 0 | -| 3 | 3/18 | 3/18 | 5 | - -Now we can calculate a total count of users and the total count of active users, -who has more than 0 page views, for every month. Based on these two measures we -can calculate monthly `percentage_of_active`. - - - -```yaml -cubes: - - name: monthly_retention - # ... - measures: - - name: total_count - sql: user_id - type: count_distinct - public: false - - - name: total_active_count - sql: user_id - type: count_distinct - filters: - - sql: monthly_pageviews > 0 - drill_members: - - users.id - - users.email - - - name: percentage_of_active - sql: "100.0 * {total_active_count} / NULLIF({total_count}, 0)" - type: number - format: percent - drill_members: - - users.email - - bots.team - - bots.last_seen - - percentage_of_active -``` - -```javascript -cube(`monthly_retention`, { - // ... 
- - measures: { - total_count: { - sql: `user_id`, - type: `count_distinct`, - public: false, - }, - - total_active_count: { - sql: `user_id`, - type: `count_distinct`, - filters: [{ sql: `${CUBE}.monthly_pageviews > 0` }], - drill_members: [users.id, users.email], - }, - - percentage_of_active: { - sql: `100.0 * ${total_active_count} / NULLIF(${total_count}, 0)`, - type: `number`, - format: `percent`, - drill_members: [users.email, bots.team, bots.last_seen, percentage_of_active], - }, - }, -}); -``` - - - -To be able to build cohorts, we need to group by two dimensions: **signup -date**, which will define our cohorts, and **months since signup**, which will -show how the percentage of active users is changing. - - - -```yaml -cubes: - - name: monthly_retention - # ... - dimensions: - - name: months_since_signup - sql: "DATEDIFF('month', signup_month, activity_month)" - type: number - - - name: signup_date - sql: "(signup_month AT TIME ZONE 'America/Los_Angeles')" - type: time -``` - -```javascript -cube(`monthly_retention`, { - // ... - - dimensions: { - months_since_signup: { - sql: `DATEDIFF('month', ${CUBE}.signup_month, ${CUBE}.activity_month)`, - type: `number`, - }, - - signup_date: { - sql: `(signup_month AT TIME ZONE 'America/Los_Angeles')`, - type: `time`, - }, - }, -}); -``` - - - - - -Note, we are explicitly setting the `signup_month` timezone. `date_trunc` -returns UTC dates and not setting a correct timezone would lead to wrong -results due to time shift. 
- - - diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Analytics/event-analytics.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Analytics/event-analytics.mdx deleted file mode 100644 index 03be8d2d62f75..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Analytics/event-analytics.mdx +++ /dev/null @@ -1,616 +0,0 @@ ---- -title: Implementing Event Analytics -permalink: /recipes/event-analytics -category: Examples & Tutorials -subCategory: Analytics -menuOrder: 2 -redirect_from: - - /event-analytics ---- - - - -This functionality only works with data models written in JavaScript, not YAML. -For more information, check out the [Data Modeling Syntax][ref-modeling-syntax] page. - - - -This tutorial walks through how to transform raw event data into sessions. Many -“out-of-box” web analytics solutions come already prepackaged with sessions, but -they work as a “black box.” It doesn’t give the user either insight into or -control how these sessions defined and work. - -With Cube SQL-based sessions data model, you’ll have full control over how these -metrics are defined. It will give you great flexibility when designing sessions -and events to your unique business use case. - -A few question we’ll answer with our sessions data model: - -- How do we measure session duration? -- What is our bounce rate? -- What areas of the app are most used? -- Where are users spending most of their time? -- How do we filter sessions where a user performs a specific action? - -We’ll explore the subject using the data from -[Segment.com](https://segment.com)’s analytics.js library. The same concept -could be applied for different data collection tools, such as -[Snowplow](https://snowplowanalytics.com). - -## What is a session? - -A session is defined as a group of interactions one user takes within a given -time frame on your app. Usually that time frame defaults to 30 minutes, meaning -that whatever a user does on your app (e.g. 
browses pages, downloads resources, -purchases products) before they leave equals one session. - -
- -
- -## Unify events and page views into single cube - -Segment stores page view data as a `pages` table and events data as a `tracks` -table. For sessions we want to rely not only on page views data, but on events -as well. Imagine you have a highly interactive app, a user loads a page and can -stay on this page interacting with the website for while. Hence, you want to -count events as part of the session as well. - -To do that we need to combine page view data and event data into a single cube. -We’ll call the cube just events and assign a page views event type to -`pageview`. Also, we’re going to assign a unique event_id to every event to use -as primary key. - -```javascript -// Create a cube for events with the following content -cube(`events`, { - sql: ` - SELECT - t.id || '-e' as event_id - , t.anonymous_id as anonymous_id - , t.timestamp - , t.event - , t.context_page_path as page_path - , NULL as referrer - from javascript.tracks as t - - UNION ALL - - SELECT - p.id as event_id - , p.anonymous_id - , p.timestamp - , 'pageview' as event - , p.context_page_path as page_path - , p.referrer as referrer - FROM javascript.pages as p - `, -}); -``` - -The above SQL creates base table for our events cube. Now we can add some -measures to calculate the number of events and number of page views only, using -a filter on `event` column. - -```javascript -cube('events', { - // ..., - - measures: { - count: { - sql: `event_id`, - type: `count` - }, - - page_views_count: { - sql: `event_id`, - type: `count`, - filters: [ - { sql: `${CUBE}.event = 'pageview'` } - ] - } - }, -}); -``` - -Having this in place, we will already be able to calculate the total number of -events and pageviews. Next, we’re going to add dimensions to be able to filter -events in a specific time range and for specific types. 
- -```javascript -cube('events', { - // ..., - - dimensions: { - anonymous_id: { - sql: `anonymous_id`, - type: `number`, - primary_key: true - }, - - event_id: { - sql: `event_id`, - type: `number`, - primary_key: true - }, - - timestamp: { - sql: `timestamp`, - type: `time` - }, - - event: { - sql: `event`, - type: `string` - } - } -}); -``` - -Now we have everything for Events cube and can move forward to grouping these -events into sessions. - -## Creating Sessions - -As a recap, a session is defined as a group of interactions one user takes -within a given time frame on your app. Usually that time frame defaults to 30 -minutes. First, we’re going to use -[`LAG()` function](https://docs.aws.amazon.com/redshift/latest/dg/r_WF_LAG.html) -in Redshift to determine an inactivity_time between events. - -```sql -select - e.event_id AS event_id - , e.anonymous_id AS anonymous_id - , e.timestamp AS timestamp - , DATEDIFF(minutes, LAG(e.timestamp) OVER(PARTITION BY e.anonymous_id ORDER BY e.timestamp), e.timestamp) AS inactivity_time -FROM events AS e -``` - -`inactivity_time` is the time in minutes between the current event and the -previous. We’re going to use `inactivity_time` to terminate a session based on -30 minutes of inactivity. This window could be changed to any value, based on -how users interact with your app. Now we’re ready to introduce our Sessions -cube. 
- -```javascript -// Create new cube for sessions with the following content -cube(`sessions`, { - sql: ` - SELECT - ROW_NUMBER() OVER(PARTITION BY event.anonymous_id ORDER BY event.timestamp) || ' - '|| event.anonymous_id AS session_id - , event.anonymous_id - , event.timestamp AS session_start_at - , ROW_NUMBER() OVER(PARTITION BY event.anonymous_id ORDER BY event.timestamp) AS session_sequence - , LEAD(timestamp) OVER(PARTITION BY event.anonymous_id ORDER BY event.timestamp) AS next_session_start_at - FROM - (SELECT - e.anonymous_id - , e.timestamp - , DATEDIFF(minutes, LAG(e.timestamp) OVER(PARTITION BY e.anonymous_id ORDER BY e.timestamp), e.timestamp) AS inactivity_time - FROM ${events.sql()} AS e - ) AS event - WHERE (event.inactivity_time > 30 OR event.inactivity_time IS NULL) - `, -}); -``` - -The SQL query above creates sessions, either where inactivity_time is NULL, -which means it is the first session for the user, or after 30 minutes of -inactivity. - -As a primary key, we’re going to use `session_id`, which is the combination of -the `anonymous_id` and the session sequence, since it’s guaranteed to be unique -for each session. Having this in place, we can already count sessions and plot a -time series chart of sessions. - -```javascript -cube('sessions', { - // ..., - - measures: { - count: { - sql: `session_id`, - type: `count` - } - }, - - dimensions: { - anonymous_id: { - sql: `anonymous_id`, - type: `number`, - primary_key: true - }, - - session_id: { - sql: `session_id`, - type: `number`, - primary_key: true - }, - - start_at: { - sql: `session_start_at`, - type: `time` - }, - - next_start_at: { - sql: `next_session_start_at`, - type: `time` - }, - } -}); -``` - -## Connecting Events to Sessions - -The next step is to identify the events contained within the session and the -events ending the session. 
It’s required to get metrics such as session duration -and events per session, or to identify sessions where specific events occurred -(we’re going to use that for funnel analysis later on). We’re going to -[declare a join](/schema/reference/joins) such that the `events` cube has a `many_to_one` -relation to the `sessions` cube, and specify a condition, such as all users' events from -session start (inclusive) till the start of the next session (exclusive) belong -to that session. - -```javascript -cube('events', { - // ..., - - joins: { - sessions: { - relationship: `many_to_one`, - sql: ` - ${events.anonymous_id} = ${sessions.anonymous_id} - AND ${events.timestamp} >= ${sessions.start_at} - AND (${events.timestamp} < ${sessions.next_start_at} or ${sessions.next_start_at} is null) - ` - } - } -}); -``` - -To determine the end of the session, we’re going to use the -[`subQuery` feature](/schema/fundamentals/additional-concepts#subquery) in Cube. - -```javascript -cube('events', { - // ..., - - measures: { - last_event_timestamp: { - sql: `timestamp`, - type: `max`, - public: false - } - }, -}); - -cube('sessions', { - // ..., - - dimensions: { - end_raw: { - sql: `${events.last_event_timestamp}`, - type: `time`, - subQuery: true, - public: false - }, - - end_at: { - sql: - `CASE WHEN ${end_raw} + INTERVAL '1 minutes' > ${CUBE}.next_session_start_at - THEN ${CUBE}.next_session_start_at - ELSE ${end_raw} + INTERVAL '30 minutes' - END`, - type: `time` - }, - - duration_minutes: { - sql: `datediff(minutes, ${CUBE}.session_start_at, ${end_at})`, - type: `number` - } - }, - - measures: { - average_duration_minutes: { - type: `avg`, - sql: `${duration_minutes}` - } - }, -}); - -``` - -## Mapping Sessions to Users - -Right now all our sessions are anonymous, so the final step in our modeling -would be to map sessions to users in case, they have signed up and have been -assigned a `user_id`. Segment keeps track of such assignments in a table called -identifies. 
Every time you identify a user with segment it will connect the -current `anonymous_id` to the identified user id. - -We’re going to create an `identifies` cube, which will not contain any visible -measures and dimensions for users to use in Insights, but instead will provide -us with a `user_id` to use in the **Sessions** cube. Also, `identifies` could be -used later on to join `sessions` to your `users` cube, which could be a cube -built based on your internal database data for users. - -```javascript -// Create a new file for the `identifies` cube with following content -cube(`identifies`, { - sql: `SELECT distinct user_id, anonymous_id FROM javascript.identifies`, - - measures: {}, - - dimensions: { - id: { - sql: `user_id || '-' || anonymous_id`, - type: `string`, - primary_key: true, - }, - - anonymous_id: { - sql: `anonymous_id`, - type: `number`, - }, - - user_id: { - sql: `user_id`, - type: `number`, - format: `id`, - }, - }, -}); -``` - -We need to declare a relationship between `identifies` and `sessions`, where -session has a `many_to_one` relationship with identity. - -```javascript -cube('sessions', { - // ..., - - joins: { - identifies: { - relationship: `many_to_one`, - sql: `${identifies.anonymous_id} = ${sessions.anonymous_id}` - } - }, -}); -``` - -Once we have it, we can create a dimension `user_id`, which will be either a -`user_id` from the identifies table or an `anonymous_id` in case we don’t have -the identity of a visitor, which means that this visitor never signed in. - -```javascript -cube('sessions', { - // ..., - - dimensions: { - user_id: { - sql: `coalesce(${identifies.user_id}, ${CUBE}.anonymous_id)`, - type: `string` - } - } -}); -``` - -Based on the just-created dimension, we can add two new metrics: the count of -users and the average sessions per user. 
- -```javascript -cube('sessions', { - // ..., - - measures: { - users_count: { - sql: `${user_id}`, - type: `count_distinct` - }, - - average_sessions_per_user: { - sql: `${count}::NUMERIC / NULLIF(${users_count}, 0)`, - type: `number` - } - }, -}); -``` - -That was our final step in building a foundation for a sessions data model. -Congratulations on making it here! Now we’re ready to add some advanced metrics -on top of it. - -## More metrics for Sessions - -### <--{"id" : "More metrics for Sessions"}--> Number of Events per Session - -This one is super easy to add with a subQuery dimension. We just calculate the number -of events, which we already have as a measure in the `events` cube, as a dimension -in the `sessions` cube. - -```javascript -cube('sessions', { - // ..., - - dimensions: { - number_events: { - sql: `${events.count}`, - type: `number`, - subQuery: true - } - }, -}); - -``` - -### <--{"id" : "More metrics for Sessions"}--> Bounce Rate - -A bounced session is usually defined as a session with only one event. Since -we’ve just defined the number of events per session, we can easily add a -dimension `is_bounced` to identify bounced sessions to the Sessions cube. Using -this dimension, we can add two measures to the Sessions cube as well - a count -of bounced sessions and a bounce rate. - -```javascript -cube('sessions', { - // ..., - - dimensions: { - is_bounced: { - type: `string`, - case: { - when: [ { sql: `${number_events} = 1`, label: `True` }], - else: { label: `False` } - } - } - }, - - measures: { - bounced_count: { - sql: `session_id`, - type: `count`, - filters: [{ - sql: `${is_bounced} = 'True'` - }] - }, - - bounce_rate: { - sql: `100.00 * ${bounced_count} / NULLIF(${count}, 0)`, - type: `number`, - format: `percent` - } - }, -}); -``` - -### <--{"id" : "More metrics for Sessions"}--> First Referrer - -We already have this column in place in our base table. We’re just going to -define a dimension on top of this. 
- -```javascript -cube('sessions', { - // ..., - - measures: { - first_referrer: { - type: `string`, - sql: `first_referrer` - } - }, -}); -``` - -### <--{"id" : "More metrics for Sessions"}--> Sessions New vs Returning - -Same as for the first referrer. We already have a `session_sequence` field in -the base table, which we can use for the `is_first` dimension. If -`session_sequence` is 1 - then it belongs to the first session, otherwise - to a -repeated session. - -```javascript -cube('sessions', { - // ..., - - dimensions: { - is_first: { - type: `string`, - case: { - when: [{ sql: `${CUBE}.session_sequence = 1`, label: `First`}], - else: { label: `Repeat` } - } - } - }, - - measures: { - repeat_count: { - description: `Repeat Sessions Count`, - sql: `session_id`, - type: `count`, - filters: [ - { sql: `${is_first} = 'Repeat'` } - ] - }, - - repeat_percent: { - description: `Percent of Repeat Sessions`, - sql: `100.00 * ${repeat_count} / NULLIF(${count}, 0)`, - type: `number`, - format: `percent` - }, - }, -}); -``` - -### <--{"id" : "More metrics for Sessions"}--> Filter Sessions, where user performs specific event - -Often, you want to select specific sessions where a user performed some -important action. In the example below, we’ll filter out sessions where the -`form_submitted` event happened. To do that, we need to follow 3 steps: - -Define a measure on the Events cube to count only `form_submitted` events. - -```javascript -cube('events', { - // ..., - - // Add this measure to the `events` cube - measures: { - form_submitted_count: { - sql: `event_id`, - type: `count`, - filters: [ - { sql: `${CUBE}.event = 'form_submitted'` } - ] - } - }, -}); -``` - -Define a dimension `form_submitted_count` on the Sessions using subQuery. 
- -```javascript -cube('sessions', { - // ..., - - // Add this dimension to the `sessions` cube - dimensions: { - form_submitted_count: { - sql: `${events.form_submitted_count}`, - type: `number`, - subQuery: true - } - }, -}); -``` - -Create a measure to count only sessions where `form_submitted_count` is greater -than 0. - -```javascript -cube('sessions', { - // ..., - - // Add this measure to the `sessions` cube - measures: { - with_form_submitted_count: { - type: `count`, - sql: `session_id`, - filters: [ - { sql: `${form_submitted_count} > 0` } - ] - } - - }, -}); -``` - -Now we can use the `with_form_submitted_count` measure to get only sessions when -the `form_submitted` event occurred. - -[ref-modeling-syntax]: /data-modeling/syntax diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Analytics/funnels.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Analytics/funnels.mdx deleted file mode 100644 index 0b126e60ff576..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Analytics/funnels.mdx +++ /dev/null @@ -1,346 +0,0 @@ ---- -title: Implementing Funnel Analysis -permalink: /recipes/funnels -category: Examples & Tutorials -subCategory: Analytics -menuOrder: 3 -redirect_from: - - /funnels ---- - - - -This functionality only works with data models written in JavaScript, not YAML. -For more information, check out the [Data Modeling Syntax][ref-modeling-syntax] page. - - - -Funnels represent a series of events that lead users towards a defined goal. -Funnel analysis is an approach commonly used in product, marketing and sales -analytics. - -Regardless of the domain, every funnel has the following traits: - -- The identity of the object moving through the funnel – e.g. user or lead -- A set of steps, through which the object moves -- The date and time of each step -- The time to convert between steps - -Since funnels have a pretty standard structure, they are good candidates for -being extracted into reusable packages. 
Cube comes pre-packaged with a standard -funnel package. - -```javascript -// First step is to require the Funnel package -const Funnels = require(`Funnels`); - -cube(`PurchaseFunnel`, { - extends: Funnels.eventFunnel({ - userId: { - sql: `user_id`, - }, - time: { - sql: `timestamp`, - }, - steps: [ - { - name: `view_product`, - eventsView: { - sql: `select * from events where event = 'view_product'`, - }, - }, - { - name: `purchase_product`, - eventsView: { - sql: `select * from events where event = 'purchase_product'`, - }, - timeToConvert: '1 day', - }, - ], - }), -}); -``` - -Cube will generate an SQL query for this funnel. Since funnel analysis in SQL is -not straight forward, the SQL code itself is quite complicated, even for such a -small funnel. - - - {' '} - Show Funnel's SQL - - -
- -```sql -SELECT - purchase_funnel.step "purchase_funnel.step", - count(purchase_funnel.user_id) "purchase_funnel.conversions" -FROM - ( - WITH joined_events AS ( - select - view_product_events.user_id view_product_user_id, - purchase_product_events.user_id purchase_product_user_id, - view_product_events.t - FROM - ( - select - user_id user_id, - timestamp t - from - ( - select - * - from - events - where - event = 'view_product' - ) e - ) view_product_events - LEFT JOIN ( - select - user_id user_id, - timestamp t - from - ( - select - * - from - events - where - event = 'purchase_product' - ) e - ) purchase_product_events ON view_product_events.user_id = purchase_product_events.user_id - AND purchase_product_events.t >= view_product_events.t - AND ( - purchase_product_events.t :: timestamptz AT TIME ZONE 'America/Los_Angeles' - ) <= ( - view_product_events.t :: timestamptz AT TIME ZONE 'America/Los_Angeles' - ) + interval '1 day' - ) - select - user_id, - first_step_user_id, - step, - max(t) t - from - ( - SELECT - view_product_user_id user_id, - view_product_user_id first_step_user_id, - t, - 'View Product' step - FROM - joined_events - UNION ALL - SELECT - purchase_product_user_id user_id, - view_product_user_id first_step_user_id, - t, - 'Purchase Product' step - FROM - joined_events - ) as event_steps - GROUP BY - 1, - 2, - 3 - ) AS purchase_funnel -WHERE - ( - purchase_funnel.t >= '2018-07-01T07:00:00Z' :: timestamptz - AND purchase_funnel.t <= '2018-07-31T06:59:59Z' :: timestamptz - ) -GROUP BY - 1 -ORDER BY - 2 DESC -LIMIT - 5000 -``` - -
- -## Funnel parameters - -### <--{"id" : "Funnel parameters"}--> userId - -A unique key to identify the users moving through the funnel. - -```javascript -userId: { - sql: `user_id`; -} -``` - -### <--{"id" : "Funnel parameters"}--> nextStepUserId - -In the situation where `user_id` changes between steps, you can pass a unique -key to join two adjacent steps. For example, if a user signs in after having -been tracked anonymously until that point in the funnel, you could use -`nextStepUserId` to define a funnel where users are tracked by anonymous ID on -the first step and then by an identified user ID on subsequent steps. - -```javascript -const Funnels = require(`Funnels`); - -cube(`OnboardingFunnel`, { - extends: Funnels.eventFunnel({ - userId: { - sql: `id`, - }, - time: { - sql: `timestamp`, - }, - steps: [ - { - name: `View Page`, - eventsView: { - sql: `select anonymous_id as id, timestamp from pages`, - }, - }, - { - name: `Sign Up`, - eventsView: { - sql: `select anonymous_id as id, user_id, timestamp from sign_ups`, - }, - nextStepUserId: { - sql: `user_id`, - }, - timeToConvert: '1 day', - }, - { - name: `Action`, - eventsView: { - sql: `select user_id as id from actions`, - }, - timeToConvert: '1 day', - }, - ], - }), -}); -``` - -### <--{"id" : "Funnel parameters"}--> time - -A timestamp of the event. - -```javascript -time: { - sql: `timestamp`; -} -``` - -### <--{"id" : "Funnel parameters"}--> steps - -An array of steps. Each step has 2 required and 1 optional parameters: - -- **name** _(required)_ - Name of the step. It must be unique within a funnel. -- **eventsView** _(required)_ - Events table for the step. It must contain - `userId` and `time` fields. For example, if we have defined the userId as - `user_id` and time as `timestamp`, we need to have these fields in the table - we're selecting from. -- **timeToConvert** _(optional)_ - A time window during which conversion should - happen. Set it depending on your funnel logic. 
If this is set to `1 day`, for - instance, it means the funnel will include only users who made a purchase - within 1 day of visiting the product page. - -```javascript -steps: [ - { - name: `purchase_product`, - eventsView: { - sql: `select * from events where event = 'purchase_product'`, - }, - timeToConvert: '1 day', - }, -]; -``` - -## Joining funnels - -In order to provide additional dimensions, funnels can be joined with other -cubes using `user_id` at the first step of a funnel. This will always use a -`many_to_one` relationship, hence you should always join with the corresponding -user cube. Here, by 'user' we understand this to be any entity that can go -through a sequence of steps within funnel. It could be a real web user with an -auto assigned ID or a specific email sent by an email automation that goes -through a typical flow of events like 'sent', 'opened', 'clicked', and so on. -For example, for our `PurchaseFunnel` we can add a join to another funnel as -following: - -```javascript -cube(`PurchaseFunnel`, { - joins: { - Users: { - relationship: `many_to_one`, - sql: `${CUBE}.first_step_user_id = ${Users.id}`, - }, - }, - - extends: Funnels.eventFunnel({ - // ... - }), -}); -``` - -## Using funnels - -Cube is based on -[multidimensional analysis](https://en.wikipedia.org/wiki/Multidimensional_analysis) - -Funnel-based cubes have the following structure: - -### <--{"id" : "Using funnels"}--> Measures - -- **conversions** - Count of conversions in the funnel. The most useful when - broken down by **steps**. It's the classic funnel view. -- **conversionsPercent** - Percentage of conversions. It is useful when you want - to inspect a specific step, or set of steps, and find out how a conversion has - changed over time. - -### <--{"id" : "Using funnels"}--> Dimensions - -- **step** - Describes funnels' steps. Use it to break down **conversions** or - **conversionsPercent** by steps, or to filter for a specific step. 
-- **time** - time dimension for the funnel. Use it to filter your analysis for - specific dates or to analyze how conversion changes over time. - -In the following example, we use the `conversions` measure along with the -`steps` dimension to display a classic bar chart showing the funnel's steps. - - - -## Performance considerations - -Funnel joins are extremely heavy for most modern databases and complexity grows -in a non-linear way with the addition of steps. However, if the cardinality of -the first event isn't too high, very simple optimization can be applied: -[`originalSql` pre-aggregation][ref-schema-ref-preaggs-origsql]. - -It is best to use [partitioned rollups][ref-partitioned-rollups] to cache the -steps instead. Add one to the `PurchaseFunnel` cube as follows: - -```javascript -cube(`PurchaseFunnel`, { - extends: Funnels.eventFunnel({ - // ... - }), - - preAggregations: { - main: { - type: `originalSql`, - }, - }, -}); -``` - -[ref-modeling-syntax]: /data-modeling/syntax -[ref-partitioned-rollups]: /caching/using-pre-aggregations#time-partitioning -[ref-schema-ref-preaggs-origsql]: - /schema/reference/pre-aggregations#type-originalsql diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Auth/AWS-Cognito.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Auth/AWS-Cognito.mdx deleted file mode 100644 index e6d0d729e9c6a..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Auth/AWS-Cognito.mdx +++ /dev/null @@ -1,197 +0,0 @@ ---- -title: Authenticate requests to Cube with AWS Cognito -permalink: /recipes/authn-with-aws-cognito -category: Examples & Tutorials -subCategory: Authentication & Authorization -menuOrder: 12 -redirect_from: - - /security/jwt/aws-cognito ---- - -## Introduction - -In this guide, you'll learn how to integrate AWS Cognito authentication with a -Cube deployment. If you already have a pre-existing Cognito User Pool in AWS -that you'd like to re-use, please skip ahead to -[Configure Cube](#configure-cube). 
- -## Create and configure a User Pool - -If you haven't already created a User Pool, please follow [the instructions in -the AWS Cognito documentation][link-aws-cognito-hosted-ui] to create one, along -with enabling the Hosted UI. - -### <--{"id" : "Create and configure a User Pool"}--> Custom claims - -To add custom claims to the JWT, you will need to associate [a Lambda -function][link-aws-lambda] to the [Pre Token Generation event -trigger][link-aws-cognito-pre-token] available on your User Pool. - -First, go to the AWS Lambda Console and create new a Lambda function: - -
- -
- -Add the following code to the Lambda function: - -```javascript -exports.handler = (event, context, callback) => { - event.response = { - claimsOverrideDetails: { - claimsToAddOrOverride: { - 'http://localhost:4000/': JSON.stringify({ - company_id: 'company1', - user_id: event.request.userAttributes.sub, - roles: ['user'], - }), - }, - }, - }; - callback(null, event); -}; -``` - -Then navigate to the Amazon Cognito User Pools Console, select Triggers from the -left sidebar and associate the Lambda function you created previously: - -
- -
- -You can find more examples of [modifying claims in JWTs -here][link-aws-cognito-pretoken-example]. - -## Configure Cube - -Now we're ready to configure Cube to use AWS Cognito. Go to your Cube project -and open the `.env` file and add the following, replacing the values wrapped in -`<>`. - -```dotenv -CUBEJS_JWK_URL=https://cognito-idp..amazonaws.com//.well-known/jwks.json -CUBEJS_JWT_AUDIENCE= -CUBEJS_JWT_ISSUER=https://cognito-idp..amazonaws.com/ -CUBEJS_JWT_ALGS=RS256 -CUBEJS_JWT_CLAIMS_NAMESPACE= -``` - -## Testing with the Developer Playground - -### <--{"id" : "Testing with the Developer Playground"}--> Retrieving a JWT - -Go to the [OpenID Playground from Auth0][link-openid-playground] to and click -Configuration. - -
- -
- -Change the Server Template to Custom, and enter the following values: - -
- -
- -- **Discovery Document URL**: - `https://cognito-idp..amazonaws.com//.well-known/openid-configuration` -- **OIDC Client ID**: Retrieve from App Client settings page in AWS Cognito User - Pool Console -- **OIDC Client Secret**: Retrieve from App Client settings page in AWS Cognito - User Pool Console - -Click 'Use Discovery Document' to auto-fill the remaining values, then click -Save. - - - -If you haven't already, go back to the AWS Cognito App Client's settings and add -`https://openidconnect.net/callback` to the list of allowed callback URLs. - - - -Now click Start; and in a separate tab, go to the App Client's settings page and -click the Launch Hosted UI button. - -
- -
- -If the login is successful, you should be redirected to the OpenID Connect -Playground. Click on the Exchange button to exchange the code for your tokens: - -
- -
- -Click Next, and continue on to the next section and click the Verify button to -verify the JWT signature as well as decode the identity token: - -
- -
- -### <--{"id" : "Testing with the Developer Playground"}--> Set JWT in Developer Playground - -Now open the Developer Playground (at `http://localhost:4000`) and on the Build -page, click Add Security Context. - -
- -
- -Click the Token tab, paste the `id_token` from OpenID Playground and click the -Save button. - -
- -
- -Close the popup and use the Developer Playground to make a request. Any data -models using the [Security Context][ref-sec-ctx] should now work as expected. - -[link-aws-cognito-hosted-ui]: - https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-app-integration.html#cognito-user-pools-create-an-app-integration -[link-aws-cognito-pre-token]: - https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-token-generation.html -[link-aws-cognito-pretoken-example]: - https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-token-generation.html#aws-lambda-triggers-pre-token-generation-example-1 -[link-aws-lambda]: https://docs.aws.amazon.com/lambda/latest/dg/welcome.html -[link-openid-playground]: https://openidconnect.net/ -[ref-sec-ctx]: /security/context diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Auth/Auth0-Guide.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Auth/Auth0-Guide.mdx deleted file mode 100644 index 7e1223d2ac259..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Auth/Auth0-Guide.mdx +++ /dev/null @@ -1,254 +0,0 @@ ---- -title: Authenticate requests to Cube with Auth0 -permalink: /recipes/authn-with-auth0 -category: Examples & Tutorials -subCategory: Authentication & Authorization -menuOrder: 12 -redirect_from: - - /security/jwt/auth0 ---- - -## Introduction - -In this guide, you'll learn how to integrate Auth0 authentication with a Cube -deployment. If you already have a pre-existing application on Auth0 that you'd -like to re-use, please skip ahead to [Configure Cube][ref-config-auth0]. - -We'll be creating an Auth0 [application][link-auth0-docs-app] and -[API][link-auth0-docs-api], configuring a [rule on Auth0][link-auth0-docs-rules] -to add custom claims to vended JWTs, and finally configuring Cube to use Auth0. 
- -## Create an application - -First, go to the [Auth0 dashboard][link-auth0-app], and click on the -Applications menu option on the left and then click the Create Application -button. - -
- -
- -In the popup, set the name of your application and select Single Page Web -Applications. - -
- -
- -Next, go to the application's settings and add the appropriate callback URLs for -your application (`http://localhost:4000` for the Developer Playground). - -### <--{"id" : "Create an application"}--> Custom claims - -You can also configure custom claims for your JWT token. Auth0 has two SDKs -available; [Auth0.js][link-auth0-js] and the [Auth0 SPA -SDK][link-auth0-spa-sdk]. We recommend using the SPA SDK wherever possible, [as -per Auth0's own developer advice][gh-auth0-spa-sdk-issue34]. If you're using -`@auth0/auth0-angular` or `@auth0/auth0-react`, then the SPA SDK is -automatically included. - -Open the Auth0 dashboard, click on 'Rules' and add a rule to add any custom -claims to the JWT. - -#### Auth0 SPA SDK - - - -Take note of the value of `namespace` here, you will need it later to [configure -Cube][ref-config-auth0]. - - - -```javascript -function (user, context, callback) { - const namespace = "http://localhost:4000/"; - context.accessToken[namespace] = - { - 'company_id': 'company1', - 'user_id': user.user_id, - 'roles': ['user'], - }; - callback(null, user, context); -} -``` - -## Create an API - -If you're using the Auth0 SPA SDK, you'll also need to [create an -API][link-auth0-api]. First, go to the [Auth0 dashboard][link-auth0-app] and -click on the APIs menu option from the left sidebar, then click the Create API -button. - -
- -
- -In the 'New API' popup, set a name for this API and an identifier (e.g. -`cubejs-app`), then click the Create button. - -
- -
- - - -Take note of the Identifier here, as it is used to [set the JWT Audience option -in Cube][ref-config-auth0]. - - - -In your application code, configure your API identifier as the audience when -initializing Auth0. If you're using the `@auth0/auth-react` package for your -application front-end, this might look something like this: - -```tsx - {}} - audience="cubejs" -> -``` - -Refer to Auth0's documentation for instructions on configuring -[Angular][link-auth0-angular] or [Vue][link-auth0-vue] applications. - -## Configure Cube - -Now we're ready to configure Cube to use Auth0. Go to your Cube project and open -the `.env` file and add the following, replacing the values wrapped in `<>`. - -```dotenv -CUBEJS_JWK_URL=https://.auth0.com/.well-known/jwks.json -CUBEJS_JWT_AUDIENCE= -CUBEJS_JWT_ISSUER=https://.auth0.com/ -CUBEJS_JWT_ALGS=RS256 -CUBEJS_JWT_CLAIMS_NAMESPACE= -``` - -## Testing with the Developer Playground - -### <--{"id" : "Testing with the Developer Playground"}--> Retrieving a JWT - -Go to the [OpenID Playground from Auth0][link-openid-playground] to and click -Configuration. - -
- -
- -Enter the following values: - -- **Auth0 domain**: `.auth0.com` -- **OIDC Client ID**: Retrieve from Auth0 Application settings page -- **OIDC Client Secret**: Retrieve from Auth0 Application settings page -- **Audience**: Retrieve from Auth0 API settings - -Click 'Use Auth0 Discovery Document' to auto-fill the remaining values, then -click Save. - -
- -
- - - -If you haven't already, go back to the Auth0 application's settings and add -`https://openidconnect.net/callback` to the list of allowed callback URLs. - - - -Now click Start; if the login is successful, you should see the code, as well as -a button called 'Exchange'. Click on it to exchange the code for your tokens: - -
- -
- -Copy the `access_token` from the response, and use the [JWT.IO -Debugger][link-jwt-io-debug] to decode the token and verify any custom claims -were successfully added. - -### <--{"id" : "Testing with the Developer Playground"}--> Set JWT in Developer Playground - -Now open the Developer Playground (at `http://localhost:4000`) and on the Build -page, click Add Security Context. - -
- -
- -Click the Token tab, paste the JWT from OpenID Playground and click the Save -button. - -
- -
- -Close the popup and use the Developer Playground to make a request. Any data -models using the [Security Context][ref-sec-ctx] should now work as expected. - -## Example - -To help you get up and running, we have [an example project which is configured -to use Auth0][gh-cubejs-auth0-example]. You can use it as a starting point for -your own Cube application. You can also use our [Multi-Tenant Analytics with -Auth0 and Cube guide][link-multitenant-auth0-guide] for a more detailed -walkthrough. - -[link-auth0-angular]: https://auth0.com/docs/quickstart/spa/angular/01-login -[link-auth0-vue]: https://auth0.com/docs/quickstart/spa/vuejs/01-login -[link-auth0-docs-app]: https://auth0.com/docs/applications -[link-auth0-docs-api]: https://auth0.com/docs/get-started/set-up-apis -[link-auth0-docs-rules]: https://auth0.com/docs/rules -[gh-auth0-spa-sdk-issue34]: - https://github.com/auth0/auth0-spa-js/issues/34#issuecomment-505420895 -[link-auth0-app]: https://manage.auth0.com/ -[link-auth0-js]: https://auth0.com/docs/libraries/auth0js -[link-auth0-spa-sdk]: https://auth0.com/docs/libraries/auth0-spa-js -[link-auth0-api]: - https://auth0.com/docs/tokens/access-tokens#json-web-token-access-tokens -[link-jwt-io-debug]: https://jwt.io/#debugger-io -[link-openid-playground]: https://openidconnect.net/ -[ref-config-auth0]: #configure-cube -[ref-sec-ctx]: /security/context -[gh-cubejs-auth0-example]: - https://github.com/cube-js/cube/tree/master/examples/auth0 -[link-multitenant-auth0-guide]: https://multi-tenant-analytics.cube.dev/ diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Code-reusability/schema-generation.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Code-reusability/schema-generation.mdx deleted file mode 100644 index 55c4148ba14fa..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Code-reusability/schema-generation.mdx +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Implementing Schema Generation -permalink: 
/recipes/schema-generation -category: Examples & Tutorials -subCategory: Code reusability -menuOrder: 10 -redirect_from: - - /schema-generation ---- - - - -This functionality only works with data models written in JavaScript, not YAML. -For more information, check out the [Data Modeling Syntax][ref-modeling-syntax] page. - - - -Cube supports two ways to define data model files: with YAML or JavaScript -syntax. If you opt for JavaScript syntax, you can use the full power of this -programming language to configure your data model. In this guide we generate -several measure definitions based on an array of strings. - -One example, based on a real world scenario, is when you have a single `events` -table containing an `event_type` and `user_id` column. Based on this table you -want to create a separate user count measure for each event. - -It can be done as simple as - -```javascript -const events = ['app_engagement', 'login', 'purchase']; - -cube(`events`, { - sql_table: `events`, - - measures: Object.assign( - { - count: { - type: `count`, - }, - }, - events - .map((e) => ({ - [`${e}_user_count`]: { - type: `count_distinct`, - sql: `user_id`, - filters: [ - { - sql: `${CUBE}.event_type = '${e}'`, - }, - ], - }, - })) - .reduce((a, b) => Object.assign(a, b)) - ), -}); -``` - -In this case we use standard Javascript functions -[`Object.assign`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/assign), -[`Array.map`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/map) -and -[`Array.reduce`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/reduce) -to add user count measure definitions based on `events` array. This approach -allows you to maintain list of events in very concise manner without boilerplate -code. This configuration can be reused using -[export / import feature][ref-export-import]. 
- -Please refer to the -[`asyncModule()`](/schema/reference/execution-environment#async-module) -documentation to learn how to use databases and other data sources for data -model generation. - -[ref-modeling-syntax]: /data-modeling/syntax -[ref-export-import]: /schema/advanced/export-import diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/dynamic-union-tables.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/dynamic-union-tables.mdx deleted file mode 100644 index e31f34d76c507..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/dynamic-union-tables.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: Using Dynamic Union Tables -permalink: /recipes/dynamically-union-tables -category: Examples & Tutorials -subCategory: Data modeling -menuOrder: 4 -redirect_from: - - /dynamically-union-tables ---- - - - -This functionality only works with data models written in JavaScript, not YAML. -For more information, check out the [Data Modeling Syntax][ref-modeling-syntax] page. - - - -It is quite often the case that you may have a lot of different tables in a -database, which actually relate to the same entity. - -For example, you can have “per client” tables with the same data, but related to -different customers: `elon_musk_table`, `john_doe_table`, `steve_jobs_table`, -etc. In this case, it would make sense to create a **single** Cube for -customers, which should be backed by a union table from all customers tables. - -It would be annoying to union all required tables manually. Luckily, since Cube -supports modelling data in JavaScript, we have the full power of JavaScript at our disposal. 
-We can write a function, which will generate a union table from **all** our -customers’ tables: - -```javascript -// model/utils.js -const customerTableNames = [ - { name: 'Albert Einstein', tablePrefix: 'albert_einstein' }, - { name: 'Blaise Pascal', tablePrefix: 'blaise_pascal' }, - { name: 'Isaac Newton', tablePrefix: 'isaac_newton' }, - { name: 'Charles Darwin', tablePrefix: 'charles_darwin' }, - { name: 'Michael Faraday', tablePrefix: 'michael_faraday' }, - { name: 'Enrico Fermi', tablePrefix: 'enrico_fermi' }, - { name: 'Thomas Edison', tablePrefix: 'thomas_edison' }, -]; - -export function unionData() { - return customerTableNames - .map( - (p) => `select - name, - email, - id, - order_id, - created_at, - '${p.name}' customer_name - from ${p.tablePrefix}_customer - ` - ) - .join(' UNION ALL '); -} -``` - -Then we can use the `unionData()` function inside the `Customers` cube. -`customer_name` would become a dimension to allow us to break down the data by -certain customers. - -```javascript -import { unionData } from '../utils'; - -cube(`customers`, { - sql: unionData(), - - measures: { - count: { - type: `count`, - }, - }, - - dimensions: { - customer_name: { - sql: `customer_name`, - type: `string`, - }, - }, -}); -``` - -[ref-modeling-syntax]: /data-modeling/syntax diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/entity-attribute-value.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/entity-attribute-value.mdx deleted file mode 100644 index 508ee6934d5a2..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/entity-attribute-value.mdx +++ /dev/null @@ -1,341 +0,0 @@ ---- -title: Implementing Entity-Attribute-Value Model (EAV) -permalink: /recipes/entity-attribute-value -category: Examples & Tutorials -subCategory: Data modeling -menuOrder: 4 ---- - -## Use case - -We want to create a cube for a dataset which uses the 
-[Entity-Attribute-Value](https://en.wikipedia.org/wiki/Entity–attribute–value_model) -model (EAV). It stores entities in a table that can be joined to another table -with numerous attribute-value pairs. Each entity is not guaranteed to have the -same set of associated attributes, thus making the entity-attribute-value -relation a sparse matrix. In the cube, we'd like every attribute to be modeled -as a dimension. - -## Data modeling - -Let's explore the `users` cube that contains the entities: - -```javascript -cube(`users`, { - sql_table: `users`, - - joins: { - orders: { - relationship: 'one_to_many', - sql: `${CUBE}.id = ${orders.user_id}`, - }, - }, - - dimensions: { - name: { - sql: `first_name || ' ' || last_name`, - type: `string`, - }, - }, -}); -``` - -The `users` cube is joined with the `orders` cube to reflect that there might be -many orders associated with a single user. The orders remain in various -statuses, as reflected by the `status` dimension, and their creation dates are -available via the `created_at` dimension: - -```javascript -cube(`orders`, { - sql_table: `orders`, - - dimensions: { - user_id: { - sql: `user_id`, - type: `string`, - }, - - status: { - sql: `status`, - type: `string`, - }, - - created_at: { - sql: `created_at`, - type: `time`, - }, - }, -}); -``` - -Currently, the dataset contains orders in the following statuses: - -```javascript -[ - { - 'orders.status': 'completed', - }, - { - 'orders.status': 'processing', - }, - { - 'orders.status': 'shipped', - }, -]; -``` - -Let's say that we'd like to know, for each user, the earliest creation date for -their orders in any of these statuses. 
In terms of the EAV model: - -- the users serve as _entities_ and they should be modeled with a _cube_ -- order statuses serve as _attributes_ and they should be modeled as - _dimensions_ -- the earliest creation dates for each status serve as attribute _values_ and - they will be modeled as _dimension values_ - -Let's explore some possible ways to model that. - -### <--{"id" : "Data modeling"}--> Static attributes - -We already know that the following statuses are present in the dataset: -`completed`, `processing`, and `shipped`. Let's assume this set of statuses is -not going to change often. - -Then, modeling the cube is as simple as defining a few joins (one join per -attribute): - -```javascript -cube(`users_statuses_joins`, { - sql: ` - SELECT - users.first_name, - users.last_name, - MIN(cOrders.created_at) AS cCreatedAt, - MIN(pOrders.created_at) AS pCreatedAt, - MIN(sOrders.created_at) AS sCreatedAt - FROM public.users AS users - LEFT JOIN public.orders AS cOrders - ON users.id = cOrders.user_id AND cOrders.status = 'completed' - LEFT JOIN public.orders AS pOrders - ON users.id = pOrders.user_id AND pOrders.status = 'processing' - LEFT JOIN public.orders AS sOrders - ON users.id = sOrders.user_id AND sOrders.status = 'shipped' - GROUP BY 1, 2 - `, - - dimensions: { - name: { - sql: `first_name || ' ' || last_name`, - type: `string`, - }, - - completed_created_at: { - sql: `cCreatedAt`, - type: `time`, - }, - - processing_created_at: { - sql: `pCreatedAt`, - type: `time`, - }, - - shipped_created_at: { - sql: `sCreatedAt`, - type: `time`, - }, - }, -}); -``` - -Querying the cube would yield data like this. As we can see, every user has -attributes that show the earliest creation date for their orders in all three -statuses. However, some attributes don't have values (meaning that a user -doesn't have orders in this status). 
- -```javascript -[ - { - 'users_statuses_joins.name': 'Ally Blanda', - 'users_statuses_joins.completed_created_at': '2019-03-05T00:00:00.000', - 'users_statuses_joins.processing_created_at': null, - 'users_statuses_joins.shipped_created_at': '2019-04-06T00:00:00.000', - }, - { - 'users_statuses_joins.name': 'Cayla Mayert', - 'users_statuses_joins.completed_created_at': '2019-06-14T00:00:00.000', - 'users_statuses_joins.processing_created_at': '2021-05-20T00:00:00.000', - 'users_statuses_joins.shipped_created_at': null, - }, - { - 'users_statuses_joins.name': 'Concepcion Maggio', - 'users_statuses_joins.completed_created_at': null, - 'users_statuses_joins.processing_created_at': '2020-07-14T00:00:00.000', - 'users_statuses_joins.shipped_created_at': '2019-07-19T00:00:00.000', - }, -]; -``` - -The drawback is that when the set of statuses changes, we'll need to amend the -cube definition in several places: update selected values and joins in SQL as -well as update the dimensions. Let's see how to work around that. - -### <--{"id" : "Data modeling"}--> Static attributes, DRY version - -We can embrace the -[Don't Repeat Yourself](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) -principle and eliminate the repetition by generating the cube definition -dynamically based on the list of statuses. Let's create a new JavaScript model -so we can move all repeated code patterns into handy functions and iterate over -statuses in relevant parts of the cube's code. 
- -```javascript -const statuses = ['completed', 'processing', 'shipped']; - -const createValue = (status, index) => - `MIN(orders_${index}.created_at) AS created_at_${index}`; - -const createJoin = (status, index) => - `LEFT JOIN public.orders AS orders_${index} - ON users.id = orders_${index}.user_id - AND orders_${index}.status = '${status}'`; - -const createDimension = (status, index) => ({ - [`${status}_created_at`]: { - sql: (CUBE) => `created_at_${index}`, - type: `time`, - }, -}); - -cube(`users_statuses_DRY`, { - sql: ` - SELECT - users.first_name, - users.last_name, - ${statuses.map(createValue).join(',')} - FROM public.users AS users - ${statuses.map(createJoin).join('')} - GROUP BY 1, 2 - `, - - dimensions: Object.assign( - { - name: { - sql: `first_name || ' ' || last_name`, - type: `string`, - }, - }, - statuses.reduce( - (all, status, index) => ({ - ...all, - ...createDimension(status, index), - }), - {} - ) - ), -}); -``` - -The new `users_statuses_DRY` cube is functionally identical to the -`users_statuses_joins` cube above. Querying this new cube would yield the same -data. However, there's still a static list of statuses present in the cube's -source code. Let's work around that next. - -### <--{"id" : "Data modeling"}--> Dynamic attributes - -We can eliminate the list of statuses from the cube's code by loading this list -from an external source, e.g., the data source. Here's the code from the -`fetch.js` file that defines the `fetchStatuses` function that would load the -statuses from the database. Note that it uses the `pg` package (Node.js client -for Postgres) and reuses the credentials from Cube. 
- -```javascript -const { Pool } = require('pg'); - -const pool = new Pool({ - host: process.env.CUBEJS_DB_HOST, - port: process.env.CUBEJS_DB_PORT, - user: process.env.CUBEJS_DB_USER, - password: process.env.CUBEJS_DB_PASS, - database: process.env.CUBEJS_DB_NAME, -}); - -const statusesQuery = ` - SELECT DISTINCT status - FROM public.orders -`; - -exports.fetchStatuses = async () => { - const client = await pool.connect(); - const result = await client.query(statusesQuery); - client.release(); - - return result.rows.map((row) => row.status); -}; -``` - -In the cube file, we will use the `fetchStatuses` function to load the list of -statuses. We will also wrap the cube definition with the `asyncModule` built-in -function that allows the data model to be created -[dynamically](https://cube.dev/docs/schema/advanced/dynamic-schema-creation). - -```javascript -const fetchStatuses = require('../fetch').fetchStatuses; - -asyncModule(async () => { - const statuses = await fetchStatuses(); - - const createValue = (status, index) => - `MIN(orders_${index}.created_at) AS created_at_${index}`; - - const createJoin = (status, index) => - `LEFT JOIN public.orders AS orders_${index} - ON users.id = orders_${index}.user_id - AND orders_${index}.status = '${status}'`; - - const createDimension = (status, index) => ({ - [`${status}_created_at`]: { - sql: (CUBE) => `created_at_${index}`, - type: `time`, - }, - }); - - cube(`users_statuses_dynamic`, { - sql: ` - SELECT - users.first_name, - users.last_name, - ${statuses.map(createValue).join(',')} - FROM public.users AS users - ${statuses.map(createJoin).join('')} - GROUP BY 1, 2 - `, - - dimensions: Object.assign( - { - name: { - sql: `first_name || ' ' || last_name`, - type: `string`, - }, - }, - statuses.reduce( - (all, status, index) => ({ - ...all, - ...createDimension(status, index), - }), - {} - ) - ), - }); -}); -``` - -Again, the new `users_statuses_dynamic` cube is functionally identical to the -previously created cubes. 
So, querying this new cube would yield the same data -too. - -## Source code - -Please feel free to check out the -[full source code](https://github.com/cube-js/cube/tree/master/examples/recipes/entity-attribute-value) -or run it with the `docker-compose up` command. You'll see the result, including -queried data, in the console. diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/passing-dynamic-parameters-in-a-query.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/passing-dynamic-parameters-in-a-query.mdx deleted file mode 100644 index 066420bee97dc..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/passing-dynamic-parameters-in-a-query.mdx +++ /dev/null @@ -1,198 +0,0 @@ ---- -title: Passing Dynamic Parameters in a Query -permalink: /recipes/passing-dynamic-parameters-in-a-query -category: Examples & Tutorials -subCategory: Data modeling -menuOrder: 4 ---- - -## Use case - -We want to know the ratio between the number of people in a particular city and -the total number of women in the country. The user can specify the city for the -filter. The trick is to get the value of the city from the user and use it in -the calculation. In the recipe below, we can learn how to join the data table -with itself and reshape the dataset! - -## Data modeling - -Let's explore the `users` cube data that contains various information about -users, including city and gender: - -| id | city | gender | name | -| --- | -------- | ------ | --------------- | -| 1 | Seattle | female | Wendell Hamill | -| 2 | Chicago | male | Rahsaan Collins | -| 3 | New York | female | Megane O'Kon | -| ... | ... | ... | ... | - -To calculate the ratio between the number of women in a particular city and the -total number of people in the country, we need to define three measures. One of -them can receive the city value from the filter in a query. Cube will apply this -filter via the `WHERE` clause to the dataset. 
So, we need to reshape the dataset -so that applying this filter wouldn’t affect the calculations. In this use case, -we can join the data table with itself to multiply the `city` column — applying -the filter would remove the multiplication while still allowing to access the -filter value: - - - -```yaml -cubes: - - name: users - sql: > - WITH data AS ( - SELECT - users.id AS id, - users.city AS city, - users.gender AS gender - FROM public.users - ), - cities AS ( - SELECT city - FROM data - ), - grouped AS ( - SELECT - cities.city AS city_filter, - data.id AS id, - data.city AS city, - data.gender AS gender - FROM cities, data - GROUP BY 1, 2, 3, 4 - ) - SELECT * - FROM grouped - - measures: - - name: total_number_of_women - sql: id - type: count - filters: - - sql: "gender = 'female'" - - - name: number_of_people_of_any_gender_in_the_city: - sql: id - type: count - filters: - - sql: "city = city_filter" - - - name: ratio - title: Ratio Women in the City to Total Number of People - sql: > - 1.0 * {number_of_people_of_any_gender_in_the_city} / - {total_number_of_women} - type: number - - dimensions: - - name: city_filter - sql: city_filter - type: string -``` - -```javascript -cube(`users`, { - sql: ` - WITH data AS ( - SELECT - users.id AS id, - users.city AS city, - users.gender AS gender - FROM public.users - ), - - cities AS ( - SELECT city - FROM data - ), - - grouped AS ( - SELECT - cities.city AS city_filter, - data.id AS id, - data.city AS city, - data.gender AS gender - FROM cities, data - GROUP BY 1, 2, 3, 4 - ) - - SELECT * - FROM grouped - `, - - measures: { - total_number_of_women: { - sql: 'id', - type: 'count', - filters: [{ sql: `${CUBE}.gender = 'female'` }], - }, - - number_of_people_of_any_gender_in_the_city: { - sql: 'id', - type: 'count', - filters: [{ sql: `${CUBE}.city = ${CUBE}.city_filter` }], - }, - - ratio: { - title: 'Ratio Women in the City to Total Number of People', - sql: ` - 1.0 * ${CUBE.number_of_people_of_any_gender_in_the_city} 
/ - ${CUBE.total_number_of_women}`, - type: `number`, - }, - }, - - dimensions: { - city_filter: { - sql: `city_filter`, - type: `string`, - }, - }, -}); -``` - - - -## Query - -To get the ratio result depending on the city, we need to pass the value via a -filter in the query: - -```json -{ - "measures": [ - "users.total_number_of_women", - "users.number_of_people_of_any_gender_in_the_city", - "users.ratio" - ], - "filters": [ - { - "member": "users.city_filter", - "operator": "equals", - "values": ["Seattle"] - } - ] -} -``` - -## Result - -By joining the data table with itself and using the dimensions defined above, we -can get the ratio we wanted to achieve: - -```json -[ - { - "users.total_number_of_women": "259", - "users.number_of_people_of_any_gender_in_the_city": "99", - "users.ratio": "0.38223938223938223938" - } -] -``` - -## Source code - -Please feel free to check out the -[full source code](https://github.com/cube-js/cube/tree/master/examples/recipes/passing-dynamic-parameters-in-query) -or run it with the `docker-compose up` command. You'll see the result, including -queried data, in the console. diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/percentiles.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/percentiles.mdx deleted file mode 100644 index 1ca32639bb056..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/percentiles.mdx +++ /dev/null @@ -1,133 +0,0 @@ ---- -title: Calculating Average and Percentiles -permalink: /recipes/percentiles -category: Examples & Tutorials -subCategory: Data modeling -menuOrder: 4 ---- - -## Use case - -We want to understand the distribution of values for a certain numeric property -within a dataset. We're used to average values and intuitively understand how to -calculate them. 
However, we also know that average values can be misleading for -[skewed](https://en.wikipedia.org/wiki/Skewness) distributions which are common -in the real world: for example, 2.5 is the average value for both `(1, 2, 3, 4)` -and `(0, 0, 0, 10)`. - -So, it's usually better to use -[percentiles](https://en.wikipedia.org/wiki/Percentile). Parameterized by a -fractional number `n = 0..1`, where the n-th percentile is equal to a value that -exceeds a specified ratio of values in the distribution. The -[median](https://en.wikipedia.org/wiki/Median) is a special case: it's defined -as the 50th percentile (`n = 0.5`), and it can be casually thought of as "the -middle" value. 2.5 and 0 are the medians of `(1, 2, 3, 4)` and `(0, 0, 0, 10)`, -respectively. - -## Data modeling - -Let's explore the data in the `users` cube that contains various demographic -information about users, including their age: - -```javascript -[ - { - 'users.name': 'Abbott, Breanne', - 'users.age': 52, - }, - { - 'users.name': 'Abbott, Dallas', - 'users.age': 43, - }, - { - 'users.name': 'Abbott, Gia', - 'users.age': 36, - }, - { - 'users.name': 'Abbott, Tom', - 'users.age': 39, - }, - { - 'users.name': 'Abbott, Ward', - 'users.age': 67, - }, -]; -``` - -Calculating the average age is as simple as defining a measure with the built-in -[`avg` type](https://cube.dev/docs/schema/reference/types-and-formats#measures-types-avg). -Calculating the percentiles would require using database-specific functions. -However, almost every database has them under names of `PERCENTILE_CONT` and -`PERCENTILE_DISC`, -[Postgres](https://www.postgresql.org/docs/current/functions-aggregate.html) and -[BigQuery](https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-and-operators#aggregate_functions) -included. - - - -```yaml -cubes: - - name: users - # ... 
- - measures: - - name: avg_age - type: avg - sql: age - - - name: median_age - type: number - sql: PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY age) - - - name: p95_age - type: number - sql: PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY age) -``` - -```javascript -cube("users", { - measures: { - avg_age: { - sql: `age`, - type: `avg`, - }, - - median_age: { - sql: `PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY age)`, - type: `number`, - }, - - p95_age: { - sql: `PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY age)`, - type: `number`, - }, - }, -}); -``` - - - -## Result - -Using the measures defined above, we can explore statistics about the age of our -users. - -```json -[ - { - "users.avg_age": "52.3100000000000000", - "users.median_age": 53, - "users.p95_age": 82 - } -] -``` - -For this particular dataset, the average age closely matches the median age, and -95% of all users are younger than 82 years. - -## Source code - -Please feel free to check out the -[full source code](https://github.com/cube-js/cube/tree/master/examples/recipes/percentiles) -or run it with the `docker-compose up` command. You'll see the result, including -queried data, in the console. diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/snapshots.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/snapshots.mdx deleted file mode 100644 index 6baf2e726d124..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/snapshots.mdx +++ /dev/null @@ -1,185 +0,0 @@ ---- -title: Implementing Data Snapshots -permalink: /recipes/snapshots -category: Examples & Tutorials -subCategory: Data modeling -menuOrder: 4 ---- - - - -This functionality only works with data models written in JavaScript, not YAML. -For more information, check out the [Data Modeling Syntax][ref-modeling-syntax] page. 
- - - -## Use case - -For a dataset that contains a sequence of changes to a property over time, we -want to be able to get the most recent state of said property at any given date. -In this recipe, we'll learn how to calculate snapshots of statuses at any given -date for a cube with `Product Id`, `Status`, and `Changed At` dimensions. - - - -We can consider the status property to be a -[slowly changing dimension](https://en.wikipedia.org/wiki/Slowly_changing_dimension) -(SCD) of type 2. Modeling data with slowly changing dimensions is an -essential part of the data engineering skillset. - - - -## Data modeling - -Let's explore the `statuses` cube that contains data like this: - -```json -[ - { - "statuses.order_id": 1, - "statuses.status": "shipped", - "statuses.changed_at": "2019-01-19T00:00:00.000" - }, - { - "statuses.order_id": 1, - "statuses.status": "processing", - "statuses.changed_at": "2019-03-14T00:00:00.000" - }, - { - "statuses.order_id": 1, - "statuses.status": "completed", - "statuses.changed_at": "2019-01-25T00:00:00.000" - }, - { - "statuses.order_id": 2, - "statuses.status": "processing", - "statuses.changed_at": "2019-08-21T00:00:00.000" - }, - { - "statuses.order_id": 2, - "statuses.status": "completed", - "statuses.changed_at": "2019-04-13T00:00:00.000" - }, - { - "statuses.order_id": 2, - "statuses.status": "shipped", - "statuses.changed_at": "2019-03-18T00:00:00.000" - } -] -``` - -We can see that statuses change occasionally. How do we count orders that -remained in the `shipped` status at a particular date? - -First, we need to generate a range with all dates of interest, from the earliest -to the latest. Second, we need to join the dates with the statuses and leave -only the most recent statuses to date. 
- -```javascript -cube(`status_snapshots`, { - extends: statuses, - - sql: ` - -- Create a range from the earlist date to the latest date - WITH range AS ( - SELECT date - FROM GENERATE_SERIES( - (SELECT MIN(changed_at) FROM ${statuses.sql()} AS statuses), - (SELECT MAX(changed_at) FROM ${statuses.sql()} AS statuses), - INTERVAL '1 DAY' - ) AS date - ) - - -- Calculate snapshots for every date in the range - SELECT range.date, statuses.* - FROM range - LEFT JOIN ${statuses.sql()} AS statuses - ON range.date >= statuses.changed_at - AND statuses.changed_at = ( - SELECT MAX(changed_at) - FROM ${statuses.sql()} AS sub_statuses - WHERE sub_statuses.order_id = statuses.order_id - ) - `, - - dimensions: { - date: { - sql: `date`, - type: `time`, - }, - }, -}); -``` - - - -To generate a range of dates, here we use the -[`GENERATE_SERIES` function](https://www.postgresql.org/docs/9.1/functions-srf.html) -which is Postgres-specific. Other databases have similar functions, e.g., -[`GENERATE_DATE_ARRAY`](https://cloud.google.com/bigquery/docs/reference/standard-sql/array_functions#generate_date_array) -in BigQuery. - - - -Please note that it makes sense to make the `status_snapshots` cube -[extend](https://cube.dev/docs/schema/reference/cube#parameters-extends) the -original `statuses` cube in order to reuse the dimension definitions. We only -need to add a new dimension that indicates the `date` of a snapshot. We're also -referencing the definition of the `statuses` cube with the -[`sql()` property](https://cube.dev/docs/schema/reference/cube#parameters-sql). 
- -## Query - -To count orders that remained in the `shipped` status at a particular date, we -will send a query that selects a snapshot by this date and also filters by the -status: - -```json -{ - "measures": ["status_snapshots.count"], - "filters": [ - { - "member": "status_snapshots.date", - "operator": "equals", - "values": ["2019-04-01"] - }, - { - "member": "status_snapshots.status", - "operator": "equals", - "values": ["shipped"] - } - ] -} -``` - -## Result - -If we execute a couple of such queries for distinct dates, we'll spot the -change: - -```json5 -// Shipped as of April 1, 2019: -[ - { - "status_snapshots.count": 16, - } -]; -``` - -```json5 -// Shipped as of May 1, 2019: -[ - { - "status_snapshots.count": 25, - } -] -``` - -## Source code - -Please feel free to check out the -[full source code](https://github.com/cube-js/cube/tree/master/examples/recipes/snapshots) -or run it with the `docker-compose up` command. You'll see the result, including -queried data, in the console. - -[ref-modeling-syntax]: /data-modeling/syntax diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/using-dynamic-measures.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/using-dynamic-measures.mdx deleted file mode 100644 index 0a0fd2e8e7826..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Data-modeling/using-dynamic-measures.mdx +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: Using Dynamic Measures -permalink: /recipes/referencing-dynamic-measures -category: Examples & Tutorials -subCategory: Data modeling -menuOrder: 4 ---- - - - -This functionality only works with data models written in JavaScript, not YAML. -For more information, check out the [Data Modeling Syntax][ref-modeling-syntax] page. - - - -## Use case - -We want to understand the distribution of orders by their statuses. Let's -imagine that new order statuses can be added in the future, or we get a list of -statuses from an external API. 
To calculate the orders percentage distribution, -we need to create several [measures](/schema/fundamentals/concepts#measures) -that refer to each other. But we don't want to manually change the data model for -each new status. To solve this, we will create a -[schema dynamically](/schema/advanced/dynamic-schema-creation). - -## Data modeling - -To calculate the number of orders as a percentage, we need to know the total -number of orders and the number of orders with the desired status. We'll create -two measures for this. To calculate a percentage, we'll create a measure that -refers to another measure. - -```javascript -const statuses = ['processing', 'shipped', 'completed']; - -const createTotalByStatusMeasure = (status) => ({ - [`total_${status}_orders`]: { - title: `Total ${status} orders`, - type: `count`, - filters: [ - { - sql: (CUBE) => `${CUBE}."status" = '${status}'`, - }, - ], - }, -}); - -const createPercentageMeasure = (status) => ({ - [`percentage_of_${status}`]: { - title: `Percentage of ${status} orders`, - type: `number`, - format: `percent`, - sql: (CUBE) => - `ROUND(${CUBE[`total_${status}_orders`]}::NUMERIC / ${ - CUBE.total_orders - }::NUMERIC * 100.0, 2)`, - }, -}); - -cube(`orders`, { - sql_table: `orders`, - - measures: Object.assign( - { - total_orders: { - type: `count`, - title: `Total orders`, - }, - }, - statuses.reduce( - (all, status) => ({ - ...all, - ...createTotalByStatusMeasure(status), - ...createPercentageMeasure(status), - }), - {} - ) - ), -}); -``` - -## Result - -Using the measures defined above, we can explore the orders percentage -distribution and easily create new measures just by adding a new status. 
- -```javascript -[ - { - 'orders.percentage_of_processing': '33.54', - 'orders.percentage_of_shipped': '33.00', - 'orders.percentage_of_completed': '33.46', - }, -]; -``` - -## Source code - -Please feel free to check out the -[full source code](https://github.com/cube-js/cube/tree/master/examples/recipes/referencing-dynamic-measures) -or run it with the `docker-compose up` command. You'll see the result, including -queried data, in the console. - -[ref-modeling-syntax]: /data-modeling/syntax diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Data-sources/multiple-sources-same-schema.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Data-sources/multiple-sources-same-schema.mdx deleted file mode 100644 index 84d8a5440b380..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Data-sources/multiple-sources-same-schema.mdx +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: Using Multiple Data Sources -permalink: /recipes/multiple-sources-same-schema -category: Examples & Tutorials -subCategory: Data sources -menuOrder: 3 ---- - -## Use case - -We need to access the data from different data sources for different tenants. -For example, we are the platform for the online website builder, and each client -can only view their data. The same data model is used for all clients. - -## Configuration - -Each client has its own database. In this recipe, the `Mango Inc` tenant keeps -its data in the remote `ecom` database while the `Avocado Inc` tenant works with -the local database (bootstrapped in the `docker-compose.yml` file) which has the -same data model. - -To enable multitenancy, use the -[`contextToAppId`](https://cube.dev/docs/config#options-reference-context-to-app-id) -function to provide distinct identifiers for each tenant. Also, implement the -[`driverFactory`](https://cube.dev/docs/config#options-reference-driver-factory) -function where you can select a data source based on the tenant name. 
-[JSON Web Token](https://cube.dev/docs/security) includes information about the
-tenant name in the `tenant` property of the `securityContext`.
-
-```javascript
-module.exports = {
-  // Provides distinct identifiers for each tenant which are used as caching keys
-  contextToAppId: ({ securityContext }) =>
-    `CUBEJS_APP_${securityContext.tenant}`,
-
-  // Selects the database connection configuration based on the tenant name
-  driverFactory: ({ securityContext }) => {
-    if (!securityContext.tenant) {
-      throw new Error('No tenant found in Security Context!');
-    }
-
-    if (securityContext.tenant === 'Avocado Inc') {
-      return {
-        type: 'postgres',
-        database: 'localDB',
-        host: 'postgres',
-        user: 'postgres',
-        password: 'example',
-        port: '5432',
-      };
-    }
-
-    if (securityContext.tenant === 'Mango Inc') {
-      return {
-        type: 'postgres',
-        database: 'ecom',
-        host: 'demo-db.cube.dev',
-        user: 'cube',
-        password: '12345',
-        port: '5432',
-      };
-    }
-
-    throw new Error('Unknown tenant in Security Context');
-  },
-};
-```
-
-## Query
-
-To get users for different tenants, we will send two identical requests with
-different JWTs. Also, we send a query with an unknown tenant to show that it
-cannot access the data model of other tenants.
- -```json5 -// JWT payload for "Avocado Inc" -{ - "sub": "1234567890", - "tenant": "Avocado Inc", - "iat": 1000000000, - "exp": 5000000000 -} -``` - -```json5 -// JWT payload for "Mango Inc" -{ - "sub": "1234567890", - "tenant": "Mango Inc", - "iat": 1000000000, - "exp": 5000000000 -} -``` - -```json5 -// JWT payload for "Peach Inc" -{ - "sub": "1234567890", - "tenant": "Peach Inc", - "iat": 1000000000, - "exp": 5000000000 -} -``` - -## Result - -We have received different data from different data sources depending on the -tenant's name: - -```json5 -// Avocado Inc last users: -[ - { - "Users.id": 700, - "Users.name": "Freddy Gulgowski", - }, - { - "Users.id": 699, - "Users.name": "Julie Crooks", - }, - { - "Users.id": 698, - "Users.name": "Macie Ryan", - }, -] -``` - -```json5 -// Mango Inc last users: -[ - { - "Users.id": 705, - "Users.name": "Zora Vallery", - }, - { - "Users.id": 704, - "Users.name": "Fawn Danell", - }, - { - "Users.id": 703, - "Users.name": "Moyra Denney", - }, -]; -``` - -```json5 -// Peach Inc error: -{"error": "Error: Unknown tenant in Security Context"} -``` - -## Source code - -Please feel free to check out the -[full source code](https://github.com/cube-js/cube/tree/master/examples/recipes/multiple-data-sources) -or run it with the `docker-compose up` command. You'll see the result, including -queried data, in the console. 
diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Data-sources/using-ssl-connections-to-data-source.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Data-sources/using-ssl-connections-to-data-source.mdx deleted file mode 100644 index 2775780955ce7..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Data-sources/using-ssl-connections-to-data-source.mdx +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: Using SSL Connections to a Data Source -permalink: /recipes/enable-ssl-connections-to-database -category: Examples & Tutorials -subCategory: Data sources -menuOrder: 3 ---- - -Cube supports SSL-encrypted connections to various data sources. -Please check the documentation for a particular [data source][ref-config-db] -for specific instructions. - -## Cube Core - -To enable it, set the `CUBEJS_DB_SSL` environment variable to `true`. Cube can -also be configured to use custom connection settings. For example, to use a -custom certificate authority and certificates, you could do the following: - -```dotenv -CUBEJS_DB_SSL_CA=/path/to/ssl/ca.pem -CUBEJS_DB_SSL_CERT=/path/to/ssl/cert.pem -CUBEJS_DB_SSL_KEY=/path/to/ssl/key.pem -``` - -You can also set the above environment variables to the contents of the PEM -files; for example: - -```dotenv -CUBEJS_DB_SSL_CA="-----BEGIN CERTIFICATE----- -MIIDDjCCAfYCCQCN/HhSZ3ofTDANBgkqhkiG9w0BAQsFADBJMQswCQYDVQQGEwJV -SzEMMAoGA1UECgwDSUJNMQ0wCwYDVQQLDARBSU9TMR0wGwYDVQQDDBRhaW9zLW9y -Y2gtZGV2LWVudi1DQTAeFw0yMTAyMTUyMzIyMTZaFw0yMzEyMDYyMzIyMTZaMEkx -CzAJBgNVBAYTAlVLMQwwCgYDVQQKDANJQk0xDTALBgNVBAsMBEFJT1MxHTAbBgNV -BAMMFGFpb3Mtb3JjaC1kZXYtZW52LUNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEAyhYY9+4TduTsNRh/6MaRtE59j8HkAkoQYvNYZN7D1j1oV6yhzitn -oN4bD+HiQWe4J3mwAaJOAAJRCkIVyUXxwZUCPxGN/KVha/pcB8hN6LHfI6vInixp -U9kHNYWWBn428nMeMqts7yqly/HwG1/qO+j4178c8lZNS7Uwh76y+lAEaIkeBipq -i4WuCOiChFc/sIV7g4DcLKKbqzDWtRDjbsg7JRfsALO5gM360GrNYkhV4C5lm8Eh -ozNuaPhS65zO93PMj/3UTyuctXKa7WpaHJHoKZRXAuOwSamvqvFgIQ0SSnW+qcud 
-fL3GAPJn7d065gh7JvgcT86v7WWBiUNs0QIDAQABMA0GCSqGSIb3DQEBCwUAA4IB -AQCzw00d8e0e5AYZtzIk9hjczta7JHy2/cwTMv0opzBk6C26G6YZww+9brHW2w5U -mY/HKBnGnMadjMWOZmm9Vu0B0kalYY0lJdE8alO1aiv5B9Ms/XIt7FzzGtfv9gYJ -cw5/nzGBBMJNICC1kVLnzzlllLferhCIrczDyPcu16o1Flc7q1p8AbwQpC+A2I/L -8nWlFeHZ+watLtQ1lF3qDzzCumPHrJqAGmlp0265owCM8Q5zv8AL5DStIZvtexrI -JqbwLdbA8smyOFRwCckOWcWjnrEDjO2e3NLWINbB7Z4ZRviZSEH5UZlDLVu+ahGV -KmZIuh7+XpXzJ1MN0SBZXgXH ------END CERTIFICATE-----" -``` - -For a complete list of SSL-related environment variables, consult the -[Environment Variables Reference][ref-env-var]. - -## Cube Cloud - -When setting up a new deployment, select the SSL checkbox when entering -database credentials: - - - -To use custom SSL certificates between Cube Cloud and your database server, go -to the Configuration tab in the Settings screen: - - - -Depending on how SSL is configured on your database server, you may need to -specify additional environment variables, please check the [Environment -Variables reference][ref-config-env-vars] for more information. - - - - - -Add the following environment variables: - -| Environment Variable | Description | Example | -| -------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------- | -| `CUBEJS_DB_SSL` | If `true`, enables SSL encryption for database connections from Cube | `true`, `false` | -| `CUBEJS_DB_SSL_CA` | The contents of a CA bundle in PEM format, or a path to one. For more information, check the `options.ca` property for TLS Secure Contexts [in the Node.js documentation][nodejs-docs-tls-options] | A valid CA bundle or a path to one | -| `CUBEJS_DB_SSL_CERT` | The contents of an SSL certificate in PEM format, or a path to one. 
For more information, check the `options.cert` property for TLS Secure Contexts [in the Node.js documentation][nodejs-docs-tls-options] | A valid SSL certificate or a path to one | -| `CUBEJS_DB_SSL_KEY` | The contents of a private key in PEM format, or a path to one. For more information, check the `options.key` property for TLS Secure Contexts [in the Node.js documentation][nodejs-docs-tls-options] | A valid SSL private key or a path to one | - -[nodejs-docs-tls-options]: - https://nodejs.org/docs/latest/api/tls.html#tls_tls_createsecurecontext_options -[ref-config-db]: /config/databases -[ref-env-var]: /reference/environment-variables - diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Queries/enforcing-mandatory-filters.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Queries/enforcing-mandatory-filters.mdx deleted file mode 100644 index 95ea46ce54d22..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Queries/enforcing-mandatory-filters.mdx +++ /dev/null @@ -1,139 +0,0 @@ ---- -title: Enforcing Mandatory Filters -permalink: /recipes/enforcing-mandatory-filters -category: Examples & Tutorials -subCategory: Queries -menuOrder: 5 ---- - -## Use case - -Let's imagine that on New Year's Eve, December 30th, 2019, we renamed our store, -changed the design, and started selling completely different products. At the -same time, we decided to reuse the database for the new store. So, we'd like to -only show orders created after December 30th, 2019. In the recipe below, we'll -learn how to add mandatory filters to all queries. - -## Configuration - -To enforce mandatory filters we'll use the -[`queryRewrite`](https://cube.dev/docs/security/context#using-query-rewrite) -parameter in the `cube.js` configuration file. - -To solve this, we add a filter that will apply to all queries. This will make -sure we only show orders created after December 30th, 2019. 
- -```javascript -module.exports = { - queryRewrite: (query) => { - query.filters.push({ - member: `orders.created_at`, - operator: 'afterDate', - values: ['2019-12-30'], - }); - - return query; - }, -}; -``` - -## Query - -To get the orders we will send two queries with filters by status: - -```bash{outputLines: 1,3-13} -# Completed orders -curl cube:4000/cubejs-api/v1/load \ - -H "Authorization: eeyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoib3BlcmF0b3IiLCJpYXQiOjE2Mjg3NDUwNDUsImV4cCI6MTgwMTU0NTA0NX0.VErb2t7Bc43ryRwaOiEgXuU5KiolCT-69eI_i2pRq4o" \ - 'query={"measures": [], "order": [["Users.created_at", "asc"]], "dimensions": ["orders.number", "orders.created_at"], - "filters": [ - { - "member": "orders.status", - "operator": "equals", - "values": ["completed"] - } - ], - "limit": 5 - }' -``` - -```bash{outputLines: 1,3-13} -# Shipped orders -curl cube:4000/cubejs-api/v1/load \ - -H "Authorization: eeyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoib3BlcmF0b3IiLCJpYXQiOjE2Mjg3NDUwNDUsImV4cCI6MTgwMTU0NTA0NX0.VErb2t7Bc43ryRwaOiEgXuU5KiolCT-69eI_i2pRq4o" \ - 'query={"measures": [], "order": [["orders.created_at", "asc"]], "dimensions": ["orders.number", "orders.created_at"], - "filters": [ - { - "member": "orders.status", - "operator": "equals", - "values": ["shipped"] - } - ], - "limit": 5 - }' -``` - -## Result - -We have received orders created after December 30th, 2019. 
- -Completed orders: - -```javascript -[ - { - 'orders.number': 78, - 'orders.created_at': '2020-01-01T00:00:00.000', - }, - { - 'orders.number': 43, - 'orders.created_at': '2020-01-02T00:00:00.000', - }, - { - 'orders.number': 87, - 'orders.created_at': '2020-01-04T00:00:00.000', - }, - { - 'orders.number': 45, - 'orders.created_at': '2020-01-04T00:00:00.000', - }, - { - 'orders.number': 28, - 'orders.created_at': '2020-01-05T00:00:00.000', - }, -]; -``` - -Shipped orders: - -```javascript -[ - { - 'orders.number': 57, - 'orders.created_at': '2019-12-31T00:00:00.000', - }, - { - 'orders.number': 38, - 'orders.created_at': '2020-01-01T00:00:00.000', - }, - { - 'orders.number': 10, - 'orders.created_at': '2020-01-02T00:00:00.000', - }, - { - 'orders.number': 19, - 'orders.created_at': '2020-01-02T00:00:00.000', - }, - { - 'orders.number': 15, - 'orders.created_at': '2020-01-02T00:00:00.000', - }, -]; -``` - -## Source code - -Please feel free to check out the -[full source code](https://github.com/cube-js/cube/tree/master/examples/recipes/mandatory-filters) -or run it with the `docker-compose up` command. You'll see the result, including -queried data, in the console. diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Queries/getting-unique-values-for-a-field.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Queries/getting-unique-values-for-a-field.mdx deleted file mode 100644 index e52aca8fc97e8..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Queries/getting-unique-values-for-a-field.mdx +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: Getting Unique Values for a Field -permalink: /recipes/getting-unique-values-for-a-field -category: Examples & Tutorials -subCategory: Queries -menuOrder: 5 ---- - -## Use case - -We have a dashboard with information about the users, and we'd like to filter -them by city. To do so, we need to display all unique values for cities in the -dropdown. 
In the recipe below, we'll learn how to get unique values for -[dimensions](https://cube.dev/docs/schema/reference/dimensions). - -## Data modeling - -To filter users by city, we need to define the appropriate dimension: - - - -```yaml -cubes: - - name: users - sql_table: users - - dimensions: - - name: city - sql: city - type: string - - - name: state - sql: state - type: string -``` - -```javascript -cube(`users`, { - sql_table: `users`, - - dimensions: { - city: { - sql: `city`, - type: `string`, - }, - - state: { - sql: `state`, - type: `string`, - }, - }, -}); -``` - - - -## Query - -It is enough to include only a dimension in the query to get all unique values -of that dimension: - -```json -{ - "dimensions": ["users.city"] -} -``` - -## Result - -We got the unique values of the `city` dimension, and now we can use them in the -dropdown on the dashboard: - -```json -[ - { - "users.city": "Austin" - }, - { - "users.city": "Chicago" - }, - { - "users.city": "Los Angeles" - }, - { - "users.city": "Mountain View" - } -] -``` - -## Choosing dimensions - -In case we need to choose a dimension or render dropdowns for all dimensions, we -can fetch the list of dimensions for all cubes from the [`/meta` -endpoint](https://cube.dev/docs/rest-api#api-reference-v-1-meta): - -```bash{promptUser: user} -curl http://localhost:4000/cubejs-api/v1/meta -``` - -```json -{ - "cubes": [ - { - "name": "users", - "title": "Users", - "measures": [], - "dimensions": [ - { - "name": "users.city", - "title": "Users City", - "type": "string", - "shortTitle": "City", - "suggestFilterValues": true - }, - { - "name": "users.state", - "title": "Users State", - "type": "string", - "shortTitle": "State", - "suggestFilterValues": true - } - ], - "segments": [] - } - ] -} -``` - -Then, we can iterate through dimension names and use any of them in a -[query](#query). 
- -## Source code - -Please feel free to check out the -[full source code](https://github.com/cube-js/cube/tree/master/examples/recipes/getting-unique-values-for-a-field) -or run it with the `docker-compose up` command. You'll see the result, including -queried data, in the console. diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Queries/pagination.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Queries/pagination.mdx deleted file mode 100644 index b0b15e93aec10..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Queries/pagination.mdx +++ /dev/null @@ -1,172 +0,0 @@ ---- -title: Implementing Pagination -permalink: /recipes/pagination -category: Examples & Tutorials -subCategory: Queries -menuOrder: 5 ---- - -## Use case - -We want to display a table of data with hundreds of rows. To make the table -easier to digest and to improve the performance of the query, we'll use -pagination. With the recipe below, we'll get the orders list sorted by the order -number. Every page will have 5 orders. - -## Data modeling - -We have the following data model: - - - -```yaml -cubes: - - name: Orders - sql_table: orders - - measures: - - name: count - type: count - - dimensions: - - name: number - sql: number - type: number - - - name: created_at - sql: created_at - type: time -``` - -```javascript -cube(`Orders`, { - sql_table: `orders`, - - measures: { - count: { - type: `count`, - }, - }, - - dimensions: { - number: { - sql: `number`, - type: `number`, - }, - - created_at: { - sql: `created_at`, - type: `time`, - }, - }, -}); -``` - - - -## Query - -To select orders that belong to a particular page, we can use the `limit` and -`offset` query properties. First, let's get the number of all orders that we have. 
- -```json -{ - "measures": [ - "orders.count" - ] -} -``` - -Then, let's retrieve first batch (page) of five orders: - -```json -{ - "dimensions": [ - "orders.number" - ], - "order": { - "orders.number": "asc" - }, - "limit": 5 -} -``` - -Now, let's retrieve the second batch (page) of five orders: - -```json -{ - "dimensions": [ - "orders.number" - ], - "order": { - "orders.number": "asc" - }, - "limit": 5, - "offset": 5 -} -``` - -## Result - -We have received five orders per query and can use them as we want. - -```javascript -// Orders count: - -[ - { - 'orders.count': '10000', - }, -]; -``` - -```javascript -// The first five orders: - -[ - { - 'orders.number': 1, - }, - { - 'orders.number': 2, - }, - { - 'orders.number': 3, - }, - { - 'orders.number': 4, - }, - { - 'orders.number': 5, - }, -]; -``` - -```javascript -// The next five orders: - -[ - { - 'orders.number': 6, - }, - { - 'orders.number': 7, - }, - { - 'orders.number': 8, - }, - { - 'orders.number': 9, - }, - { - 'orders.number': 10, - }, -]; -``` - -## Source code - -Please feel free to check out the -[full source code](https://github.com/cube-js/cube/tree/master/examples/recipes/pagination) -or run it with the `docker-compose up` command. You'll see the result, including -queried data, in the console. diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Query-acceleration/Refreshing-select-partitions.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Query-acceleration/Refreshing-select-partitions.mdx deleted file mode 100644 index 6522850f6d7f4..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Query-acceleration/Refreshing-select-partitions.mdx +++ /dev/null @@ -1,263 +0,0 @@ ---- -title: Refreshing Select Partitions -permalink: /recipes/refreshing-select-partitions -category: Examples & Tutorials -subCategory: Query acceleration -menuOrder: 6 ---- - -## Use case - -We have a dataset with orders and we want to aggregate data while having decent -performance. 
Orders have a creation time, so we can use
-[partitioning](https://cube.dev/docs/caching/using-pre-aggregations#partitioning)
-by time to optimize pre-aggregations build and refresh time. The problem is that
-the order's status can change after a long period. In this case, we want to
-rebuild only partitions associated with this order.
-
-In the recipe below, we'll learn how to use the
-[`refresh_key`](https://cube.dev/docs/schema/reference/pre-aggregations#parameters-refresh-key-sql)
-together with the
-[`FILTER_PARAMS`](https://cube.dev/docs/schema/reference/cube#filter-params) for
-each partition separately.
-
-## Data modeling
-
-Let's explore the `orders` cube data that contains various information about
-orders, including number and status:
-
-| id | number | status | created_at | updated_at |
-| --- | ------ | ---------- | ------------------- | ------------------- |
-| 1 | 1 | processing | 2021-08-10 14:26:40 | 2021-08-10 14:26:40 |
-| 2 | 2 | completed | 2021-08-20 13:21:38 | 2021-08-22 13:10:38 |
-| 3 | 3 | shipped | 2021-09-01 10:27:38 | 2021-09-02 01:12:38 |
-| 4 | 4 | completed | 2021-09-20 10:27:38 | 2021-09-20 10:27:38 |
-
-In our case, each order has `created_at` and `updated_at` properties. The
-`updated_at` property is the last order update timestamp. To create a
-pre-aggregation with partitions, we need to specify the
-[`partition_granularity` property](https://cube.dev/docs/schema/reference/pre-aggregations#partition-granularity).
-Partitions will be split monthly by the `created_at` dimension.
-
-
-
-```yaml
-cubes:
-  - name: orders
-    # ...
-
-    pre_aggregations:
-      - name: orders
-        type: rollup
-        dimensions:
-          - number
-          - status
-          - created_at
-          - updated_at
-        time_dimension: created_at
-        granularity: day
-        partition_granularity: month # this is where we specify the partition
-        refresh_key:
-          sql: SELECT max(updated_at) FROM public.orders # check for updates of the updated_at property
-```
-
-```javascript
-cube(`orders`, {
-  pre_aggregations: {
-    orders: {
-      type: `rollup`,
-      dimensions: [number, status, created_at, updated_at],
-      time_dimension: created_at,
-      granularity: `day`,
-      partition_granularity: `month`, // this is where we specify the partition
-      refresh_key: {
-        sql: `SELECT max(updated_at) FROM public.orders` // check for updates of the updated_at property
-      },
-    },
-  },
-});
-```
-
-
-As you can see, we defined a custom
-[`refresh_key`](https://cube.dev/docs/schema/reference/pre-aggregations#parameters-refresh-key-sql)
-that will check for new values of the `updated_at` property. The refresh key is
-evaluated for each partition separately. For example, if we update orders from
-August and update their `updated_at` property, the current refresh key will
-update for **all** partitions. Here is how it looks in the Cube logs:
-
-```bash
-Executing SQL: 5b4c517f-b496-4c69-9503-f8cd2b4c73b6
---
-  SELECT max(updated_at) FROM public.orders
---
-Performing query completed: 5b4c517f-b496-4c69-9503-f8cd2b4c73b6 (15ms)
-Performing query: 5b4c517f-b496-4c69-9503-f8cd2b4c73b6
-Performing query: 5b4c517f-b496-4c69-9503-f8cd2b4c73b6
-Executing SQL: 5b4c517f-b496-4c69-9503-f8cd2b4c73b6
---
-  select min(("orders".created_at::timestamptz AT TIME ZONE 'UTC')) from public.orders AS "orders"
---
-Executing SQL: 5b4c517f-b496-4c69-9503-f8cd2b4c73b6
---
-  select max(("orders".created_at::timestamptz AT TIME ZONE 'UTC')) from public.orders AS "orders"
---
-```
-
-Note that the query for two partitions is the same. It's the reason why **all
-partitions** will be updated.
- -How do we fix this and update only the partition for August? We can use the -[`FILTER_PARAMS`](https://cube.dev/docs/schema/reference/cube#filter-params) for -that! - -Let's update our pre-aggregation definition: - - - -```yaml -cubes: - - name: orders - # ... - - pre_aggregations: - - name: orders - type: rollup - dimensions: - - number - - status - - created_at - - updated_at - time_dimension: created_at - granularity: day - partition_granularity: month # this is where we specify the partition - refreshKey: - sql: > - SELECT max(updated_at) - FROM public.orders - WHERE {FILTER_PARAMS.orders.created_at.filter('created_at')} -``` - -```javascript -cube(`orders`, { - pre_aggregations: { - orders: { - type: `rollup`, - dimensions: [number, status, created_at, updated_at], - time_dimension: created_at, - granularity: `day`, - partition_granularity: `month`, - refresh_key: { - sql: ` - SELECT max(updated_at) - FROM public.orders - WHERE ${FILTER_PARAMS.orders.created_at.filter('created_at')}` - }, - }, - }, -}); -``` - - - -Cube will filter data by the `created_at` property and then apply the refresh -key for the `updated_at` property. 
Here's how it looks in the Cube logs: - -```bash -Executing SQL: e1155b2f-859b-4e61-a760-17af891f5f0b --- - select min(("updated_orders".created_at::timestamptz AT TIME ZONE 'UTC')) from public.orders AS "updated_orders" --- -Executing SQL: e1155b2f-859b-4e61-a760-17af891f5f0b --- - select max(("updated_orders".created_at::timestamptz AT TIME ZONE 'UTC')) from public.orders AS "updated_orders" --- -Performing query completed: e1155b2f-859b-4e61-a760-17af891f5f0b (10ms) -Performing query completed: e1155b2f-859b-4e61-a760-17af891f5f0b (13ms) -Performing query: e1155b2f-859b-4e61-a760-17af891f5f0b -Performing query: e1155b2f-859b-4e61-a760-17af891f5f0b -Executing SQL: e1155b2f-859b-4e61-a760-17af891f5f0b --- - SELECT max(updated_at) FROM public.orders WHERE created_at >= '2021-08-01T00:00:00.000Z'::timestamptz AND created_at <= '2021-08-31T23:59:59.999Z'::timestamptz --- -Executing SQL: e1155b2f-859b-4e61-a760-17af891f5f0b --- - SELECT max(updated_at) FROM public.orders WHERE created_at >= '2021-09-01T00:00:00.000Z'::timestamptz AND created_at <= '2021-09-30T23:59:59.999Z'::timestamptz -``` - -Note that Cube checks the refresh key value using a date range over the -`created_at` property. With this refresh key, only one partition will be -updated. 
- -## Result - -We have received orders from two partitions of a pre-aggregation and only one of -them has been updated when an order changed its status: - -```json5 -// orders before update: -[ - { - "orders.number": "1", - "orders.status": "processing", - "orders.created_at": "2021-08-10T14:26:40.000", - "orders.updated_at": "2021-08-10T14:26:40.000" - }, - { - "orders.number": "2", - "orders.status": "completed", - "orders.created_at": "2021-08-20T13:21:38.000", - "orders.updated_at": "2021-08-20T13:21:38.000" - }, - { - "orders.number": "3", - "orders.status": "shipped", - "orders.created_at": "2021-09-01T10:27:38.000", - "orders.updated_at": "2021-09-01T10:27:38.000" - }, - { - "orders.number": "4", - "orders.status": "completed", - "orders.created_at": "2021-09-20T10:27:38.000", - "orders.updated_at": "2021-09-20T10:27:38.000" - } -] -``` - -```json5 -// orders after update: -[ - { - "orders.number": "1", - "orders.status": "shipped", - "orders.created_at": "2021-08-10T14:26:40.000", - "orders.updated_at": "2021-09-30T06:45:28.000" - }, - { - "orders.number": "2", - "orders.status": "completed", - "orders.created_at": "2021-08-20T13:21:38.000", - "orders.updated_at": "2021-08-20T13:21:38.000" - }, - { - "orders.number": "3", - "orders.status": "shipped", - "orders.created_at": "2021-09-01T10:27:38.000", - "orders.updated_at": "2021-09-01T10:27:38.000" - }, - { - "orders.number": "4", - "orders.status": "completed", - "orders.created_at": "2021-09-20T10:27:38.000", - "orders.updated_at": "2021-09-20T10:27:38.000" - } -] -``` - -## Source code - -Please feel free to check out the -[full source code](https://github.com/cube-js/cube/tree/master/examples/recipes/refreshing-select-partitions) -or run it with the `docker-compose up` command. You'll see the result, including -queried data, in the console. 
diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Query-acceleration/incrementally-building-pre-aggregations-for-a-date-range.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Query-acceleration/incrementally-building-pre-aggregations-for-a-date-range.mdx deleted file mode 100644 index b8a4067f43379..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Query-acceleration/incrementally-building-pre-aggregations-for-a-date-range.mdx +++ /dev/null @@ -1,264 +0,0 @@ ---- -title: Incrementally Building Pre-aggregations for a Date Range -permalink: /recipes/incrementally-building-pre-aggregations-for-a-date-range -category: Examples & Tutorials -subCategory: Query acceleration -tags: FILTER_PARAMS,incremental,pre-aggregations,partitions -menuOrder: 2 ---- - -## Use case - -In scenarios where a large dataset spanning multiple years is pre-aggregated -with partitioning, it is often useful to only rebuild pre-aggregations between a -certain date range (and therefore only a subset of all the partitions). This is -because recalculating all partitions is often an expensive and/or time-consuming -process. - -This is most beneficial when using data warehouses with partitioning support -(such as [AWS Athena][self-config-aws-athena] and [Google -BigQuery][self-config-google-bigquery]). 
- -## Data modeling - -Let's use an example of a cube with a nested SQL query: - - - -```yaml -cubes: - - name: users_with_organizations - sql: > - WITH users AS ( - SELECT - md5(company) AS organization_id, - id AS user_id, - created_at - FROM public.users - ), - organizations AS ( - ( - SELECT - md5(company) AS id, - company AS name, - MIN(created_at) - FROM - public.users - GROUP BY - 1, - 2 - ) - ) - SELECT - users.*, - organizations.name AS org_name - FROM - users - LEFT JOIN organizations - ON users.organization_id = organizations.id - - pre_aggregations: - - name: main - dimensions: - - id - - organization_id - time_dimension: created_at - refresh_key: - every: 1 day - incremental: true - granularity: day - partition_granularity: month - build_range_start: - sql: SELECT DATE('2021-01-01') - build_range_end: - sql: SELECT NOW() - - dimensions: - - name: id - sql: user_id - type: number - primary_key: true - - - name: organization_id - sql: organization_id - type: string - - - name: created_at - sql: created_at - type: time -``` - -```javascript -cube('users_with_organizations', { - - sql: ` - WITH users AS ( - SELECT - md5(company) AS organization_id, - id AS user_id, - created_at - FROM public.users - ), - organizations AS ( - ( - SELECT - md5(company) AS id, - company AS name, - MIN(created_at) - FROM - public.users - GROUP BY - 1, - 2 - ) - ) - SELECT - users.*, - organizations.name AS org_name - FROM - users - LEFT JOIN organizations - ON users.organization_id = organizations.id - `, - - pre_aggregations: { - main: { - dimensions: [CUBE.id, CUBE.organization_id] - time_dimension: CUBE.created_at, - refresh_key: { - every: `1 day`, - incremental: true, - }, - granularity: `day`, - partition_granularity: `month`, - build_range_start: { sql: `SELECT DATE('2021-01-01')` }, - build_range_end: { sql: `SELECT NOW()` }, - }, - }, - - dimensions: { - id: { - sql: `user_id`, - type: `number` - primary_key: true, - }, - - organization_id: { - sql: `organization_id`, 
- type: `string` - }, - - created_at: { - sql: `created_at`, - type: `time` - } - } -}); -``` - - - -The cube above pre-aggregates the results of the `sql` property, and is -configured to incrementally build them as long as the date range is not before -January 1st, 2021. - -However, if we only wanted to build pre-aggregations between a particular date -range within the users table, we would be unable to as the current configuration -only applies the date range to the final result of the SQL query defined in -`sql`. - -In order to do the above, we'll "push down" the predicates to the inner SQL -query using [`FILTER_PARAMS`][ref-schema-ref-cube-filterparam] in conjunction -with the [`build_range_start` and `build_range_end` -properties][ref-schema-ref-preagg-buildrange]: - - - -```yaml -cubes: - - name: users_with_organizations - sql: > - WITH users AS ( - SELECT - md5(company) AS organization_id, - id AS user_id, - created_at - FROM public.users - WHERE {FILTER_PARAMS.users_with_organizations.created_at.filter('created_at')} - ), - organizations AS ( - ( - SELECT - md5(company) AS id, - company AS name, - MIN(created_at) - FROM - public.users - GROUP BY - 1, - 2 - ) - ) - SELECT - users.*, - organizations.name AS org_name - FROM - users - LEFT JOIN organizations - ON users.organization_id = organizations.id - - # ... -``` - -```javascript -cube('users_with_organizations', { - sql: ` -WITH users AS ( - SELECT - md5(company) AS organization_id, - id AS user_id, - created_at - FROM public.users - WHERE ${FILTER_PARAMS.users_with_organizations.created_at.filter('created_at')} -), -organizations AS ( - ( - SELECT - md5(company) AS id, - company AS name, - MIN(created_at) - FROM - public.users - GROUP BY - 1, - 2 - ) -) -SELECT - users.*, - organizations.name AS org_name -FROM - users -LEFT JOIN organizations - ON users.organization_id = organizations.id -`, - - // ... 
-}); -``` - - - -## Result - -By adding `FILTER_PARAMS` to the subquery inside the `sql` property, we now -limit the initial size of the dataset by applying the filter as early as -possible. When the pre-aggregations are incrementally built, the same filter is -used to apply the build ranges as defined by `build_range_start` and -`build_range_end`. - -[ref-schema-ref-preagg-buildrange]: - /schema/reference/pre-aggregations#parameters-build-range-start-and-build-range-end -[ref-schema-ref-cube-filterparam]: /schema/reference/cube#filter-params -[self-config-aws-athena]: /config/databases/aws-athena/ -[self-config-google-bigquery]: /config/databases/google-bigquery diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Query-acceleration/joining-multiple-data-sources.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Query-acceleration/joining-multiple-data-sources.mdx deleted file mode 100644 index 20133f0040faf..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Query-acceleration/joining-multiple-data-sources.mdx +++ /dev/null @@ -1,239 +0,0 @@ ---- -title: Joining Data from Multiple Data Sources -permalink: /recipes/joining-multiple-data-sources -category: Examples & Tutorials -subCategory: Query acceleration -menuOrder: 6 ---- - -## Use case - -Let's imagine we store information about products and their suppliers in -separate databases. We want to aggregate data from these data sources while -having decent performance. In the recipe below, we'll learn how to create a -[rollup join](https://cube.dev/docs/schema/reference/pre-aggregations#parameters-type-rollupjoin) -between two databases to achieve our goal. 
- -## Configuration - -First of all, we should define our database connections with the `dataSource` -option in the `cube.js` configuration file: - -```javascript -module.exports = { - driverFactory: ({ dataSource }) => { - if (dataSource === 'suppliers') { - return { - type: 'postgres', - database: 'recipes', - host: 'demo-db-recipes.cube.dev', - user: 'cube', - password: '12345', - port: '5432', - }; - } - - if (dataSource === 'products') { - return { - type: 'postgres', - database: 'ecom', - host: 'demo-db-recipes.cube.dev', - user: 'cube', - password: '12345', - port: '5432', - }; - } - - throw new Error('dataSource is undefined'); - }, -}; -``` - -## Data modeling - -First, we'll define -[rollup](https://cube.dev/docs/schema/reference/pre-aggregations#parameters-type-rollup) -pre-aggregations for `products` and `suppliers`. Note that these pre-aggregations should -contain the dimension on which they're joined. In this case, it's the `supplier_id` -dimension in the `products` cube, and the `id` dimension in the `suppliers` cube: - - - -```yaml -cubes: - - name: products - # ... - - pre_aggregations: - - name: products_rollup - type: rollup - dimensions: - - name - - supplier_id - indexes: - category_index: - columns: - - supplier_id - - joins: - suppliers: - sql: "{supplier_id} = ${suppliers.id}" - relationship: many_to_one -``` - -```javascript -cube('products', { - // ... - - pre_aggregations: { - products_rollup: { - type: `rollup`, - dimensions: [name, supplier_id], - indexes: { - category_index: { - columns: [supplier_id], - } - } - }, - }, - - joins: { - suppliers: { - sql: `${supplier_id} = ${suppliers.id}`, - relationship: `many_to_one`, - }, - }, - - // ... -}) -``` - - - - - -```yaml -cubes: - - name: suppliers - # ... - - pre_aggregations: - - name: suppliers_rollup - type: rollup - dimensions: - - id - - company - - email - indexes: - category_index: - columns: - - id -``` - -```javascript -cube('suppliers', { - // ... 
- - pre_aggregations: { - suppliers_rollup: { - type: `rollup`, - dimensions: [id, company, email], - indexes: { - category_index: { - columns: [id], - } - } - } - } -}) -``` - - - -Then, we'll also define a `rollup_join` pre-aggregation in the `products` cube, which will enable -aggregating data from multiple data sources: - - - -```yaml -cubes: - - name: products - # ... - - pre_aggregations: - - name: combined_rollup - type: rollup_join - dimensions: - - suppliers.email - - suppliers.company - - name - rollups: - - suppliers.suppliers_rollup - - products_rollup -``` - -```javascript -cube('products', { - // ... - - pre_aggregations: { - combined_rollup: { - type: `rollup_join`, - dimensions: [suppliers.email, suppliers.company, name], - rollups: [suppliers.suppliers_rollup, products_rollup], - } - } -}); -``` - - - -## Query - -Let's get the product names and their suppliers' info, such as company name and -email, with the following query: - -```json -{ - "order": { - "products.name": "asc" - }, - "dimensions": [ - "products.name", - "suppliers.company", - "suppliers.email" - ], - "limit": 3 -} -``` - -## Result - -We'll get the data from two pre-aggregations joined into one `rollup_join`: - -```json -[ - { - "products.name": "Awesome Cotton Sausages", - "suppliers.company": "Justo Eu Arcu Inc.", - "suppliers.email": "id.risus@luctuslobortisClass.net" - }, - { - "products.name": "Awesome Fresh Keyboard", - "suppliers.company": "Quisque Purus Sapien Limited", - "suppliers.email": "Cras@consectetuercursuset.co.uk" - }, - { - "products.name": "Awesome Rubber Soap", - "suppliers.company": "Tortor Inc.", - "suppliers.email": "Mauris@ac.com" - } -] -``` - -## Source code - -Please feel free to check out the -[full source code](https://github.com/cube-js/cube/tree/master/examples/recipes/joining-multiple-datasources-data) -or run it with the `docker-compose up` command. You'll see the result, including -queried data, in the console. 
diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Query-acceleration/non-additivity.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Query-acceleration/non-additivity.mdx deleted file mode 100644 index acac35470a693..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Query-acceleration/non-additivity.mdx +++ /dev/null @@ -1,263 +0,0 @@ ---- -title: Accelerating Non-Additive Measures -permalink: /recipes/non-additivity -category: Examples & Tutorials -subCategory: Query acceleration -menuOrder: 6 ---- - -## Use case - -We want to run queries against -[pre-aggregations](https://cube.dev/docs/caching#pre-aggregations) only to -ensure our application's superior performance. Usually, accelerating a query is -as simple as including its measures and dimensions to the pre-aggregation -[definition](https://cube.dev/docs/schema/reference/pre-aggregations#parameters-measures). - -[Non-additive](https://cube.dev/docs/caching/pre-aggregations/getting-started#ensuring-pre-aggregations-are-targeted-by-queries-non-additivity) -measures (e.g., average values or distinct counts) are a special case. -Pre-aggregations with such measures are less likely to be -[selected](https://cube.dev/docs/caching/pre-aggregations/getting-started#ensuring-pre-aggregations-are-targeted-by-queries-selecting-the-pre-aggregation) -to accelerate a query. However, there are a few ways to work around that. - -## Data modeling - -Let's explore the `users` cube that contains various measures describing users' -age: - -- count of unique age values (`distinct_ages`) -- average age (`avg_age`) -- 90th [percentile](https://cube.dev/docs/recipes/percentiles) of age - (`p90_age`) - - - -```yaml -cubes: - - name: users - # ... 
- - measures: - - name: distinct_ages - sql: age - type: count_distinct - - - name: avg_age - sql: age - type: avg - - - name: p90_age - sql: PERCENTILE_CONT(0.9) WITHIN GROUP (ORDER BY age) - type: number -``` - -```javascript -cube(`users`, { - measures: { - distinct_ages: { - sql: `age`, - type: `count_distinct`, - }, - - avg_age: { - sql: `age`, - type: `avg`, - }, - - p90_age: { - sql: `PERCENTILE_CONT(0.9) WITHIN GROUP (ORDER BY age)`, - type: `number`, - }, - }, -}); -``` - - - -All of these measures are non-additive. Practically speaking, it means that the -pre-aggregation below would only accelerate a query that fully matches its -definition: - - - -```yaml -cubes: - - name: users - - pre_aggregations: - - name: main - measures: - - distinct_ages - - avg_age - - p90_age - dimensions: - - gender -``` - -```javascript -cube(`users`, { - // ... - pre_aggregations: { - main: { - measures: [distinct_ages, avg_age, p90_age], - dimensions: [gender], - }, - }, -}); -``` - - - -This query will match the pre-aggregation above and, thus, will be accelerated: - -```json -{ - "measures": [ - "users.distinct_ages", - "users.avg_age", - "users.p90_age" - ], - "dimensions": [ - "users.gender" - ] -} -``` - -Meanwhile, the query below won't match the same pre-aggregation because it -contains non-additive measures and omits the `gender` dimension. It won't be -accelerated: - -```json -{ - "measures": [ - "users.distinct_ages", - "users.avg_age", - "users.p90_age" - ] -} -``` - -Let's explore some possible workarounds. - -### <--{"id" : "Data modeling"}--> Replacing with approximate additive measures - -Often, non-additive `count_distinct` measures can be changed to have the -[`count_distinct_approx` type](https://cube.dev/docs/schema/reference/types-and-formats#measures-types-count-distinct-approx) -which will make them additive and orders of magnitude more performant. This -`count_distinct_approx` measures can be used in pre-aggregations. 
However, there -are two drawbacks: - -- This type is approximate, so the measures might yield slightly different - results compared to their `count_distinct` counterparts. Please consult with - your database's documentation to learn more. -- The `count_distinct_approx` is not supported with all databases. Currently, - Cube supports it for Athena, BigQuery, and Snowflake. - -For example, the `distinct_ages` measure can be rewritten as follows: - - - -```yaml -cubes: - - name: users - - measures: - - name: distinct_ages - sql: age - type: count_distinct_approx -``` - -```javascript -cube(`users`, { - measures: { - distinct_ages: { - sql: `age`, - type: `count_distinct_approx`, - }, - }, -}); -``` - - - -### <--{"id" : "Data modeling"}--> Decomposing into a formula with additive measures - -Non-additive `avg` measures can be rewritten as -[calculated measures](https://cube.dev/docs/schema/reference/measures#calculated-measures) -that reference additive measures only. Then, this additive measures can be used -in pre-aggregations. -Please note, however, that you shouldn't include `avg_age` measure in your pre-aggregation as it renders it non-additive. 
- -For example, the `avg_age` measure can be rewritten as follows: - - - -```yaml -cubes: - - name: users - - measures: - - name: avg_age - sql: "{age_sum} / {count}" - type: number - - - name: age_sum - sql: age - type: sum - - - name: count - type: count - - pre_aggregations: - - name: main - measures: - - age_sum - - count - dimensions: - - gender - -``` - -```javascript -cube(`users`, { - measures: { - avg_age: { - sql: `${age_sum} / ${count}`, - type: `number`, - }, - - age_sum: { - sql: `age`, - type: `sum`, - }, - - count: { - type: `count`, - }, - }, - - pre_aggregations: { - main: { - measures: [age_sum, count], - dimensions: [gender], - }, - }, -}); -``` - - - -### <--{"id" : "Data modeling"}--> Providing multiple pre-aggregations - -If the two workarounds described above don't apply to your use case, feel free -to create additional pre-aggregations with definitions that fully match your -queries with non-additive measures. You will get a performance boost at the -expense of a slightly increased overall pre-aggregation build time and space -consumed. - -## Source code - -Please feel free to check out the -[full source code](https://github.com/cube-js/cube/tree/master/examples/recipes/non-additivity) -or run it with the `docker-compose up` command. You'll see the result, including -queried data, in the console. 
diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Query-acceleration/using-originalsql-and-rollups-effectively.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Query-acceleration/using-originalsql-and-rollups-effectively.mdx deleted file mode 100644 index 3ee69a3e0cf75..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Query-acceleration/using-originalsql-and-rollups-effectively.mdx +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: Using originalSql and rollup Pre-aggregations Effectively -permalink: /recipes/using-originalsql-and-rollups-effectively -category: Examples & Tutorials -subCategory: Query acceleration -menuOrder: 6 ---- - -## Use case - -For cubes that are built from an expensive SQL query, we can optimize -pre-aggregation builds so that they don't have to re-run the SQL query. - -## Configuration - -We can do this by creating a pre-aggregation of type -[`originalSql`][ref-schema-ref-preaggs-type-origsql] on the source (also known -as internal) database, and then configuring our existing `rollup` -pre-aggregations to use the `originalSql` pre-aggregation with the -[`useOriginalSqlPreAggregations` property][ref-schema-ref-preaggs-use-origsql]. - - - -Storing pre-aggregations on an internal database requires write-access. Please -ensure that your database driver is not configured with `readOnly: true`. - - - - - -```yaml -cubes: - - name: orders - sql: "" - - pre_aggregations: - - name: base - type: original_sql - external: false - - name: main - dimensions: - - id - - name - measures: - - count - time_dimension: created_at - granularity: day - use_original_sql_pre_aggregations: true -``` - -```javascript -cube('orders', { - sql: ``, - - pre_aggregations: { - base: { - type: `original_sql`, - external: false, - }, - - main: { - dimensions: [id, name], - measures: [count], - time_dimension: created_at, - granularity: `day`, - use_original_sql_pre_aggregations: true, - }, - }, - - // ... 
-}) -``` - - - -## Result - -With the above data model, the `main` pre-aggregation is built from the `base` -pre-aggregation. - -[ref-schema-ref-preaggs-type-origsql]: - /schema/reference/pre-aggregations#parameters-type-originalsql -[ref-schema-ref-preaggs-use-origsql]: - https://cube.dev/docs/schema/reference/pre-aggregations#use-original-sql-pre-aggregations diff --git a/docs/content/Examples-Tutorials-Recipes/Recipes/Upgrading-Cube/Migrating-from-Express-to-Docker.mdx b/docs/content/Examples-Tutorials-Recipes/Recipes/Upgrading-Cube/Migrating-from-Express-to-Docker.mdx deleted file mode 100644 index 81a9bb6d6b5f9..0000000000000 --- a/docs/content/Examples-Tutorials-Recipes/Recipes/Upgrading-Cube/Migrating-from-Express-to-Docker.mdx +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: Migrating from Express to Docker -permalink: /recipes/migrating-from-express-to-docker -category: Examples & Tutorials -subCategory: Upgrading Cube -menuOrder: 13 ---- - -Since [`v0.23`][link-v-023-release], Cube CLI uses the `docker` template instead -of `express` as a default for project creation, and it is the recommended route -for production. To migrate you should move all Cube dependencies in -`package.json` to `devDependencies` and leave dependencies that you use to -configure Cube in `dependencies`. 
- -For example, your existing `package.json` might look something like: - -```json -{ - "name": "cubejs-app", - "version": "0.0.1", - "private": true, - "scripts": { - "dev": "node index.js" - }, - "dependencies": { - "@cubejs-backend/postgres-driver": "^0.20.0", - "@cubejs-backend/server": "^0.20.0" - } -} -``` - -It should become: - -```json -{ - "name": "cubejs-app", - "version": "0.0.1", - "private": true, - "scripts": { - "dev": "./node_modules/.bin/cubejs-server server" - }, - "dependencies": {}, - "devDependencies": { - "@cubejs-backend/postgres-driver": "^0.23.6", - "@cubejs-backend/server": "^0.23.7" - } -} -``` - -You should also rename your `index.js` file to `cube.js` and replace the -`CubejsServer.create()` call with export of configuration using -`module.exports`. - -For an `index.js` like the following: - -```javascript -const CubejsServer = require('@cubejs-backend/server'); - -const server = new CubejsServer({ - logger: (msg, params) => { - console.log(`${msg}: ${JSON.stringify(params)}`); - }, -}); - -server - .listen() - .then(({ version, port }) => { - console.log(`🚀 Cube API server (${version}) is listening on ${port}`); - }) - .catch((e) => { - console.error('Fatal error during server start: '); - console.error(e.stack || e); - }); -``` - -It should be renamed to `cube.js` and its' contents should look like: - -```javascript -module.exports = { - logger: (msg, params) => { - console.log(`${msg}: ${JSON.stringify(params)}`); - }, -}; -``` - -Finally, add a `docker-compose.yml` file alongside the `cube.js` configuration -file: - -```yaml -version: '2.2' - -services: - cube: - image: cubejs/cube:latest - ports: - - 4000:4000 - env_file: .env - volumes: - - .:/cube/conf -``` - -[link-v-023-release]: https://github.com/cube-js/cube/releases/tag/v0.23.0 diff --git a/docs/content/FAQs/General.mdx b/docs/content/FAQs/General.mdx deleted file mode 100644 index 726b74c648383..0000000000000 --- a/docs/content/FAQs/General.mdx +++ /dev/null @@ -1,36 +0,0 
@@ ---- -title: General -permalink: /faqs/general -category: FAQs ---- - -## Is there a row limit on the results of a query? - -The row limit for all query results is set to 10,000 rows by default. You may -specify a row limit up to 50,000 in query parameters for an individual query. If -more rows are needed, we recommend using pagination in your application to -request more rows. - -## Can I try Cube Cloud for free? - -Yes. Cube Cloud provides free [development instances](/cloud/configuration/deployment-types#development-instance) and a set of [paid tiers](https://cube.dev/pricing). Each tier comes with additional features, however, you're welcome to use development instances -indefinitely if they satisfy your needs. - -## What is the difference between CUBEJS_CONCURRENCY and CUBEJS_DB_MAX_POOL? - -`CUBEJS_CONCURRENCY` specifies the maximum number of queries that can be -executed concurrently on your source database. This variable should reflect the -limitations of your database, and will help limit the number of queries that are -sent from cube. - -`CUBEJS_DB_MAX_POOL` allows you to set a maximum number of connection pools to -your database. This only applies to databases that use connection pooling -(Postgresql, Redshift, Clickhouse) and is not applicable to databases without it -(BigQuery, Snowflake). The concurrency limit specified in `CUBEJS_CONCURRENCY` -will supersede the number of connections if it is lower. - -For example, if your database has a hard concurrency limit of 10 that cannot be -changed, but you wish to raise the concurrency limit to 50 you would first set -`CUBEJS_CONCURRENCY=50` and `CUBEJS_DB_MAX_POOL=5` or higher. Note that some -data warehouses (BigQuery, Snowflake) do not use connection pooling so this -parameter is not applicable. 
diff --git a/docs/content/FAQs/Tips-and-Tricks.mdx b/docs/content/FAQs/Tips-and-Tricks.mdx deleted file mode 100644 index 716bfae1e048b..0000000000000 --- a/docs/content/FAQs/Tips-and-Tricks.mdx +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: Tips and Tricks -permalink: /faqs/tips-and-tricks -category: FAQs ---- - -## How can I read from two different database schemas in my database when I'm only able to select one while connecting? - -Use your first schema when setting up your database connection in Cube Cloud. - -To use your second database schema, update the `CUBE_DB_NAME` environment -variable in **Settings > Configuration**. Change `CUBE_DB_NAME` to the name of -your second schema. - -This will trigger a new build. Once it's completed click on Data -Model in the left hand side navigation, and then in the upper-right -corner, click the three-dot menu and select Generate Data Model. You -should be able to see the name of the second schema from your database and -generate new models. - -## Can I track my customers' query usage? - -You can track query usage by user (or other dimension) by setting up [Log -Export][ref-cloud-o11y-logs] and parsing the necessary information. - -## Can I bypass Row-Level Security when using the SQL API? - -There may be times when you want the permissions through Cube's REST API to be -different from the permissions of the SQL API. - -For example, perhaps your customers use the REST API to access their own data. -You might use row-level security to prevent them from seeing any data associated -with other customers. - -For your internal analytics, you could provide access to your Data Analysts via -the SQL API. Since this is for your internal use, you will need access to all -the data rather than a single customer's. To give yourself higher permissions -through the SQL API, you could create an exception for the usual Row-Level -Security checks. 
- -In the following data models, we have created some example Row-Level Security -rules and an exception for querying data via the SQL API. - -### Defining basic RLS - -First, in the `cube.js` configuration file, we'll define the -[`queryRewrite()`][ref-conf-ref-queryrewrite] property to push a filter to each -query depending on the `tenantId` within the [Security Context][ref-sec-ctx]. - -```javascript -module.exports = { - queryRewrite: (query, { securityContext }) => { - if (!securityContext.tenantId) { - throw new Error('No id found in Security Context!'); - } else { - query.filters.push({ - member: 'orders.tenant_id', - operator: 'equals', - values: [securityContext.tenantId], - }); - - return query; - } - }, -}; -``` - -With this logic, each tenant can see their data and nothing else. - -### Bypassing RLS for queries created with the SQL API - -When we want to bypass the RLS we defined above, we need to create a sort of -"superuser" only accessible when authenticating via the SQL API. We need to -define two new things for this to work: - -1. Leverage the [`checkSqlAuth()`][ref-conf-ref-checksqlauth] configuration - option to inject a new property into the Security Context that defines a - superuser. In this case, we'll call it `isSuperUser`. - -2. Handle the new `isSuperUser` property in our previously defined - `queryRewrite` to bypass the filter push. 
- -```javascript -module.exports = { - // Create a "superuser" security context for the SQL API - checkSqlAuth: async (req, username) => { - if (username === process.env.CUBEJS_SQL_USER) { - return { - password: process.env.CUBEJS_SQL_PASSWORD, - securityContext: { isSuperUser: true }, - }; - } - }, - queryRewrite: (query, { securityContext }) => { - // Bypass row-level-security when connected from the SQL API - if (securityContext.isSuperUser) { - return query; - } else if (!securityContext.tenantId) { - throw new Error('No id found in Security Context!'); - } else { - query.filters.push({ - member: 'orders.tenant_id', - operator: 'equals', - values: [securityContext.tenantId], - }); - - return query; - } - }, -}; -``` - -With this exception in place we should be able to query all the customers' data -via the SQL API without being hindered by the row-level security checks. - -[ref-cloud-o11y-logs]: /cloud/workspace/logs -[ref-conf-ref-checksqlauth]: /config#options-reference-check-sql-auth -[ref-conf-ref-queryrewrite]: /config#options-reference-query-rewrite -[ref-sec-ctx]: /security/context diff --git a/docs/content/FAQs/Troubleshooting.mdx b/docs/content/FAQs/Troubleshooting.mdx deleted file mode 100644 index 3da7b1d137ef8..0000000000000 --- a/docs/content/FAQs/Troubleshooting.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Troubleshooting -permalink: /faqs/troubleshooting -category: FAQs ---- - -## Error: Unsupported db type: undefined - -This error message might mean that there's no `CUBEJS_DB_TYPE` defined. Please -visit **Settings > Configuration** and define this environment variable. - -If this doesn't help, please reach out to us in our support chat, we'd be happy -to help! - -## Error: Internal: deadline has elapsed OR Error: Query execution timeout after 10 min of waiting - -This error happens when a query remains queued for an excessive amount of time. -To troubleshoot, try increasing concurrency and/or timeout limits. 
The default -concurrency is 4 for most data warehouses and the default timeout is 10 minutes. -You can increase these values by adjusting the `CUBEJS_CONCURRENCY` or -`CUBEJS_DB_TIMEOUT` environment variables in **Settings > Configuration**. If -your timeout limit is already high, we recommend either adding a pre-aggregation -or refactoring your SQL for better efficiency. - -If these methods don't help, please reach out to us in our support chat! - -## Error: Error during create table: CREATE TABLE with pre-aggregations - -This usually means Cube can't create a pre-aggregation table, which could be due -to a few different reasons. Further down the error log, you should see more -details which will help narrow down the scope of the issue. - -| If you see… | The issue is likely… | Recommendation | -| -------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------- | -| `has a key that already exists in Name index` | Multi-tenancy setup missing following configuration setting:
preAggregationsSchema: ({ securityContext }) => `pre_aggregations_${securityContext.tenantId}`,
| Update configuration. | -| `Error: Query execution timeout after 10 min of waiting` | Pre-aggregation is too large to be built. | Try enabling [export bucket][ref-caching-using-preaggs-export-bucket] or reducing `partitionGranularity`. | - -For any other error types, feel free to reach out to us in our support chat. - -## Warning: There were queries in these timezones which are not added in the CUBEJS_SCHEDULED_REFRESH_TIMEZONES environment variable. - -If you want your query to use pre-aggregations, you must define all necessary -timezones using either the `CUBEJS_SCHEDULED_REFRESH_TIMEZONES` environment -variable, or in the `cube.js` file: - -```javascript -module.exports = { - // You can define one or multiple timezones based on your requirements - scheduledRefreshTimeZones: ['America/Vancouver', 'America/Toronto'], -}; -``` - -Without this configuration, Cube will default to `UTC`. The warning reflects -Cube's inability to find the query's timezone in the desired pre-aggregation. - -## Cube Cloud API is down after upgrading the version of Cube - -You may roll back to a previous Cube version at any time in **Settings > -Configuration**. - -We always recommend testing new Cube versions in your staging environment before -updating your production environment. We do not recommend setting your -production deployment to the latest version since it will automatically upgrade -to the latest version every time it's released on the next build or settings -update. 
- -[ref-caching-using-preaggs-export-bucket]: - /caching/using-pre-aggregations#pre-aggregation-build-strategies-export-bucket diff --git a/docs/content/Getting-Started/Cloud/01-Overview.mdx b/docs/content/Getting-Started/Cloud/01-Overview.mdx deleted file mode 100644 index 7bd1e60457f47..0000000000000 --- a/docs/content/Getting-Started/Cloud/01-Overview.mdx +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Getting started with Cube Cloud -permalink: /getting-started/cloud/overview -category: Getting Started -subCategory: Cube Cloud -menuOrder: 2 ---- - - - -[Join our upcoming Office Hours on July 26 at 9am PST](https://cube.dev/events/unlock-data-cube-snowflake) on Getting Started with Cube Cloud and Snowflake. -Learn how to easily connect Cube Cloud and Snowflake, load your data, and integrate your BI tools. - - - -This getting started guide will show you how to use Cube Cloud with Snowflake. You will learn how to: - -- Load sample data into your Snowflake account -- Connect Cube Cloud to Snowflake -- Create your first Cube data model -- Connect to a BI tool to explore this model -- Create React application with Cube REST API - -## Prerequisites - - -- [Cube Cloud account](https://cubecloud.dev/auth/signup) -- [Snowflake account](https://signup.snowflake.com/) diff --git a/docs/content/Getting-Started/Cloud/02-Load data.mdx b/docs/content/Getting-Started/Cloud/02-Load data.mdx deleted file mode 100644 index 33bb892911242..0000000000000 --- a/docs/content/Getting-Started/Cloud/02-Load data.mdx +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: Load data -permalink: /getting-started/cloud/load-data -category: Getting Started -subCategory: Cube Cloud -menuOrder: 2.1 ---- - - - -[Join our upcoming Office Hours on July 26 at 9am PST](https://cube.dev/events/unlock-data-cube-snowflake) on Getting Started with Cube Cloud and Snowflake. -Learn how to easily connect Cube Cloud and Snowflake, load your data, and integrate your BI tools. 
- - - -The following steps will guide you through setting up a Snowflake account and uploading the demo dataset, which is stored as CSV files in a public S3 bucket. - -First, let’s create a warehouse, database, and schema. Paste the following SQL into the Editor of the Snowflake worksheet and click Run. - -```sql -CREATE WAREHOUSE cube_demo_wh; -CREATE DATABASE cube_demo; -CREATE SCHEMA cube_demo.ecom; -``` - -We’re going to create four tables in the `ecom` schema and seed them with data from S3. - -First, let’s create `line_items` table. Delete the previous SQL in your Editor and then run the following command. - -```sql -CREATE TABLE cube_demo.ecom.line_items -( id INTEGER, - order_id INTEGER, - product_id INTEGER, - price INTEGER, - created_at TIMESTAMP -); -``` - -Clear all the content in the Editor and run the following command to load data into the `line_items` table. - -``` -COPY INTO cube_demo.ecom.line_items (id, order_id, product_id, price, created_at) -FROM 's3://cube-tutorial/line_items.csv' -FILE_FORMAT = (TYPE = 'CSV' FIELD_DELIMITER = ',' SKIP_HEADER = 1); -``` - -Now, we’re going to repeat these steps for three other tables. - -Run the following command to create the `orders` table. - -```sql -CREATE TABLE cube_demo.ecom.orders -( id INTEGER, - user_id INTEGER, - status VARCHAR, - completed_at TIMESTAMP, - created_at TIMESTAMP -); -``` - -Run the following command to load data into the `orders` table from S3. - -```sql -COPY INTO cube_demo.ecom.orders (id, user_id, status, completed_at, created_at) -FROM 's3://cube-tutorial/orders.csv' -FILE_FORMAT = (TYPE = 'CSV' FIELD_DELIMITER = ',' SKIP_HEADER = 1); - -``` - -Run the following command to create the `users` table. - -```sql -CREATE TABLE cube_demo.ecom.users -( id INTEGER, - user_id INTEGER, - city VARCHAR, - age INTEGER, - gender VARCHAR, - state VARCHAR, - first_name VARCHAR, - last_name VARCHAR, - created_at TIMESTAMP -); - -``` - -Run the following command to load data into the `users` table. 
- -```sql -COPY INTO cube_demo.ecom.users (id, city, age, gender, state, first_name, last_name, created_at) -FROM 's3://cube-tutorial/users.csv' -FILE_FORMAT = (TYPE = 'CSV' FIELD_DELIMITER = ',' SKIP_HEADER = 1); - -``` - -Run the following command to create the `products` table. - -```sql -CREATE TABLE cube_demo.ecom.products -( id INTEGER, - name VARCHAR, - product_category VARCHAR, - created_at TIMESTAMP -); - -``` - -Run the following command to load data into the `products` table. - -```sql -COPY INTO cube_demo.ecom.products (id, name, created_at, product_category) -FROM 's3://cube-tutorial/products.csv' -FILE_FORMAT = (TYPE = 'CSV' FIELD_DELIMITER = ',' SKIP_HEADER = 1); -``` diff --git a/docs/content/Getting-Started/Cloud/03-Connect-to-Snowflake.mdx b/docs/content/Getting-Started/Cloud/03-Connect-to-Snowflake.mdx deleted file mode 100644 index 507c4f1539897..0000000000000 --- a/docs/content/Getting-Started/Cloud/03-Connect-to-Snowflake.mdx +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: Connect to Snowflake -permalink: /getting-started/cloud/connect-to-snowflake -category: Getting Started -subCategory: Cube Cloud -menuOrder: 3 ---- - - - -[Join our upcoming Office Hours on July 26 at 9am PST](https://cube.dev/events/unlock-data-cube-snowflake) on Getting Started with Cube Cloud and Snowflake. -Learn how to easily connect Cube Cloud and Snowflake, load your data, and integrate your BI tools. - - - -In this section, we’ll create a Cube Cloud deployment and connect it to Snowflake. -A deployment represents a data model, configuration, and managed infrastructure. - -To continue with this guide, you'll need to have a Cube Cloud account. If you -don't have one yet, [click here to sign up][cube-cloud-signup] for free. - -First, [sign in to your Cube Cloud account][cube-cloud-signin]. 
Then, -click Create Deployment: - -Give the deployment a name, select the cloud provider and region of your choice, -and click Next: - - - - - -Microsoft Azure is available in Cube Cloud on -[Premium](https://cube.dev/pricing) tier. [Contact us](https://cube.dev/contact) -for details. - - - -## Set up a Cube project - -Next, click Create to create a new project from scratch: - - - -## Connect to your Snowflake - -The last step is to connect Cube Cloud to Snowflake. First, select it from the grid: - - - -Then enter your Snowflake credentials: - -- **Username:** Your Snowflake username. Please note, it is usually **not** your email address. -- **Password:** Your Snowflake password. -- **Database:** `CUBE_DEMO`, that is the database we've created in the previous step. -- **Account:** Your Snowflake account identifier. You can find it in your Snowflake URL as the `account_locator` part. -- **Region:** Your Snowflake account region. You can find it in your Snowflake URL. If your URL includes a `cloud` part, use both the `cloud_region_id` and `cloud` together, e.g. `us-east-2.aws`; otherwise, just use `cloud_region_id`. -- **Warehouse:** `CUBE_DEMO_WH`, that is the warehouse we've created in the previous step. -- **Role:** You can leave it blank. - -Click Apply, and Cube Cloud will test the connection and proceed to the next step. - -## Generate data model from your Snowflake schema - -Cube can now generate a basic data model from your data warehouse schema, which helps you get started with data modeling faster. -Select all four tables in our `ECOM` schema and click through the data model generation wizard. We'll inspect these generated files in the next section and start making changes to them.
- -[aws-docs-sec-group]: - https://docs.aws.amazon.com/vpc/latest/userguide/security-groups.html -[aws-docs-sec-group-rule]: - https://docs.aws.amazon.com/vpc/latest/userguide/security-group-rules.html -[cube-cloud-signin]: https://cubecloud.dev/auth -[cube-cloud-signup]: https://cubecloud.dev/auth/signup -[ref-conf-db]: /config/databases -[ref-getting-started-cloud-generate-models]: - /getting-started/cloud/generate-models diff --git a/docs/content/Getting-Started/Cloud/04-Create-data-model.mdx b/docs/content/Getting-Started/Cloud/04-Create-data-model.mdx deleted file mode 100644 index 79a47e02b47af..0000000000000 --- a/docs/content/Getting-Started/Cloud/04-Create-data-model.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Create your first data model -permalink: /getting-started/cloud/create-data-model -category: Getting Started -subCategory: Cube Cloud -menuOrder: 4 ---- - - - -[Join our upcoming Office Hours on July 26 at 9am PST](https://cube.dev/events/unlock-data-cube-snowflake) on Getting Started with Cube Cloud and Snowflake. -Learn how to easily connect Cube Cloud and Snowflake, load your data, and integrate your BI tools. - - - -Cube follows a dataset-oriented data modeling approach, which is inspired by and expands upon dimensional modeling. -Cube incorporates this approach and provides a practical framework for implementing dataset-oriented data modeling. - -When building a data model in Cube, you work with two dataset-centric objects: **cubes** and **views**. -**Cubes** usually represent business entities such as customers, line items, and orders. -In cubes, you define all the calculations within the measures and dimensions of these entities. -Additionally, you define relationships between cubes, such as "an order has many line items" or "a user may place multiple orders." - -**Views** sit on top of a data graph of cubes and create a facade of your entire data model, with which data consumers can interact. 
-You can think of views as the final data products for your data consumers - BI users, data apps, AI agents, etc. -When building views, you select measures and dimensions from different connected cubes and present them as a single dataset to BI or data apps. - - - -## Working with cubes - -To begin building your data model, click on Enter Development Mode in Cube Cloud. This will take you to your personal developer space, where you can safely make changes to your data model without affecting the production environment. - -In the previous section, we generated four cubes from the Snowflake schema. To see the data graph of these four cubes and how they are connected to each other, click the Show Graph button on the Data Model page. - -Let's review the `orders` cube first and update it with additional dimensions and measures. - -Once you are in developer mode, navigate to the Data Model and click on the `orders.yml` file in the left sidebar inside the `model/cubes` directory to open it. - -You should see the following content of `model/cubes/orders.yml` file. - -```yaml -cubes: - - name: orders - sql_table: ECOM.ORDERS - - joins: - - name: users - sql: "{CUBE}.USER_ID = {users}.USER_ID" - relationship: many_to_one - - dimensions: - - name: status - sql: STATUS - type: string - - - name: id - sql: ID - type: number - primary_key: true - - - name: created_at - sql: CREATED_AT - type: time - - - name: completed_at - sql: COMPLETED_AT - type: time - - measures: - - name: count - type: count -``` - -As you can see, we already have a `count` measure that we can use to calculate the total count of our orders. - -Let's add an additional measure to the `orders` cube to calculate only **completed orders**. -The `status` dimension in the `orders` cube reflects the three possible statuses: **processing**, **shipped**, or **completed**. -We will create a new measure `completed_count` by using a filter on that dimension. 
-To do this, we will use a [filter parameter](/schema/reference/measures#parameters-filters) of the measure -and [refer](/data-modeling/syntax#referring-to-objects) to the existing dimension. - -Add the following measure definition to your `model/cubes/orders.yml` file. It should be included within the `measures` block. - -```yaml -- name: completed_count - type: count - filters: - - sql: "{CUBE}.status = 'completed'" -``` - -With these two measures in place, `count` and `completed_count`, we can create a **derived measure**. Derived measures are measures that you can create based on existing measures. Let's create the `completed_percentage` derived measure. - -Add the following measure definition to your `model/cubes/orders.yml` file within the `measures` block. - -```yaml -- name: completed_percentage - type: number - sql: "({completed_count} / NULLIF({count}, 0)) * 100.0" - format: percent -``` - -Below you can see what your updated `orders` cube should look like with two new measures. Feel free to copy this code and paste it into your `model/cubes/orders.yml` file. - -```yaml -cubes: - - name: orders - sql_table: ECOM.ORDERS - - joins: - - name: users - sql: "{CUBE}.USER_ID = {users}.USER_ID" - relationship: many_to_one - - dimensions: - - name: status - sql: STATUS - type: string - - - name: id - sql: ID - type: number - primary_key: true - - - name: created_at - sql: CREATED_AT - type: time - - - name: completed_at - sql: COMPLETED_AT - type: time - - measures: - - name: count - type: count - - - name: completed_count - type: count - filters: - - sql: "{CUBE}.status = 'completed'" - - - name: completed_percentage - type: number - sql: "({completed_count} / NULLIF({count}, 0)) * 100.0" - format: percent -``` - -Click Save All in the upper corner to save changes to the data model. Now, you can navigate to Cube’s Playground. The Playground is a web-based tool that allows you to query your data without connecting any tools or writing any code.
It's the fastest way to explore and test your data model. - -You can select measures and dimensions from different cubes in playground, including your newly created `completed_percentage` measure. - -## Working with views - -When building views, we recommend following entity-oriented design and structuring your views around your business entities. Usually, cubes tend to be normalized entities without duplicated or redundant members, while views are denormalized entities where you pick as many measures and dimensions from multiple cubes as needed to describe a business entity. - -Let's create our first view, which will provide all necessary measures and dimensions to explore orders. Views are usually located in the `views` folder and have a `_view` postfix. - -Create `model/views/orders_view.yml` with the following content: - -```yaml -views: - - name: orders_view - - cubes: - - join_path: orders - includes: - - status - - created_at - - count - - completed_count - - completed_percentage - - - join_path: orders.users - prefix: true - includes: - - city - - age - - state -``` - -When building views, you can leverage the `cubes` parameter, which enables you to include measures and dimensions from other cubes in the view. You can build your view by combining multiple joined cubes and specifying the path by which they should be joined for that particular view. - -After saving, you can experiment with your newly created view in the Playground. In the next section, we will learn how to query our `orders_view` using a BI tool. 
diff --git a/docs/content/Getting-Started/Cloud/05-Add-a-pre-aggregation.mdx b/docs/content/Getting-Started/Cloud/05-Add-a-pre-aggregation.mdx deleted file mode 100644 index 2fd3a15ed4cf2..0000000000000 --- a/docs/content/Getting-Started/Cloud/05-Add-a-pre-aggregation.mdx +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Add a pre-aggregation -permalink: /getting-started/cloud/add-a-pre-aggregation -category: Getting Started -subCategory: Cube Cloud -menuOrder: 6 ---- - - - -[Join our upcoming Office Hours on July 26 at 9am PST](https://cube.dev/events/unlock-data-cube-snowflake) on Getting Started with Cube Cloud and Snowflake. -Learn how to easily connect Cube Cloud and Snowflake, load your data, and integrate your BI tools. - - - -In this step, we'll add a pre-aggregation to optimize the performance of a -specific query. Pre-aggregations are a caching technique that massively reduces -query time from seconds to milliseconds. They are extremely useful for speeding -up queries that are run frequently. - -From the Developer Playground page, execute a query: - - - -Just above the results, click on Query was not accelerated with -pre-aggregation to bring up the Rollup Designer: - - - -The Rollup Designer will automatically suggest a pre-aggregation for the query; -click Add to the Data Model and then retry the query in the -Playground. This time, the query should be accelerated with a pre-aggregation. - -It takes a bit of time to build a pre-aggregation, so the first run might not -necessarily be fast. Click Rerun query a few times until you see a -subsecond query time. - - - -And with that, we conclude our Getting Started with Cube Cloud guide. If you'd -like to learn more about Cube Cloud, [check out this page][next].
- -[next]: /getting-started/cloud/learn-more diff --git a/docs/content/Getting-Started/Cloud/05-Query-from-BI.mdx b/docs/content/Getting-Started/Cloud/05-Query-from-BI.mdx deleted file mode 100644 index 45789661ad3fc..0000000000000 --- a/docs/content/Getting-Started/Cloud/05-Query-from-BI.mdx +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: Query from BI -permalink: /getting-started/cloud/query-from-BI -category: Getting Started -subCategory: Cube Cloud -menuOrder: 5 ---- - - - -[Join our upcoming Office Hours on July 26 at 9am PST](https://cube.dev/events/unlock-data-cube-snowflake) on Getting Started with Cube Cloud and Snowflake. -Learn how to easily connect Cube Cloud and Snowflake, load your data, and integrate your BI tools. - - - -You can query Cube using a BI or visualization tool through the Cube SQL API. -To provide a good end-user experience in your BI tool, we recommend mapping the BI's data model to Cube's semantic layer. -This can be done automatically with Semantic Layer Sync or manually. - -## Semantic Layer Sync - -Semantic Layer Sync programmatically connects a BI tool to Cube and creates or updates BI-specific entities -that correspond to entities within the data model in Cube, such as cubes, views, measures, and dimensions. - - - -Semantic Layer Sync will synchronize all public cubes and views with connected BI tools. -We recommend making your cubes private and only exposing views. Both cubes and views are public by default. -To make cubes private, set the [public](/schema/reference/cube#parameters-public) parameter to `false`. - -```yaml -cubes: - - name: orders - sql_table: ECOM.ORDERS - public: false -``` - -Let’s create our first Semantic Layer Sync with [Apache Superset](https://superset.apache.org/)! - -You can create a new sync by navigating to the Semantic Layer Sync tab on the BI Integrations page and clicking + Create Sync. -Follow the steps in the wizard to create a sync. 
- -Under the hood, Semantic Layer Sync is configured using the `semanticLayerSync` option in the `cube.js` configuration file. - -Cube uses the Superset API, which requires a `user` and `password` for authentication. -You can use your own username and password or create a new service account. You can copy a `URL` from any page of your Superset workspace. - -Example `cube.js` configuration file for Superset: - -```javascript -module.exports = { - semanticLayerSync: () => { - return [{ - type: "superset", - name: "Superset Sync", - config: { - user: "mail@example.com", - password: "4dceae-606a03-93ae6dc7", - url: "superset.example.com", - } - }]; - } -}; -``` - -Replace the fields for user, password, and URL with your Superset credentials, then click on Save All. -You can now go to the BI Integrations page and trigger the synchronization of your newly created semantic layer. - -After running the sync, navigate to your Superset instance. You should see the `orders_view` dataset that was created in Superset. -Cube automatically maps all metrics and dimensions in Superset to measures and dimensions in the Cube data model. - -## Manual Setup - -Alternatively, you can connect to Cube and create all the mappings manually. -To do this, navigate to your Apache Superset instance and connect to Cube Cloud as if it were a Postgres database. - -You can find the credentials to connect to Cube on the BI Integrations page under the SQL API Connection tab. - -After connecting, create a new dataset in Superset and select "orders_view" as a table. -Now you can map Superset metrics and columns to Cube's measures and dimensions. - - - -As you can see, we use the `MEASURE` function in the "SQL expression" field. This function informs Cube that we are querying the measure and that it should be evaluated based on Cube's data model. You can now query Cube from Superset, as shown in the image below.
- - - -In the next section, we will learn how to use Cube's REST API to query our view from a React app. diff --git a/docs/content/Getting-Started/Cloud/06-Learn-more.mdx b/docs/content/Getting-Started/Cloud/06-Learn-more.mdx deleted file mode 100644 index 1b23bbb0b015a..0000000000000 --- a/docs/content/Getting-Started/Cloud/06-Learn-more.mdx +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Learn more -permalink: /getting-started/cloud/learn-more -category: Getting Started -subCategory: Cube Cloud -menuOrder: 7 ---- - - - -[Join our upcoming Office Hours on July 26 at 9am PST](https://cube.dev/events/unlock-data-cube-snowflake) on Getting Started with Cube Cloud and Snowflake. -Learn how to easily connect Cube Cloud and Snowflake, load your data, and integrate your BI tools. - - - -Now that you've set up your first deployment, learn more about other Cube Cloud -features. - -## Access Control - -Invite your team to collaborate in Cube Cloud and fine-tune their access to -deployments and features with -[role-based access control](/cloud/access-control/). You can create and manage -multiple deployments, each with their own set of users. - -## Team collaboration - -Cube Cloud has a web-based [data model editor](/cloud/workspace/cube-ide) that -can be used to make changes to data models and test them in a development -environment. - -You can also connect [Cube Cloud to Git](/cloud/deploys) to sync and deploy from -your repository. - -## Observability - -The [Queries](/cloud/inspecting-queries) page in Cube Cloud provides query-level -observability, allowing you to detect and remedy performance issues with -pre-aggregations where possible. - -Cube Cloud also allows [setting up alerts](/cloud/workspace/alerts), so you can -be notified if something goes wrong with your deployment.
diff --git a/docs/content/Getting-Started/Cloud/06-Query-from-React.mdx b/docs/content/Getting-Started/Cloud/06-Query-from-React.mdx deleted file mode 100644 index 63e4c8169161d..0000000000000 --- a/docs/content/Getting-Started/Cloud/06-Query-from-React.mdx +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: Query from React app -permalink: /getting-started/cloud/query-from-react-app -category: Getting Started -subCategory: Cube Cloud -menuOrder: 6 ---- - -Cube offers both [REST](/http-api/rest) and [GraphQL](/http-api/graphql) APIs, which can be used to query data from applications built in React or other frontend frameworks. - -You can find your REST API endpoint on the Overview page. In development mode, Cube creates an isolated endpoint for testing data model changes without affecting production. -The structure of your REST API endpoint in development mode should follow the format below. - -```yaml -https://..cubecloudapp.dev/dev-mode//cubejs-api/v1 -``` - -To test your REST API from your terminal, you can use [curl](https://curl.se/). Click on “How to connect your application” next to the REST API, and it will display a code snippet that you can run in your terminal to test the endpoint with curl. - - - -Cube offers a frontend JavaScript SDK, as well as a React integration that you can use in your application. - -First, you’ll need to install two packages from `npm`: - -- [@cubejs-client/core](https://www.npmjs.com/package/@cubejs-client/core) -- [@cubejs-client/react](https://www.npmjs.com/package/@cubejs-client/react) - -Next, initialize `cubejsApi` within your application. - -Please note that you must sign your request with the correct authentication token. Cube uses the [JSON Web Token (JWT)](https://jwt.io/) standard by default to authenticate requests. You can copy a temporary token from the "How to connect to your application" modal window. For production use, you must generate this token from your secret key. 
You can learn more about this in the [Authentication & Authorization](/security) section of the documentation. - -```jsx -import cubejs from '@cubejs-client/core'; - -const cubejsApi = cubejs( - 'your-token', - { apiUrl: 'https://..cubecloudapp.dev/dev-mode//cubejs-api/v1' } -); -``` - -The Cube React package includes a `CubeProvider` that can be used in your React application. - -```jsx -import { CubeProvider } from '@cubejs-client/react'; - - - // your application - -``` - -Finally, you can use the `useCubeQuery` hook to load data from Cube into your React application. - -```jsx -import { useCubeQuery } from '@cubejs-client/react'; -... -const { resultSet, isLoading, error, progress } = useCubeQuery({ - "measures": ["orders_view.completed_count"], - "timeDimensions": [ - { - "dimension": "orders_view.created_at", - "granularity": "month" - } - ] -}); -``` - -For more information on the Cube JavaScript frontend package and integration with React, please refer to the documentation. - -You can also explore example applications built with React on top of the Cube REST API, along with their source code. - -- [React with Highcharts](https://highcharts-demo.cube.dev/#/) -- [React with AG Grid](https://react-pivot-table-demo.cube.dev/#/) -- [React query builder](https://react-dashboard-demo.cube.dev/#/) \ No newline at end of file diff --git a/docs/content/Getting-Started/Core/01-Overview.mdx b/docs/content/Getting-Started/Core/01-Overview.mdx deleted file mode 100644 index 55734d4d9afc6..0000000000000 --- a/docs/content/Getting-Started/Core/01-Overview.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Getting started with Cube Core -permalink: /getting-started/core/overview -category: Getting Started -subCategory: Cube Core -menuOrder: 2 -redirect_from: - - /getting-started-docker - - /getting-started/docker ---- - -First, we'll create a new project, connect it to a database and generate a data -model from it. 
Then, we'll run queries using the Developer Playground and APIs. -Finally, we'll add a pre-aggregation to optimize query latency down to -milliseconds. - -This guide will walk you through the following tasks: - -- [Create a new project](/getting-started/core/create-a-project) -- [Run queries using the Developer Playground and APIs](/getting-started/core/query-data) -- [Add a pre-aggregation to optimize query performance](/getting-started/core/add-a-pre-aggregation) - -If you'd prefer to try Cube Cloud, then you can refer to [Getting Started using -Cube Cloud][ref-getting-started-cloud-overview] instead. - -[ref-getting-started-cloud-overview]: /getting-started/cloud/overview diff --git a/docs/content/Getting-Started/Core/02-Create-a-project.mdx b/docs/content/Getting-Started/Core/02-Create-a-project.mdx deleted file mode 100644 index 4628185010b2c..0000000000000 --- a/docs/content/Getting-Started/Core/02-Create-a-project.mdx +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: Create a project -permalink: /getting-started/core/create-a-project -category: Getting Started -subCategory: Cube Core -menuOrder: 3 ---- - -In this step, we will create a Cube Core project on your computer, connect a -data source, and generate data models. - -## Scaffold a project - -Start by opening your terminal to create a new folder for the project, then -create a `docker-compose.yml` file within it: - -```bash{promptUser: user} -mkdir my-first-cube-project && cd $_ -touch docker-compose.yml -``` - -Open the `docker-compose.yml` file and add the following content: - -```yaml -version: '2.2' - -services: - cube: - image: cubejs/cube:latest - ports: - - 4000:4000 - - 15432:15432 - environment: - - CUBEJS_DEV_MODE=true - volumes: - - .:/cube/conf -``` - -Note that we're setting the `CUBEJS_DEV_MODE` environment variable to `true` to -enable the [Development Mode](/configuration/overview#development-mode). 
This is -handy for local development but not suitable for -[production](/deployment/production-checklist). - - - -If you're using Linux as the Docker host OS, you'll also need to add -`network_mode: 'host'` to your `docker-compose.yml`. - - - -## Start the development server - -From the newly-created project directory, run the following command to start -Cube: - -```bash{promptUser: user} -docker compose up -d -``` - - - -Using Windows? Remember to use [PowerShell][powershell-docs] or -[WSL2][wsl2-docs] to run the command below. - - - -## Connect a data source - -Head to [http://localhost:4000](http://localhost:4000) to open the [Developer -Playground][ref-devtools-playground]. - -The Playground has a database connection wizard that loads when Cube is first -started up and no `.env` file is found. After database credentials have been set -up, an `.env` file will automatically be created and populated with credentials. - - - - - -Want to use a sample database instead? Select PostgreSQL and use the -credentials below: - -
- -| Field | Value | -| -------- | ------------------ | -| Host | `demo-db.cube.dev` | -| Port | `5432` | -| Database | `ecom` | -| Username | `cube` | -| Password | `12345` | - -
- -After selecting the data source, enter valid credentials for it and -click Apply. Check the [Connecting to Databases][ref-conf-db] -page for more details on specific data sources. - - - -You should see tables available to you from the configured database; select the -`orders` table. After selecting the table, click Generate Data -Model and pick either YAML (recommended) -or JavaScript format: - - - -Finally, click Build in the dialog, which should take you to -the Build page. - -You're now ready for the next step, [querying the -data][ref-getting-started-core-query-cube]. - -[powershell-docs]: https://learn.microsoft.com/en-us/powershell/ -[ref-conf-db]: /config/databases -[ref-getting-started-core-query-cube]: /getting-started/core/query-data -[ref-devtools-playground]: /dev-tools/dev-playground -[wsl2-docs]: https://learn.microsoft.com/en-us/windows/wsl/install diff --git a/docs/content/Getting-Started/Core/03-Query-data.mdx b/docs/content/Getting-Started/Core/03-Query-data.mdx deleted file mode 100644 index 26dc83032cf86..0000000000000 --- a/docs/content/Getting-Started/Core/03-Query-data.mdx +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Query data -permalink: /getting-started/core/query-data -category: Getting Started -subCategory: Cube Core -menuOrder: 4 ---- - -In this step, you will learn how to query your data using the data models you -created in the previous step. Cube provides several ways to query your data, and -we'll go over them here. - -## Playground - -[Playground](/dev-tools/dev-playground) is a web-based tool which allows for -model generation and data exploration. On the Build tab, you can -select the measures and dimensions, and then run the query. Let's do this for -the `orders` cube you generated in the previous step. 
- -Click + Measure to display the available measures and add -`orders.count`, then click + Dimension for available dimensions -and add `orders.status`: - - - -Then, click Run to execute the query and see the results: - - - -Please feel free to experiment: select other measures or dimensions, pick a -granularity for the time dimension instead of w/o grouping, or -choose another chart type instead of Table. - -## APIs and integrations - -Cube provides a [rich set of options][ref-downstream] to deliver data to other -tools: a suite of APIs, [JavaScript SDKs][ref-frontend-int], and integrations. - -Connectivity to BI tools and data notebooks is enabled by the [SQL -API][ref-sql-api] which is Postgres-compatible: if something connects to -Postgres, it will work with Cube. Check the Connect to BI tab -for connection instructions for specific BI tools: - - - -Connectivity to data applications is enabled by the [REST API][ref-rest-api] and -the [GraphQL API][ref-graphql-api] as well as [JavaScript -SDKs][ref-frontend-int]. Check the Frontend Integrations tab for -usage instructions for these APIs: - - - -Now that we've seen how to use Cube's APIs, let's take a look at [how to add -pre-aggregations][next] to speed up your queries. 
- -[ref-graphql-api]: /backend/graphql -[ref-rest-api]: /rest-api -[ref-downstream]: /config/downstream -[ref-frontend-int]: /frontend-introduction -[ref-sql-api]: /backend/sql -[next]: /getting-started/core/add-a-pre-aggregation diff --git a/docs/content/Getting-Started/Core/04-Add-a-pre-aggregation.mdx b/docs/content/Getting-Started/Core/04-Add-a-pre-aggregation.mdx deleted file mode 100644 index b40e196ade0c1..0000000000000 --- a/docs/content/Getting-Started/Core/04-Add-a-pre-aggregation.mdx +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Add a pre-aggregation -permalink: /getting-started/core/add-a-pre-aggregation -category: Getting Started -subCategory: Cube Core -menuOrder: 5 ---- - -In this step, we'll add a pre-aggregation to optimize the performance of a -specific query. Pre-aggregations are a caching technique that massively reduces -query time from seconds to milliseconds. They are extremely useful for speeding -up queries that are run frequently. - -From the Build tab, execute a query: - - - -Just above the results, click on Query was not accelerated with -pre-aggregation to bring up the Rollup Designer: - - - -The Rollup Designer will automatically suggest a pre-aggregation for the query; -click Add to the Data Model and then retry the query in the -Playground. This time, the query should be accelerated with a pre-aggregation: - - - -And with that, we conclude our Getting Started with Cube guide. If you'd like to -learn more about Cube, [check out this page][next]. 
- -[next]: /getting-started/core/learn-more diff --git a/docs/content/Getting-Started/Core/05-Learn-more.mdx b/docs/content/Getting-Started/Core/05-Learn-more.mdx deleted file mode 100644 index 9bd0238b9e13d..0000000000000 --- a/docs/content/Getting-Started/Core/05-Learn-more.mdx +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Learn more -permalink: /getting-started/core/learn-more -category: Getting Started -subCategory: Cube Core -menuOrder: 6 ---- - -Now that you've set up your first project, learn more about what else Cube can -do for you. - -## Data Modeling - -Learn more about [data modeling](/schema/fundamentals/concepts) and how to -effectively define metrics in your data models. Advanced users: check out -[related recipes](/recipes#recipes-data-schema) for tackling specific data -modeling problems. - -## Querying - -Cube can be queried in a variety of ways. Explore how to use -[REST API](/rest-api), [GraphQL API](/backend/graphql), and -[SQL API](/backend/sql), or how to -[connect a BI or data visualization tool](/config/downstream). - -## Caching - -Learn more about the [two-level cache](/docs/caching) and how -[pre-aggregations help speed up queries](/caching/pre-aggregations/getting-started). -For a deeper dive, take a look at the -[related recipes](/recipes#recipes-query-acceleration). - -## Access Control - -Cube uses [JSON Web Tokens](https://jwt.io/) to -[authenticate requests for the HTTP APIs](/security), and -[`checkSqlAuth()`](/config#check-sql-auth) to -[authenticate requests for the SQL API](/backend/sql/security). Cube also -supports using JWT claims for securing access to data, check out these -[related recipes](/recipes#recipes-access-control). 
diff --git a/docs/content/Getting-Started/Migrate-from-Core/Import-Bitbucket-repository-via-SSH.mdx b/docs/content/Getting-Started/Migrate-from-Core/Import-Bitbucket-repository-via-SSH.mdx deleted file mode 100644 index f2969e485a703..0000000000000 --- a/docs/content/Getting-Started/Migrate-from-Core/Import-Bitbucket-repository-via-SSH.mdx +++ /dev/null @@ -1,210 +0,0 @@ ---- -title: 'Import a Bitbucket repository to Cube Cloud' -permalink: /cloud/getting-started/ssh/bitbucket ---- - -This guide walks you through setting up Cube Cloud, importing a -[Bitbucket][bitbucket] repository with an existing Cube project via SSH, and -connecting to your database. - -## Step 1: Create an account - -Navigate to [cubecloud.dev](https://cubecloud.dev/), and create a new Cube Cloud -account. - -## Step 2: Create a new Deployment - -Click Create Deployment. This is the first step in the -deployment creation. Give it a name and select the cloud provider and region of -your choice. - -
- Cube Cloud Create Deployment Screen -
- - - -Microsoft Azure is available in Cube Cloud on -[Premium](https://cube.dev/pricing) tier. [Contact us](https://cube.dev/contact) -for details. - - - -## Step 3: Import Git repository - -Next up, the second step in creating a Cube App from scratch in Cube Cloud is to -click Import Git repository via SSH. - -
- Cube Cloud Upload Project Screen -
- -Now go to your Bitbucket repository and on the Clone dialog, -switch to SSH and copy the URL: - -
- Getting the repository's SSH URL from Bitbucket -
- -Back in Cube Cloud, paste the URL and click Generate SSH key: - -
- Getting SSH key from Cube Cloud -
- -Now copy the SSH key and go back to Bitbucket and into the repository's -settings. Click Access keys from the navigation, then click - Add key button. Give the key a label (`Cube Cloud`, for -example) and paste the SSH key in the relevant field: - -
- Add Cube Cloud deploy key to Bitbucket -
- -Click Add SSH key to save, then go back to Cube Cloud and -click Connect. After a connection is successfully established, -you should see the next screen: - -
- Getting webhook URL from Cube Cloud -
- -Copy the Cube Cloud Git Webhook URL, go to your Bitbucket -repository's settings, and click Webhooks from the navigation. -Click Add webhook, then add a title (`Cube Cloud`, for example). -Paste the URL into the correct field, ensure the Repository > -Push trigger is checked and click Save. - -
- Add Cube Cloud webhook to Bitbucket -
- -Back in Cube Cloud, click Connect to test the webhook. - -## Step 4: Connect your Database - -Enter your credentials to connect to your database. Check the [connecting to -databases][link-connecting-to-databases] guide for more details. - - - -Want to use a sample database instead? We also have a sample database where you -can try out Cube Cloud: - -
- -| Field | Value | -| -------- | ------------------ | -| Host | `demo-db.cube.dev` | -| Port | `5432` | -| Database | `ecom` | -| Username | `cube` | -| Password | `12345` | - -
- -
- Cube Cloud Setup Database Screen -
- -In the UI it'll look exactly like the image below. - -
- Cube Cloud Setup Database Screen -
- -If you run into issues here, make sure to allow the Cube Cloud IPs to access -your database. This means you need to enable these IPs in your firewall. If you -are using AWS, this would mean adding a security group with allowed IPs. - -## Step 5: Generate the Data Model - -Step five in this case consists of generating data models. Start by selecting -the database tables to generate the data models from, then -hit Generate. - -
- Cube Cloud Setup Database Screen -
- -Cube Cloud will generate the data models and spin up your Cube deployment. With -this, you're done. You've created a Cube deployment, configured a database -connection, and generated data models! - -
- Cube Cloud Setup Database Screen -
- -You're ready for the last step, running queries in the Playground. - -## Step 6: Try out Cube Cloud - -Now you can navigate to Playground to try out your queries or connect your -application to the Cube Cloud API. - -
- Cube Cloud Playground -
- -[bitbucket]: https://bitbucket.org/ -[link-connecting-to-databases]: /config/databases diff --git a/docs/content/Getting-Started/Migrate-from-Core/Import-Git-repository-via-SSH.mdx b/docs/content/Getting-Started/Migrate-from-Core/Import-Git-repository-via-SSH.mdx deleted file mode 100644 index 9fe7ff1d21ea2..0000000000000 --- a/docs/content/Getting-Started/Migrate-from-Core/Import-Git-repository-via-SSH.mdx +++ /dev/null @@ -1,172 +0,0 @@ ---- -title: 'Import a Git repository to Cube Cloud' -permalink: /cloud/getting-started/ssh/git ---- - -This guide walks you through setting up Cube Cloud, importing a generic Git -repository with an existing Cube project via SSH, and connecting to your -database. - -## Step 1: Create an account - -Navigate to [cubecloud.dev](https://cubecloud.dev/), and create a new Cube Cloud -account. - -## Step 2: Create a new Deployment - -Click Create Deployment button. This is the first step in the -deployment creation. Give it a name and select the cloud provider and region of -your choice. - -
- Cube Cloud Create Deployment Screen -
- - - -Microsoft Azure is available in Cube Cloud on -[Premium](https://cube.dev/pricing) tier. [Contact us](https://cube.dev/contact) -for details. - - - -## Step 3: Import Git repository - -Next up, the second step in creating a Cube App from scratch in Cube Cloud is to -click Import Git repository via SSH button. - -
- Cube Cloud Upload Project Screen -
- -Enter the SSH URL of your Git repository, and click Generate SSH -key: - -
- Getting SSH key from Cube Cloud -
- -Copy the SSH key and add it to your hosted Git repository. Back in Cube Cloud, -click Connect. After a connection is successfully established, -you should see the next screen: - -
- Getting webhook URL from Cube Cloud -
- -Copy the Cube Cloud Git Webhook URL and add it to your hosted Git -repository's webhooks. Ensure that the Git repository can push events which -should trigger a build on Cube Cloud. Back in Cube Cloud, -click Connect to test the webhook. - -## Step 4: Connect your Database - -Enter your credentials to connect to your database. Check the [connecting to -databases][link-connecting-to-databases] guide for more details. - - - -Want to use a sample database instead? We also have a sample database where you -can try out Cube Cloud: - -
- -| Field | Value | -| -------- | ------------------ | -| Host | `demo-db.cube.dev` | -| Port | `5432` | -| Database | `ecom` | -| Username | `cube` | -| Password | `12345` | - -
- -
- Cube Cloud Setup Database Screen -
- -In the UI it'll look exactly like the image below. - -
- Cube Cloud Setup Database Screen -
- -If you run into issues here, make sure to allow the Cube Cloud IPs to access -your database. This means you need to enable these IPs in your firewall. If you -are using AWS, this would mean adding a security group with allowed IPs. - -## Step 5: Generate the Data Model - -Step five in this case consists of generating data models. Start by selecting -the database tables to generate the data models from, then -hit Generate. - -
- Cube Cloud Setup Database Screen -
- -Cube Cloud will generate the data models and spin up your Cube deployment. With -this, you're done. You've created a Cube deployment, configured a database -connection, and generated data models! - -
- Cube Cloud Setup Database Screen -
- -You're ready for the last step, running queries in the Playground. - -## Step 6: Try out Cube Cloud - -Now you can navigate to Playground to try out your queries or connect your -application to the Cube Cloud API. - -
- Cube Cloud Playground -
- -[link-connecting-to-databases]: /config/databases diff --git a/docs/content/Getting-Started/Migrate-from-Core/Import-GitHub-repository.mdx b/docs/content/Getting-Started/Migrate-from-Core/Import-GitHub-repository.mdx deleted file mode 100644 index 3e75d21366193..0000000000000 --- a/docs/content/Getting-Started/Migrate-from-Core/Import-GitHub-repository.mdx +++ /dev/null @@ -1,149 +0,0 @@ ---- -title: 'Import a GitHub repository to Cube Cloud' -permalink: /cloud/getting-started/github ---- - -This guide walks you through setting up Cube Cloud, importing a [GitHub][github] -repository with an existing Cube project, and connecting to your database. - -## Step 1: Create an account - -Navigate to [cubecloud.dev](https://cubecloud.dev/), and create a new Cube Cloud -account. - -## Step 2: Create a new Deployment - -Click Create Deployment. This is the first step in the deployment -creation. Give it a name and select the cloud provider and region of your -choice. - -
- Cube Cloud Create Deployment Screen -
- - - -Microsoft Azure is available in Cube Cloud on -[Premium](https://cube.dev/pricing) tier. [Contact us](https://cube.dev/contact) -for details. - - - -## Step 3: Import repository from GitHub - -Next up, click Import from a GitHub repository. This will pop open a -GitHub app authorization dialog where you must first install the Cube Cloud -GitHub app to the desired organization. After authorizing Cube Cloud to access -your GitHub repository, you should see the following screen in Cube Cloud: - -
- Cube Cloud Import from GitHub screen -
- -Select your repository and the desired branch to use as the default. If the -repository is a monorepo, you should also provide the path to the Cube project -within it. Deployments connected to a Git monorepo will trigger a rebuild -**only** when committed files begin with the provided path. - -## Step 4: Connect your Database - -Enter your credentials to connect to your database. Check the [connecting to -databases][link-connecting-to-databases] guide for more details. - - - -Want to use a sample database instead? We also have a sample database where you -can try out Cube Cloud: - -
- -| Field | Value | -| -------- | ------------------ | -| Host | `demo-db.cube.dev` | -| Port | `5432` | -| Database | `ecom` | -| Username | `cube` | -| Password | `12345` | - -
- -
- Cube Cloud Setup Database Screen -
- -In the UI it'll look exactly like the image below. - -
- Cube Cloud Setup Database Screen -
- -If you run into issues here, make sure to allow the Cube Cloud IPs to access -your database. This means you need to enable these IPs in your firewall. If you -are using AWS, this would mean adding a security group with allowed IPs. - -## Step 5: Generate the Data Model - -Step five in this case consists of generating data models. Start by selecting -the database tables to generate the data models from, then -hit Generate. - -
- Cube Cloud Setup Database Screen -
- -Cube Cloud will generate the data models and spin up your Cube deployment. With -this, you're done. You've created a Cube deployment, configured a database -connection, and generated data models! - -
- Cube Cloud Setup Database Screen -
- -You're ready for the last step, running queries in the Playground. - -## Step 6: Try out Cube Cloud - -Now you can navigate to Playground to try out your queries or connect your -application to Cube Cloud API. - -
- Cube Cloud Playground -
- -[github]: https://github.com -[link-connecting-to-databases]: /config/databases diff --git a/docs/content/Getting-Started/Migrate-from-Core/Import-GitLab-repository-via-SSH.mdx b/docs/content/Getting-Started/Migrate-from-Core/Import-GitLab-repository-via-SSH.mdx deleted file mode 100644 index 39aef5789f779..0000000000000 --- a/docs/content/Getting-Started/Migrate-from-Core/Import-GitLab-repository-via-SSH.mdx +++ /dev/null @@ -1,210 +0,0 @@ ---- -title: 'Import a GitLab repository to Cube Cloud' -permalink: /cloud/getting-started/ssh/gitlab ---- - -This guide walks you through setting up Cube Cloud, importing a [GitLab][gitlab] -repository with an existing Cube project via SSH, and connecting to your -database. - -## Step 1: Create an account - -Navigate to [cubecloud.dev](https://cubecloud.dev/), and create a new Cube Cloud -account. - -## Step 2: Create a new Deployment - -Click Create Deployment. This is the first step in the deployment -creation. Give it a name and select the cloud provider and region of your -choice. - -
- Cube Cloud Create Deployment Screen -
- - - -Microsoft Azure is available in Cube Cloud on -[Premium](https://cube.dev/pricing) tier. [Contact us](https://cube.dev/contact) -for details. - - - -## Step 3: Import Git repository - -Next up, the second step in creating a Cube App from scratch in Cube Cloud is to -click Import Git repository via SSH. - -
- Cube Cloud Upload Project Screen -
- -Now go to your GitLab repository and from the Clone dropdown menu, -copy the Clone with SSH URL: - -
- Getting the repository's SSH URL from GitLab -
- -Back in Cube Cloud, paste the URL and click Generate SSH key: - -
- Getting SSH key from Cube Cloud -
- -Now copy the SSH key and go back to GitLab and paste it into the repository's -settings. Find the Deploy keys section and click Expand. -Give the key a title (`Cube Cloud`, for example) and paste the SSH key in the -relevant field: - -
- Add Cube Cloud deploy key to GitLab -
- -Ensure Grant write permissions to this key is checked, then -click Add key. Go back to Cube Cloud and -click Connect. After a connection is successfully established, -you should see the next screen: - -
- Getting webhook URL from Cube Cloud -
- -Copy the Cube Cloud Git Webhook URL and go to your GitLab -project's Webhooks settings. Paste the URL into the correct -field, ensure the Push events trigger is checked and -click Add webhook. - -
- Add Cube Cloud webhook to GitLab -
- -Back in Cube Cloud, click Connect to test the webhook. - -## Step 4: Connect your Database - -Enter your credentials to connect to your database. Check the [connecting to -databases][link-connecting-to-databases] guide for more details. - - - -Want to use a sample database instead? We also have a sample database where you -can try out Cube Cloud: - -
- -| Field | Value | -| -------- | ------------------ | -| Host | `demo-db.cube.dev` | -| Port | `5432` | -| Database | `ecom` | -| Username | `cube` | -| Password | `12345` | - -
- -
- Cube Cloud Setup Database Screen -
- -In the UI it'll look exactly like the image below. - -
- Cube Cloud Setup Database Screen -
- -If you run into issues here, make sure to allow the Cube Cloud IPs to access -your database. This means you need to enable these IPs in your firewall. If you -are using AWS, this would mean adding a security group with allowed IPs. - -## Step 5: Generate the Data Model - -Step five in this case consists of generating data models. Start by selecting -the database tables to generate the data models from, then -hit Generate. - -
- Generating data models for a new Cube Cloud deployment -
- -Cube Cloud will generate the data models and spin up your Cube deployment. With -this, you're done. You've created a Cube deployment, configured a database -connection, and generated data models! - -
- Cube Cloud Playground -
- -You're ready for the last step, running queries in the Playground. - -## Step 6: Try out Cube Cloud - -Now you can navigate to Playground to try out your queries or connect your -application to the Cube Cloud API. - -
- Cube Cloud Playground -
- -[gitlab]: https://gitlab.com/ -[link-connecting-to-databases]: /config/databases diff --git a/docs/content/Getting-Started/Migrate-from-Core/Upload-with-CLI.mdx b/docs/content/Getting-Started/Migrate-from-Core/Upload-with-CLI.mdx deleted file mode 100644 index cafafaf9b26f0..0000000000000 --- a/docs/content/Getting-Started/Migrate-from-Core/Upload-with-CLI.mdx +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: 'Import a local project to Cube Cloud with CLI' -permalink: /cloud/getting-started/cli ---- - -This guide walks you through setting up Cube Cloud, importing an existing Cube -project from a local machine via SSH, and connecting to your database. - - - -## Step 1: Create Deployment - -Click Create Deployment. Then give it a name and select the -cloud provider and region of your choice. - -
- Cube Cloud Create Deployment Screen -
- - - -Microsoft Azure is available in Cube Cloud on -[Premium](https://cube.dev/pricing) tier. [Contact us](https://cube.dev/contact) -for details. - - - -## Step 2: Upload your Cube project - -The next step is to upload your existing Cube project to the Cube Cloud. - -You can do it by running the following command from terminal in your Cube -project directory. - -```bash{promptUser: user} -npx cubejs-cli deploy --token -``` - -
- Cube Cloud Upload Project Screen -
- -## Step 3: Connect your Database - -Enter your credentials to connect to your database. Check the [connecting to -databases][ref-cloud-connecting-to-databases] guide for more details. - - - -Want to use a sample database instead? We also have a sample database where you -can try out Cube Cloud: - -
- -| Field | Value | -| -------- | ------------------ | -| Host | `demo-db.cube.dev` | -| Port | `5432` | -| Database | `ecom` | -| Username | `cube` | -| Password | `12345` | - -
- -
- Cube Cloud Setup Database Screen -
- -## Step 4: Try out Cube Cloud - -Now you can navigate to [the Playground][ref-cloud-playground] to try out your -queries or connect your application to Cube Cloud API. - -
- Cube Cloud Playground -
- -[ref-cloud-connecting-to-databases]: /config/databases -[ref-cloud-playground]: /cloud/dev-tools/dev-playground diff --git a/docs/content/Getting-Started/Overview.mdx b/docs/content/Getting-Started/Overview.mdx deleted file mode 100644 index 9e6df23e4671a..0000000000000 --- a/docs/content/Getting-Started/Overview.mdx +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Getting started with Cube -menuTitle: Overview -permalink: /getting-started -category: Getting Started -menuOrder: 1 -isDisableFeedbackBlock: false ---- - -Cube is a cloud-native application, packaged and distributed as Docker images -and designed to run in a containerized environment. - -You can get started with Cube in one of two ways. - -We recommend using [Cube Cloud][cube-cloud], our managed platform for Cube, -because it's the easiest way to build, test, deploy, and manage Cube projects. -Cube Cloud includes features such as collaboration for teams, a web-based data -model editor, auto-scaling, and observability. Cube Cloud also comes with a -free tier for development and proof-of-concept projects. - -Alternatively, you can run Cube on your own infrastructure with Docker. 
- - - - - -## Migrating from Cube Core to Cube Cloud - -Cube Cloud supports several ways for importing existing Cube projects: - -- [Import a GitHub repository](/cloud/getting-started/github) -- [Import a GitLab repository](/cloud/getting-started/ssh/gitlab) -- [Import a Bitbucket repository](/cloud/getting-started/ssh/bitbucket) -- [Import a Git repository](/cloud/getting-started/ssh/git) -- [Import a local project with CLI](/cloud/getting-started/cli) - -[cube-cloud]: https://cube.dev/cloud/ diff --git a/docs/content/Guides/Data-Store-Cost-Saving-Guide.mdx b/docs/content/Guides/Data-Store-Cost-Saving-Guide.mdx deleted file mode 100644 index a1233e90e69d8..0000000000000 --- a/docs/content/Guides/Data-Store-Cost-Saving-Guide.mdx +++ /dev/null @@ -1,165 +0,0 @@ ---- -title: Data Store Cost Saving Guide -permalink: /guides/data-store-cost-saving-guide -category: Guides -menuOrder: 2 ---- - -As a semantic layer, Cube supports various [data sources][ref-config-data-sources], -including cloud-based data warehouses and regular databases, and various -[use cases][ref-intro] such as embedded analytics and business intelligence. - -Depending on the upstream data source and the use case, Cube can be configured -in a way that provides the best economic effect and maximizes cost savings. - -## Reducing Data Store Usage - -Cloud-based data stores commonly use two pricing models: *on-demand*, when you -get charged for the storage and compute resources used to process each query, -or *flat-rate*, when queries consume a pre-agreed resource quota. - -Regardless of the pricing model, it's usually a good idea to implement -measures that reduce usage: - -- In case of the on-demand model, reduced usage will directly lead to a -reduced bill. -- In case of the flat-rate model, reduced usage will reduce resource -contention and leave more spare resources to other consumers (e.g., for ad-hoc -exploration) or allow reducing the allocated quota, leading to a reduced -bill.
- -Often, reducing data store usage allows cloud-based -[data warehouses][snowflake-auto-suspend] and -[regular databases][neon-auto-suspend] to more frequently auto-suspend their -compute resources or scale them down to zero. - -### Using pre-aggregations - -Cube queries upstream data sources for the following purposes: - -- Serving interactive queries, incoming to any of -[supported APIs][ref-config-apis], and populating the first, in-memory, layer -of caching. -- Populating pre-aggregations, the second layer of [caching][ref-caching]. -- Ensuring and checking the freshness of data in both layers of caching. - -[Pre-aggregations][ref-caching-using-pre-aggs] are one of Cube's core tenets. -They contain condensed subsets of source data, optimized for serving queries -more effectively than a data store is able to. Using pre-aggregations is the -most effective way to reduce data store usage and costs. - - - -When defining your data model or generally operating the semantic -layer, consider [inspecting your queries][ref-query-history] and -[configuring necessary pre-aggregations][ref-caching-using-pre-aggs] on a -periodic basis. - - - -### Preventing non-cached queries - -In some use cases, it's possible to configure pre-aggregations in a way that -they match all necessary queries and always serve them from cache. However, a -new or malformed query would still be able to hit the data store. Preventing -non-cached queries ensures that accidental spend can't happen. - -Cube supports the [rollup-only mode][ref-caching-rollup-only-mode] which -prevents queries from hitting the data store. Only queries served from -pre-aggregations are allowed; other queries are rejected. - - - -Consider using the rollup-only mode if all your queries are matched by -pre-aggregations and served from cache. Use [Query History][ref-query-history] -to check the status of any query. 
- - - -### Refreshing data timely - -Pre-aggregations are populated by Cube on [schedule][ref-pre-agg-refresh-key], -a [condition][ref-pre-agg-refresh-key], or an external trigger. Adjusting the -timing and frequency of pre-aggregation refresh in accordance with your use -case can substantially reduce the data store usage and costs. For example, if -you know that data is loaded and transformed in a data warehouse daily, it -makes sense to refresh pre-aggregations with the same cadence. - - - -Consider configuring the [refresh keys][ref-pre-agg-refresh-key] of -pre-aggregations according to your use case. - - - -In real-time use cases, when queries should return fresh data, it is still -possible to leverage pre-aggregations. Cube supports -[lambda pre-aggregations][ref-caching-lambda-pre-aggs] which transparently -combine cached data and recent data from the data store. Lambda -pre-aggregations can reduce data store usage in cases when queries span both -historical and recent data. - - - -Consider using [lambda pre-aggregations][ref-caching-lambda-pre-aggs] in -real-time use cases. - - - -### Refreshing data incrementally - -Often, [fact tables][wiki-fact-table] contain [time-series][wiki-time-series] -data that change in time by appending new rows, not updating the existing ones. -In that case, data can be partitioned by a time dimension and only metrics -calculated from the last partition would need to be updated when new data -arrives. For example, the volume of sales at Acme Corporation in year 2022 -doesn't change when new orders are made in 2023; however, the volume of sales -in year 2023 will be changing as new orders are made during that year. -Caching metrics for historical data and refreshing time-series data -incrementally can help drastically reduce data source usage.
- -Cube supports [partitioned and incrementally refreshed][ref-caching-partitioning-pre-aggs] -pre-aggregations that, on update, retrieve only the last partition of data -from the data store. - - - -Consider configuring [partitions][ref-caching-partitioning-pre-aggs] and using -incremental refresh for time-series data. Use the -[Pre-aggregations][ref-workspace-pre-aggs] view to check the status of every -partition. - - - -Moreover, Cube supports customizable update windows that -may span more than one partition, supporting use cases when relatively fresh -historical data is indeed being updated. For example, financial transactions -are often [settled in two or three business days][wiki-settlement-cycle]; -however, certain facts about transactions can change between their creation -and settlement—but never after a transaction was settled. In this case, it -makes sense to use an update window that spans from creation to settlement. - - - -Consider configuring wider [update windows][ref-caching-partitioning-pre-aggs] -when data outside the last partition can be updated. 
- - - -[ref-config-data-sources]: /config/databases -[ref-intro]: /introduction -[ref-caching]: /caching -[ref-pre-agg-refresh-key]: /schema/reference/pre-aggregations#parameters-refresh-key -[ref-config-apis]: /config/downstream#ap-is-references -[ref-caching-using-pre-aggs]: /caching/pre-aggregations/getting-started -[ref-query-history]: /cloud/inspecting-queries -[ref-caching-using-pre-aggs]: /caching/using-pre-aggregations -[ref-caching-rollup-only-mode]: /caching/using-pre-aggregations#rollup-only-mode -[ref-caching-lambda-pre-aggs]: /caching/pre-aggregations/lambda-pre-aggregations -[wiki-fact-table]: https://en.wikipedia.org/wiki/Fact_table -[wiki-time-series]: https://en.wikipedia.org/wiki/Time_series -[ref-caching-partitioning-pre-aggs]: /caching/using-pre-aggregations#partitioning -[wiki-settlement-cycle]: https://en.wikipedia.org/wiki/T%2B2 -[ref-workspace-pre-aggs]: /cloud/inspecting-pre-aggregations -[snowflake-auto-suspend]: https://docs.snowflake.com/en/user-guide/warehouses-overview#auto-suspension-and-auto-resumption -[neon-auto-suspend]: https://neon.tech/docs/introduction/autoscaling \ No newline at end of file diff --git a/docs/content/Guides/Style-Guide.mdx b/docs/content/Guides/Style-Guide.mdx deleted file mode 100644 index 51fe32e8695f4..0000000000000 --- a/docs/content/Guides/Style-Guide.mdx +++ /dev/null @@ -1,325 +0,0 @@ ---- -title: Cube Style Guide -menuTitle: Style Guide -permalink: /style-guide -category: Guides -menuOrder: 1 ---- - -This style guide includes best practices on data modeling in Cube. - -While Cube allows for certain flexibility with regards to data modeling, following this -fairly opinionated style guide helps create maintainable semantic layers and reduce effort -to support them in the long run. - -This style guide is intended to be used by: -* All users of Cube Cloud and self-hosted Cube Core deployments. -* Solution integrators from the [Cube Partner Network][cpn].
-* Cube team (for demo projects, documentation, and customer engagements). - -## Syntax - -* Default to [YAML syntax][ref-syntax-model] for data modeling. Use JavaScript syntax for dynamic data models only. -* Use [snake case][ref-syntax-naming] when using either YAML or JavaScript syntax. -* Follow the recommendations on [YAML syntax][self-yaml] and [SQL syntax][self-sql] below. - -## Folder structure - -* Put [cube and view files][ref-syntax-folder-structure] in `model/cubes` and `model/views` folders. -* Within these folders, create subfolders to reflect your business units structure. - -```tree -cube_project -└── model - ├── cubes - │ ├── finance - │ │ ├── stripe_invoices.yml - │ │ └── stripe_payments.yml - │ └── sales - │ └── base_opportunities.yml - └── views - ├── product - │ └── cloud_tenants.yml - └── sales - └── opportunities.yml -``` - - -## Cubes - -* Cubes should remain private; set [`public: false`][ref-public] for all cubes. Only views can be exposed to visualization tools. -* A cube's name should represent a business entity and be plural. If a cube's name may clash with a view's name, add the `base_` prefix to the cube's name, e.g., `base_opportunities.yml`. -* If possible, use [`sql_table`][ref-sql-table] instead of `sql`, i.e., use `sql_table: schema.table` instead of `sql: SELECT * FROM schema.table`. -* Use `many_to_one`, `one_to_many`, `one_to_one` [join relationship types][ref-join-rel] instead of `belongs_to`, `has_many`, `has_one`. -* Applicable [cube parameters][ref-cube-params] should be ordered as follows: - - `name` - - `sql_alias` - - `extends` - - `data_source` - - `sql` - - `sql_table` - - `title` - - `description` - - `public` - - `refresh_key` - - `pre_aggregations` - - `joins` - - `dimensions` - - `measures` - -### Dimensions & measures - -* Primary key of the cube should be the first dimension listed. 
-* Applicable [dimension][ref-dimension-params] and [measure parameters][ref-measure-params] should be ordered as follows: - - `name` - - `title` - - `description` - - `sql` - - `type` - - `primary_key` - - `sub_query` - - `public` - - `format` - - `filters` - - `drill_members` -* Use `title` and `description` if the name is not intuitive. - -### Example cube - -```yaml -cubes: - - name: line_items - sql_table: public.line_items - public: false - - joins: - - name: products - sql: "{CUBE}.product_id = {products.id}" - relationship: many_to_one - - - name: orders - sql: "{CUBE}.order_id = {orders.id}" - relationship: many_to_one - - dimensions: - - name: id - sql: id - type: number - primary_key: true - - - name: created_date - sql: created_at - type: time - - measures: - - name: count - type: count - - - name: total_amount - sql: price - type: sum -``` - -## Views - -* Views should be designed for data consumers and optimized for consumption in visualization tools. -* Applicable [view parameters][ref-view-params] should be ordered as follows: - - `name` - - `description` - - `includes` - - `cubes` - -### Example view - -```yaml -views: - - name: orders - - cubes: - - join_path: base_orders - includes: - # dimensions - - status - - created_date - - # measures - - total_amount - - total_amlunt_shipped - - count - - average_order_value - - - join_path: base_orders.line_items.products - includes: - - name: name - alias: product - - - join_path: base_orders.line_items.products.product_categories - includes: - - name: name - alias: product_category - - - join_path: base_orders.users - prefix: true - includes: - - city -``` - -## SQL style guide - -* Indent with 2 spaces. -* Use trailing commas. -* Use upper case for SQL keywords and function names. -* Use `!=` instead of `<>`. -* Always use the `AS` keyword when aliasing columns, expressions, and tables. -* Unless SQL query is a trivial one-liner, start SQL query from the new line. 
-* Use new lines, optimize for readability and maintainability. -* Use [common table expressions (CTE)][wiki-cte] rather than subqueries. -* When joining multiple tables, always prefix the column names with the table name/alias. -* Use single quotes for strings. -* Avoid initialisms and unnecessary table aliases. -* If there's only one thing in the list (e.g., projection expressions in `SELECT`), put it on the same line as the opening keyword. -* If there are multiple things in the list, put each one on its own line (including the first one), indented one level more than the opening keyword. - - -### Example SQL - -```yaml -cubes: - - name: california_users - sql: > - SELECT - id, - first_name, - last_name - FROM public.users - WHERE state = 'CA' - - dimensions: - - name: id - sql: id - type: number - primary_key: true - - - name: first_name - sql: first_name - type: string - - - name: last_name - sql: last_name - type: string - - measures: - - name: count - type: count - -``` - -## YAML style guide - -* Use `.yml` extension instead of `.yaml`. -* Indent with 2 spaces. -* Indent list items. -* Use a new line to separate list items that are dictionaries, where appropriate. -* Make sure lines are no longer than 80 characters. -* If quotes are needed around a string, use double quotes. - -### Example YAML - -```yaml -cubes: - - name: users - sql_table: public.users - - dimensions: - - name: id - sql: id - type: number - primary_key: true - - - name: city - sql: city - type: string - - - name: lifetime_value - sql: "{line_items.total_amount}" - type: number - sub_query: true - - measures: - - name: count - type: count - - - name: total_orders_amount - sql: "{lifetime_value}" - type: sum -``` - -## JavaScript style guide - -* Indent with 2 spaces. -* Don't use trailing semicolons. -* Don't use trailing commas after last elements of arrays and objects. -* Use a new line to separate list items that are objects, where appropriate. 
-* Make sure lines are no longer than 80 characters. -* If quotes are needed around a string, use [backticks][wiki-backtick]. - -### Example JavaScript - -```javascript -cube(`users`, { - sql_table: `public.users`, - - dimensions: { - id: { - sql: `id`, - type: `number`, - primary_key: true - }, - - city: { - sql: `city`, - type: `string` - }, - - lifetime_value: { - sql: `${line_items.total_amount}`, - type: `number`, - sub_query: true - } - }, - - measures: { - count: { - type: `count` - }, - - total_orders_amount: { - sql: `${lifetime_value}`, - type: `sum` - } - } -}) -``` - -## Credits - -This style guide was inspired in part by: - -* [Brooklyn Data's SQL style guide](https://github.com/brooklyn-data/co/blob/main/sql_style_guide.md) -* [LAMS Style Guide](https://looker-open-source.github.io/look-at-me-sideways/rules.html) - -[cpn]: https://cube.dev/consulting/cube-partner-network -[ref-syntax-model]: /data-modeling/syntax#model-syntax -[ref-syntax-naming]: /data-modeling/syntax#naming -[ref-syntax-folder-structure]: /data-modeling/syntax#folder-structure -[ref-public]: /schema/reference/cube#parameters-public -[ref-sql-table]: /schema/reference/cube#parameters-sql-table -[ref-join-rel]: /schema/reference/joins#parameters-relationship -[ref-cube-params]: /schema/reference/cube#parameters -[ref-measure-params]: /schema/reference/measures#parameters -[ref-dimension-params]: /schema/reference/dimensions#parameters -[ref-view-params]: /schema/reference/view#parameters -[self-yaml]: #yaml-style-guide -[self-sql]: #sql-style-guide -[wiki-cte]: https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL#Common_table_expression -[wiki-backtick]: https://en.wikipedia.org/wiki/Backtick \ No newline at end of file diff --git a/docs/content/Monitoring/Alerts.mdx b/docs/content/Monitoring/Alerts.mdx deleted file mode 100644 index 923acc1ed9b9e..0000000000000 --- a/docs/content/Monitoring/Alerts.mdx +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: Alerts -permalink: 
/cloud/workspace/alerts -category: Monitoring -menuOrder: 1 -redirect_from: - - /cloud/dev-tools/alerts/ ---- - -Cube Cloud lets you set up alerts so you can be notified when there are issues -with any of the deployments in your account. - - - -Alerts are available in Cube Cloud on [Starter](https://cube.dev/pricing) tier. - - - -Alert conditions are checked **once per minute** for configured -[alert types](#alert-types) and [deployments](#deployments). If an alert -condition is triggered, an email notification will be sent to configured -[recipients](#recipients). - -Cube Cloud only sends an outage email **once** per configured alert; when the -issue resolves, a resolution email is also sent. - -## Alert Types - -Cube Cloud has three different types of alerts: - -- **API outages.** These are triggered if the deployment's API becomes - unavailable by monitoring the [`/livez` endpoint][ref-rest-api-livez]. -- **Database timeouts.** These are triggered if either the APIs or the database - are taking longer than expected to respond. -- **Pre-aggregation build failures.** These are triggered if any pre-aggregation - fails to build. - -## How to Configure - - - -Only **admin** users can configure alerts in Cube Cloud. - - - -To set up alerts in Cube Cloud, click your username in the top-right for the -menu, then click **Alerts**: - - - -You should now see the Alerts page. The example screenshot below shows no alerts -configured for this deployment: - - - -Click the **New Alert** button to bring up a dialog where you can configure the -alert: - - - -### Deployments - -By default, alerts are configured for all deployments in your account. You can -configure alerts for specific deployments by clicking **Specific** instead of -**All** and then checking the specific deployments you want to monitor: - - - -### Recipients - -When creating a new alert, by default, all users will receive email alerts for -any issues. 
You can also specify particular users who should receive alerts by -toggling the **Specific users** option and selecting them: - - - -Optionally, you can also add additional email addresses that should receive -email notifications by entering the email address in the **Custom email** field: - - - -[ref-rest-api-livez]: /rest-api#api-reference-livez diff --git a/docs/content/Monitoring/Integrations/Datadog.mdx b/docs/content/Monitoring/Integrations/Datadog.mdx deleted file mode 100644 index 7a651873b900d..0000000000000 --- a/docs/content/Monitoring/Integrations/Datadog.mdx +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: Integration with Datadog -permalink: /monitoring/datadog -category: Monitoring -menuOrder: 10 ---- - -[Datadog][datadog] is a popular fully managed observability service. This -guide demonstrates how to set up Cube Cloud to export logs to Datadog. - -## Configuration - -First, enable [monitoring integrations][ref-monitoring-integrations] in Cube -Cloud. - -### <--{"id" : "Configuration"}--> Exporting logs - -To export logs to Datadog, go to Organization Settings → API Keys -obtain an API key: - - - -Then, configure the [`datadog_logs`][vector-datadog-logs] sink in your -[`vector.toml` configuration file][ref-monitoring-integrations-conf]. - -Example configuration: - -```toml -[sinks.datadog_logs] -type = "datadog_logs" -inputs = [ - "cubejs-server", - "refresh-scheduler", - "ext-db", - "warmup-job", - "cubestore" -] -default_api_key = "0746ad78335ac7c0219ee38b47ada6a6" -site = "datadoghq.eu" -compression = "gzip" -healthcheck = false -``` - -Note that Datadog accounts belong to specific [sites][datadog-docs-sites] -throughout the world. Use the `site` option to configure the sink -appropriately. When miscofigured, Vector agent outputs the following error: -`Client request was forbidden`. - -Commit the configuration for Vector, it should take effect in a minute. 
Then, navigate to Logs in Datadog and watch the logs come in:
- -Example configuration: - -```toml -[sinks.loki] -type = "loki" -inputs = [ - "cubejs-server", - "refresh-scheduler", - "ext-db", - "warmup-job", - "cubestore" -] -endpoint = "https://logs-prod-012.grafana.net" - -[sinks.loki.auth] -strategy = "basic" -user = "618504" -password = "eyJrIjoiMDVmMzE4MTk4ZDY1NGE5NWQ3Y2M2Yjk0MzE0M2JkZmJjNTMzMzU5ZCIsIm4iOiJrZXkiLCJpZCI6ODc1NzE5fQ==" - -[sinks.loki.encoding] -codec = "json" - -[sinks.loki.cubestore] -levels = [ - "trace", - "info", - "debug", - "error" -] - -[sinks.loki.labels] -app = "cube-cloud" -env = "production" -``` - -Commit the configuration for Vector, it should take effect in a minute. -Then, navigate to Logs in Grafana Cloud and watch the logs -coming. - - - -### <--{"id" : "Configuration"}--> Exporting metrics - -To export metrics to Grafana Cloud, go to your account and get credentials -for Prometheus, the metrics service in Grafana Cloud: - - - -Then, configure the [`prometheus_remote_write`][vector-prometheus-rw] sink -in your [`vector.toml` configuration file][ref-monitoring-integrations-conf]. - -Example configuration: - -```toml -[sinks.prometheus] -type = "prometheus_remote_write" -inputs = [ - "metrics" -] -endpoint = "https://prometheus-prod-24-prod-eu-west-2.grafana.net/api/prom/push" - -[sinks.prometheus.auth] -strategy = "basic" -user = "1033221" -password = "eyJrIjoiYTg1OTQ2OGY4Yzg3MTQxODc5OTA4NDUxMGM4NTA2ZDQ3ZjliYWZjOCIsIm4iOiJwcnciLCJpZCI6ODc1NzE5fQ==" -``` - -Commit the configuration for Vector, it should take effect in a minute. 
-Then, navigate to Explore in Grafana Cloud, select metrics -from the drop down: - - - -Then, you can create visualizations and add them to a dashboard: - - - -[grafana]: https://grafana.com -[vector-loki]: https://vector.dev/docs/reference/configuration/sinks/loki/ -[vector-prometheus-rw]: https://vector.dev/docs/reference/configuration/sinks/prometheus_remote_write/ -[ref-monitoring-integrations]: /monitoring/integrations -[ref-monitoring-integrations-conf]: /monitoring/integrations#configuration diff --git a/docs/content/Monitoring/Integrations/Integrations.mdx b/docs/content/Monitoring/Integrations/Integrations.mdx deleted file mode 100644 index d147f69f49318..0000000000000 --- a/docs/content/Monitoring/Integrations/Integrations.mdx +++ /dev/null @@ -1,264 +0,0 @@ ---- -title: Integrations -permalink: /monitoring/integrations -category: Monitoring -menuOrder: 2 -redirect_from: - - /cloud/workspace/logs ---- - -Cube Cloud allows exporting logs and metrics to external monitoring tools so -you can leverage your existing monitoring stack and retain logs and metrics -for the long term. - - - -Monitoring integrations are available in Cube Cloud on -[Enterprise](https://cube.dev/pricing) tier. -[Contact us](https://cube.dev/contact) for details. - - - - - -Monitoring integrations are only available for [dedicated VPC][ref-vpc] deployments. - - - -Under the hood, Cube Cloud uses [Vector][vector], an open-source tool for -collecting and delivering monitoring data. It supports a [wide range of -destinations][vector-docs-sinks], also known as _sinks_. - - - -## Guides - -Monitoring integrations work with various popular monitoring tools. Check -the following guides and configuration examples to get tool-specific -instructions: - - - - - - -## Configuration - -To enable monitoring integrations, navigate -to Settings → Monitoring Integrations and -click Enable Vector to add a Vector agent to your deployment. 
Under Metrics export, you will see credentials for the -`prometheus_exporter` sink, in case you'd like to set up -[metrics export][self-sinks-for-metrics]. - -Additionally, create a [`vector.toml` configuration file][vector-docs-config] -next to your `cube.js` file. This file is used to keep the sinks configuration. -You have to commit this file for Vector configuration to take effect.
| -| --------- | -------------------- | -| `error` | ✅ Yes | -| `info` | ✅ Yes | -| `debug` | ❌ No | -| `trace` | ❌ No | - -### <--{"id" : "Configuration"}--> Sinks for logs - -You can use a [wide range of destinations][vector-docs-sinks] for logs, -including the following ones: - -- [AWS Cloudwatch][vector-docs-sinks-cloudwatch] -- [AWS S3][vector-docs-sinks-s3], -[Google Cloud Storage][vector-docs-sinks-gcs], and -[Azure Blob Storage][vector-docs-sinks-azureblob] -- [Datadog][vector-docs-sinks-datadog] - -Example configuration for exporting all logs, including all Cube Store logs -to [Azure Blob Storage][vector-docs-sinks-azureblob]: - -```toml -[sinks.azure] -type = "azure_blob" -container_name = "my-logs" -connection_string = "DefaultEndpointsProtocol=https;AccountName=mylogstorage;AccountKey=storageaccountkeybase64encoded;EndpointSuffix=core.windows.net" -inputs = [ - "cubejs-server", - "refresh-scheduler", - "ext-db", - "warmup-job", - "cubestore" -] - -[sinks.azure.cubestore] -levels = [ - "trace", - "info", - "debug", - "error" -] -``` - -### <--{"id" : "Configuration"}--> Inputs for metrics - -Metrics are exported using the `metrics` input. You can filter them by -providing an array of metric names via the `list` option. 
- -| Name | Type | Applies to | Description | -| ------------------------ | ---------------------------------------- | ------------------------ | ---------------------------------------------- | -| `cpu` | [`gauge`][vector-docs-metrics-gauge] | Node of a deployment | Percent of free CPU against requests | -| `memory` | [`gauge`][vector-docs-metrics-gauge] | Node of a deployment | Percent of free Memory against requests | -| `requests-count` | [`counter`][vector-docs-metrics-counter] | Deployment | Total number of processed requests | -| `requests-errors-count` | [`counter`][vector-docs-metrics-counter] | Deployment | Number of requests processed with errors | -| `requests-success-count` | [`counter`][vector-docs-metrics-counter] | Deployment | Number of requests processed successfully | -| `requests-duration` | [`counter`][vector-docs-metrics-counter] | Deployment | Total time taken to process requests (seconds) | - -You can further filter exported metrics by providing an array of `inputs` -that applies to metics only. - -Example configuration for exporting all metrics from `cubejs-server` to -[Prometheus][vector-docs-sinks-prometheus] using the -`prometheus_remote_write` sink: - -```toml -[sinks.prometheus] -type = "prometheus_remote_write" -inputs = [ - "metrics" -] -endpoint = "https://prometheus.example.com:8087/api/v1/write" - -[sinks.prometheus.auth] -# Strategy, credentials, etc. 
- -[sinks.prometheus.metrics] -list = [ - "cpu", - "memory", - "requests-count", - "requests-errors-count", - "requests-success-count", - "requests-duration" -] -inputs = [ - "cubejs-server" -] -``` - -### <--{"id" : "Configuration"}--> Sinks for metrics - -Metrics are exported in the Prometheus format which is compatible with -the following sinks: - -- [`prometheus_exporter`][vector-docs-sinks-prometheus-exporter] (native to -[Prometheus][prometheus], compatible with [Mimir][mimir]) -- [`prometheus_remote_write`][vector-docs-sinks-prometheus] (compatible with -[Grafana Cloud][grafana-cloud]) - -Example configuration for exporting all metrics from `cubejs-server` to -[Prometheus][vector-docs-sinks-prometheus-exporter] using the -`prometheus_exporter` sink: - -```toml -[sinks.prometheus] -type = "prometheus_exporter" -inputs = [ - "metrics" -] - -[sinks.prometheus.metrics] -list = [ - "cpu", - "memory", - "requests-count", - "requests-errors-count", - "requests-success-count", - "requests-duration" -] -inputs = [ - "cubejs-server" -] -``` - -Navigate to Settings → Monitoring Integrations to take the -credentials `prometheus_exporter` under Metrics export: - - - -You can also customize the user name and password for `prometheus_exporter` by -setting `CUBE_CLOUD_MONITORING_METRICS_USER` and -`CUBE_CLOUD_MONITORING_METRICS_PASSWORD` environment variables, respectively. 
- -[self-sinks-for-metrics]: #configuration-sinks-for-metrics -[ref-vpc]: /cloud/configuration/connecting-with-a-vpc -[vector]: https://vector.dev/ -[vector-docs-config]: https://vector.dev/docs/reference/configuration/ -[vector-docs-sinks]: https://vector.dev/docs/reference/configuration/sinks/ -[vector-docs-sinks-cloudwatch]: https://vector.dev/docs/reference/configuration/sinks/aws_cloudwatch_logs/ -[vector-docs-sinks-s3]: https://vector.dev/docs/reference/configuration/sinks/aws_s3/ -[vector-docs-sinks-azureblob]: https://vector.dev/docs/reference/configuration/sinks/azure_blob/ -[vector-docs-sinks-gcs]: https://vector.dev/docs/reference/configuration/sinks/gcp_cloud_storage/ -[vector-docs-sinks-datadog]: https://vector.dev/docs/reference/configuration/sinks/datadog_logs/ -[vector-docs-sinks-prometheus]: https://vector.dev/docs/reference/configuration/sinks/prometheus_remote_write/ -[vector-docs-sinks-prometheus-exporter]: https://vector.dev/docs/reference/configuration/sinks/prometheus_exporter/ -[vector-docs-metrics-gauge]: https://vector.dev/docs/about/under-the-hood/architecture/data-model/metric/#gauge -[vector-docs-metrics-counter]: https://vector.dev/docs/about/under-the-hood/architecture/data-model/metric/#counter -[prometheus]: https://prometheus.io -[mimir]: https://grafana.com/oss/mimir/ -[grafana-cloud]: https://grafana.com/products/cloud/ diff --git a/docs/content/Reference/CLI/CLI-Reference.mdx b/docs/content/Reference/CLI/CLI-Reference.mdx deleted file mode 100644 index f807ab06c4c6f..0000000000000 --- a/docs/content/Reference/CLI/CLI-Reference.mdx +++ /dev/null @@ -1,160 +0,0 @@ ---- -title: Command reference -permalink: /reference -category: Reference -subCategory: CLI -menuOrder: 6 ---- - -## create - -The `create` command generates barebones Cube app. 
- -### <--{"id" : "create"}--> Usage - -```bash{promptUser: user} -npx cubejs-cli create APP-NAME -d DB-TYPE [-t TEMPLATE] -``` - -### <--{"id" : "create"}--> Flags - -| Parameter | Description | Values | -| --------------------------- | -------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | -| `-d, --db-type ` | Preconfigure Cube app for selected database. | `postgres`, `mysql`, `athena`, `mongobi`, `bigquery`, `redshift`, `mssql`, `clickhouse`, `snowflake`, `presto`, `druid` | -| `-t, --template