diff --git a/CNAME b/CNAME
index 5d3be932f2f95..1a3ca76186416 100644
--- a/CNAME
+++ b/CNAME
@@ -1 +1 @@
-istio.io
+preliminary.istio.io
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4696c8e6ee195..7950e85dc2ae9 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,5 +1,7 @@
-# Contribution guidelines
+## Contribution guidelines
-So, you want to hack on the Istio web site? Yay! Please refer to Istio's overall
+So, you want to hack on the Istio web site? Cool! Please refer to Istio's overall
[contribution guidelines](https://github.com/istio/community/blob/master/CONTRIBUTING.md)
to find out how you can help.
+
+For specifics on hacking on our site, check this [info](https://istio.io/about/contribute/)
diff --git a/Gemfile b/Gemfile
index af8715d432024..7b231be9a2199 100644
--- a/Gemfile
+++ b/Gemfile
@@ -3,5 +3,6 @@ source "https://rubygems.org"
gem "github-pages", group: :jekyll_plugins
gem "jekyll-include-cache", "~> 0.1"
gem "nokogiri", ">= 1.8.1"
-gem "html-proofer", :git => "https://github.com/ldemailly/html-proofer.git"
+gem "html-proofer", ">= 3.8.0"
gem "rake"
+gem "mdl"
diff --git a/Gemfile.lock b/Gemfile.lock
index 344a041bbf928..662aeaac0897a 100644
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -1,17 +1,3 @@
-GIT
- remote: https://github.com/ldemailly/html-proofer.git
- revision: a9fd7130ddf4bffc54ef938783d09a110e46574b
- specs:
- html-proofer (3.7.5)
- activesupport (>= 4.2, < 6.0)
- addressable (~> 2.3)
- colorize (~> 0.8)
- mercenary (~> 0.3.2)
- nokogiri (~> 1.7)
- parallel (~> 1.3)
- typhoeus (~> 0.7)
- yell (~> 2.0)
-
GEM
remote: https://rubygems.org/
specs:
@@ -28,38 +14,42 @@ GEM
coffee-script-source (1.11.1)
colorator (1.1.0)
colorize (0.8.1)
- commonmarker (0.17.7)
+ commonmarker (0.17.9)
ruby-enum (~> 0.5)
concurrent-ruby (1.0.5)
+ em-websocket (0.5.1)
+ eventmachine (>= 0.12.9)
+ http_parser.rb (~> 0.6.0)
ethon (0.11.0)
ffi (>= 1.3.0)
+ eventmachine (1.2.5)
execjs (2.7.0)
- faraday (0.13.1)
+ faraday (0.14.0)
multipart-post (>= 1.2, < 3)
- ffi (1.9.18)
+ ffi (1.9.23)
forwardable-extended (2.6.0)
gemoji (3.0.0)
- github-pages (172)
+ github-pages (180)
activesupport (= 4.2.9)
- github-pages-health-check (= 1.3.5)
- jekyll (= 3.6.2)
+ github-pages-health-check (= 1.4.0)
+ jekyll (= 3.7.3)
jekyll-avatar (= 0.5.0)
- jekyll-coffeescript (= 1.0.2)
- jekyll-commonmark-ghpages (= 0.1.3)
+ jekyll-coffeescript (= 1.1.1)
+ jekyll-commonmark-ghpages (= 0.1.5)
jekyll-default-layout (= 0.1.4)
- jekyll-feed (= 0.9.2)
- jekyll-gist (= 1.4.1)
- jekyll-github-metadata (= 2.9.3)
- jekyll-mentions (= 1.2.0)
+ jekyll-feed (= 0.9.3)
+ jekyll-gist (= 1.5.0)
+ jekyll-github-metadata (= 2.9.4)
+ jekyll-mentions (= 1.3.0)
jekyll-optional-front-matter (= 0.3.0)
jekyll-paginate (= 1.1.0)
jekyll-readme-index (= 0.2.0)
- jekyll-redirect-from (= 0.12.1)
- jekyll-relative-links (= 0.5.2)
+ jekyll-redirect-from (= 0.13.0)
+ jekyll-relative-links (= 0.5.3)
jekyll-remote-theme (= 0.2.3)
- jekyll-sass-converter (= 1.5.0)
- jekyll-seo-tag (= 2.3.0)
- jekyll-sitemap (= 1.1.1)
+ jekyll-sass-converter (= 1.5.2)
+ jekyll-seo-tag (= 2.4.0)
+ jekyll-sitemap (= 1.2.0)
jekyll-swiss (= 0.4.0)
jekyll-theme-architect (= 0.1.0)
jekyll-theme-cayman (= 0.1.0)
@@ -74,61 +64,74 @@ GEM
jekyll-theme-slate (= 0.1.0)
jekyll-theme-tactile (= 0.1.0)
jekyll-theme-time-machine (= 0.1.0)
- jekyll-titles-from-headings (= 0.5.0)
- jemoji (= 0.8.1)
- kramdown (= 1.14.0)
+ jekyll-titles-from-headings (= 0.5.1)
+ jemoji (= 0.9.0)
+ kramdown (= 1.16.2)
liquid (= 4.0.0)
- listen (= 3.0.6)
+ listen (= 3.1.5)
mercenary (~> 0.3)
- minima (= 2.1.1)
+ minima (= 2.4.0)
+ nokogiri (>= 1.8.1, < 2.0)
rouge (= 2.2.1)
terminal-table (~> 1.4)
- github-pages-health-check (1.3.5)
+ github-pages-health-check (1.4.0)
addressable (~> 2.3)
net-dns (~> 0.8)
octokit (~> 4.0)
public_suffix (~> 2.0)
- typhoeus (~> 0.7)
+ typhoeus (~> 1.3)
html-pipeline (2.7.1)
activesupport (>= 2)
nokogiri (>= 1.4)
- i18n (0.9.1)
+ html-proofer (3.8.0)
+ activesupport (>= 4.2, < 6.0)
+ addressable (~> 2.3)
+ colorize (~> 0.8)
+ mercenary (~> 0.3.2)
+ nokogiri (~> 1.8.1)
+ parallel (~> 1.3)
+ typhoeus (~> 1.3)
+ yell (~> 2.0)
+ http_parser.rb (0.6.0)
+ i18n (0.9.5)
concurrent-ruby (~> 1.0)
- jekyll (3.6.2)
+ jekyll (3.7.3)
addressable (~> 2.4)
colorator (~> 1.0)
+ em-websocket (~> 0.5)
+ i18n (~> 0.7)
jekyll-sass-converter (~> 1.0)
- jekyll-watch (~> 1.1)
+ jekyll-watch (~> 2.0)
kramdown (~> 1.14)
liquid (~> 4.0)
mercenary (~> 0.3.3)
pathutil (~> 0.9)
- rouge (>= 1.7, < 3)
+ rouge (>= 1.7, < 4)
safe_yaml (~> 1.0)
jekyll-avatar (0.5.0)
jekyll (~> 3.0)
- jekyll-coffeescript (1.0.2)
+ jekyll-coffeescript (1.1.1)
coffee-script (~> 2.2)
coffee-script-source (~> 1.11.1)
- jekyll-commonmark (1.1.0)
+ jekyll-commonmark (1.2.0)
commonmarker (~> 0.14)
jekyll (>= 3.0, < 4.0)
- jekyll-commonmark-ghpages (0.1.3)
+ jekyll-commonmark-ghpages (0.1.5)
commonmarker (~> 0.17.6)
jekyll-commonmark (~> 1)
rouge (~> 2)
jekyll-default-layout (0.1.4)
jekyll (~> 3.0)
- jekyll-feed (0.9.2)
+ jekyll-feed (0.9.3)
jekyll (~> 3.3)
- jekyll-gist (1.4.1)
+ jekyll-gist (1.5.0)
octokit (~> 4.2)
- jekyll-github-metadata (2.9.3)
+ jekyll-github-metadata (2.9.4)
jekyll (~> 3.1)
octokit (~> 4.0, != 4.4.0)
jekyll-include-cache (0.1.0)
jekyll (~> 3.3)
- jekyll-mentions (1.2.0)
+ jekyll-mentions (1.3.0)
activesupport (~> 4.0)
html-pipeline (~> 2.3)
jekyll (~> 3.0)
@@ -137,19 +140,19 @@ GEM
jekyll-paginate (1.1.0)
jekyll-readme-index (0.2.0)
jekyll (~> 3.0)
- jekyll-redirect-from (0.12.1)
+ jekyll-redirect-from (0.13.0)
jekyll (~> 3.3)
- jekyll-relative-links (0.5.2)
+ jekyll-relative-links (0.5.3)
jekyll (~> 3.3)
jekyll-remote-theme (0.2.3)
jekyll (~> 3.5)
rubyzip (>= 1.2.1, < 3.0)
typhoeus (>= 0.7, < 2.0)
- jekyll-sass-converter (1.5.0)
+ jekyll-sass-converter (1.5.2)
sass (~> 3.4)
- jekyll-seo-tag (2.3.0)
+ jekyll-seo-tag (2.4.0)
jekyll (~> 3.3)
- jekyll-sitemap (1.1.1)
+ jekyll-sitemap (1.2.0)
jekyll (~> 3.3)
jekyll-swiss (0.4.0)
jekyll-theme-architect (0.1.0)
@@ -192,45 +195,56 @@ GEM
jekyll-theme-time-machine (0.1.0)
jekyll (~> 3.5)
jekyll-seo-tag (~> 2.0)
- jekyll-titles-from-headings (0.5.0)
+ jekyll-titles-from-headings (0.5.1)
jekyll (~> 3.3)
- jekyll-watch (1.5.1)
+ jekyll-watch (2.0.0)
listen (~> 3.0)
- jemoji (0.8.1)
+ jemoji (0.9.0)
activesupport (~> 4.0, >= 4.2.9)
gemoji (~> 3.0)
html-pipeline (~> 2.2)
- jekyll (>= 3.0)
- kramdown (1.14.0)
+ jekyll (~> 3.0)
+ kramdown (1.16.2)
liquid (4.0.0)
- listen (3.0.6)
- rb-fsevent (>= 0.9.3)
- rb-inotify (>= 0.9.7)
+ listen (3.1.5)
+ rb-fsevent (~> 0.9, >= 0.9.4)
+ rb-inotify (~> 0.9, >= 0.9.7)
+ ruby_dep (~> 1.2)
+ mdl (0.4.0)
+ kramdown (~> 1.12, >= 1.12.0)
+ mixlib-cli (~> 1.7, >= 1.7.0)
+ mixlib-config (~> 2.2, >= 2.2.1)
mercenary (0.3.6)
mini_portile2 (2.3.0)
- minima (2.1.1)
- jekyll (~> 3.3)
- minitest (5.10.3)
+ minima (2.4.0)
+ jekyll (~> 3.5)
+ jekyll-feed (~> 0.9)
+ jekyll-seo-tag (~> 2.1)
+ minitest (5.11.3)
+ mixlib-cli (1.7.0)
+ mixlib-config (2.2.6)
+ tomlrb
multipart-post (2.0.0)
net-dns (0.8.0)
- nokogiri (1.8.1)
+ nokogiri (1.8.2)
mini_portile2 (~> 2.3.0)
- octokit (4.7.0)
+ octokit (4.8.0)
sawyer (~> 0.8.0, >= 0.5.3)
- parallel (1.12.0)
- pathutil (0.16.0)
+ parallel (1.12.1)
+ pathutil (0.16.1)
forwardable-extended (~> 2.6)
public_suffix (2.0.5)
- rake (12.3.0)
- rb-fsevent (0.10.2)
+ rake (12.3.1)
+ rb-fsevent (0.10.3)
rb-inotify (0.9.10)
ffi (>= 0.5.0, < 2)
rouge (2.2.1)
- ruby-enum (0.7.1)
+ ruby-enum (0.7.2)
i18n
+ ruby_dep (1.5.0)
rubyzip (1.2.1)
safe_yaml (1.0.4)
- sass (3.5.3)
+ sass (3.5.6)
sass-listen (~> 4.0.0)
sass-listen (4.0.0)
rb-fsevent (~> 0.9, >= 0.9.4)
@@ -241,9 +255,10 @@ GEM
terminal-table (1.8.0)
unicode-display_width (~> 1.1, >= 1.1.1)
thread_safe (0.3.6)
- typhoeus (0.8.0)
- ethon (>= 0.8.0)
- tzinfo (1.2.4)
+ tomlrb (1.2.6)
+ typhoeus (1.3.0)
+ ethon (>= 0.9.0)
+ tzinfo (1.2.5)
thread_safe (~> 0.1)
unicode-display_width (1.3.0)
yell (2.0.7)
@@ -253,10 +268,11 @@ PLATFORMS
DEPENDENCIES
github-pages
- html-proofer!
+ html-proofer (>= 3.8.0)
jekyll-include-cache (~> 0.1)
+ mdl
nokogiri (>= 1.8.1)
rake
BUNDLED WITH
- 1.16.0
+ 1.16.1
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000000000..b682f960cff78
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1,4 @@
+
+approvers:
+ - geeknoid
+ - linsun
diff --git a/README.md b/README.md
index 06edc37c58c22..688321aebfa5e 100644
--- a/README.md
+++ b/README.md
@@ -1,22 +1,31 @@
-# istio.github.io
+## istio.github.io
-This repository contains the source code for the [istio.io](https://istio.io) web site.
+This repository contains the source code for the [istio.io](https://istio.io),
+[preliminary.istio.io](https://preliminary.istio.io) and [archive.istio.io](https://archive.istio.io) sites.
Please see the main Istio [README](https://github.com/istio/istio/blob/master/README.md)
file to learn about the overall Istio project and how to get in touch with us. To learn how you can
contribute to any of the Istio components, please
see the Istio [contribution guidelines](https://github.com/istio/community/blob/master/CONTRIBUTING.md).
-The website uses [Jekyll](https://jekyllrb.com/) templates and is hosted on GitHub Pages. Please make sure you are
-familiar with these before editing.
+* [Working with the site](#working-with-the-site)
+* [Linting](#linting)
+* [Versions and releases](#versions-and-releases)
+ * [How versioning works](#how-versioning-works)
+ * [Publishing content immediately](#publishing-content-immediately)
+ * [Creating a version](#creating-a-version)
-To run the site locally with Docker, use the following command from the toplevel directory for this git repo
+## Working with the site
+
+We use [Jekyll](https://jekyllrb.com/) to generate our sites.
+
+To run the site locally with Docker, use the following command from the top level directory for this git repo
(e.g. pwd must be `~/github/istio.github.io` if you were in `~/github` when you issued
`git clone https://github.com/istio/istio.github.io.git`)
```bash
# First time: (slow)
-docker run --name istio-jekyll --volume=$(pwd):/srv/jekyll -it -p 4000:4000 jekyll/jekyll:3.5.2 sh -c "bundle install && rake test && bundle exec jekyll serve --incremental --host 0.0.0.0"
+docker run --name istio-jekyll --volume=$(pwd):/srv/jekyll -w /srv/jekyll -it -p 4000:4000 jekyll/jekyll:3.7.3 sh -c "bundle install && rake test && bundle exec jekyll serve --incremental --host 0.0.0.0"
# Then open browser with url 127.0.0.1:4000 to see the change.
# Subsequent, each time you want to see a new change and you stopped the previous run by ctrl+c: (much faster)
docker start istio-jekyll -a -i
@@ -24,23 +33,24 @@ docker start istio-jekyll -a -i
docker rm istio-jekyll
```
-The `rake test` part is to make sure you are not introducing html errors or bad links, you should see
-```
+The `rake test` part is to make sure you are not introducing HTML errors or bad links, you should see
+
+```bash
HTML-Proofer finished successfully.
```
-in the output
-
-> In some cases the `--incremental` may not work properly and you might have to remove it.
-## Local/Native Jekyll install:
+in the output.
-Alternatively, if you just want to develop locally w/o Docker/Kubernetes/Minikube, you can try installing Jekyll locally. You may need to install other prerequisites manually (which is where using the docker image shines). Here's an example of doing so for Mac OS X:
+Alternatively, if you just want to develop locally w/o Docker/Kubernetes/Minikube, you can try installing Jekyll locally.
+You may need to install other prerequisites manually (which is where using the docker image shines). Here's an example of doing
+so for Mac OS X:
-```
+```bash
xcode-select --install
sudo xcodebuild -license
brew install ruby
gem update --system
+gem install mdspell
gem install bundler
gem install jekyll
cd istio.github.io
@@ -48,3 +58,117 @@ bundle install
bundle exec rake test
bundle exec jekyll serve
```
+
+## Linting
+
+You should run `scripts/linters.sh` prior to checking in your changes.
+This will run 3 tests:
+
+* HTML proofing, which ensures all your links are valid along with other checks.
+
+* Spell checking.
+
+* Style checking, which makes sure your markdown file complies with some common style rules.
+
+If you get a spelling error, you have three choices to address it:
+
+* It's a real typo, so fix your markdown.
+
+* It's a command/field/symbol name, so stick some `backticks` around it.
+
+* It's really valid, so go add the word to the .spelling file at the root of the repo.
+
+## Versions and releases
+
+Istio maintains three variations of its public site:
+
+* [istio.io](https://istio.io) is the main site, showing documentation for the current release of the product.
+This site is currently hosted on Firebase.
+
+* [archive.istio.io](https://archive.istio.io) contains snapshots of the documentation for previous releases of the product.
+This is useful for customers still using these older releases.
+This site is currently hosted on Firebase.
+
+* [preliminary.istio.io](https://preliminary.istio.io) contains the actively updated documentation for the next release of the product.
+This site is hosted by GitHub Pages.
+
+The user can trivially navigate between the different variations of the site using the gear menu in the top right
+of each page.
+
+### How versioning works
+
+* Documentation changes are primarily committed to the master branch of istio.github.io. Changes committed to this branch
+are automatically reflected on preliminary.istio.io.
+
+* The content of istio.io is taken from the latest release-XXX branch. The specific branch that
+is used is determined by the `BRANCH` variable in this [script](https://github.com/istio/admin-sites/blob/master/current.istio.io/build.sh)
+
+* The content of archive.istio.io is taken from the older release-XXX branches. The set of branches that
+are included on archive.istio.io is determined by the `TOBUILD` variable in this
+[script](https://github.com/istio/admin-sites/blob/master/archive.istio.io/build.sh)
+
+> The above means that if you want to do a change to the main istio.io site, you will need
+to make the change in the master branch of istio.github.io and then merge that change into the
+release branch.
+
+### Publishing content immediately
+
+Checking in updates to the master branch will automatically update preliminary.istio.io, and will only be reflected on
+istio.io the next time a release is created, which can be several weeks in the future. If you'd like some changes to be
+immediately reflected on istio.io, you need to check in your changes both to the master branch and to the
+current release branch (named release-XXX such as release-0.7).
+
+### Creating a version
+
+Here are the steps necessary to create a new documentation version. Let's assume the current
+version of Istio is 0.6 and you wish to introduce 0.7 which has been under development.
+
+1. Create a new release branch off of master, named as release-*major*.*minor*, which in this case would be
+release-0.7. There is one such branch for every release.
+
+1. In the **master** branch, edit the file `_data/istio.yml` and update the `version` field to have the version
+of the next release of Istio. In this case, you would set the field to 0.8.
+
+1. In the **master** branch, edit the file `_data/releases.yml` and add a new entry at the top of the file
+for version 0.8. You'll need to make sure the URLs are updated for the first few entries. The top
+entry (0.8) should point to preliminary.istio.io. The second entry (0.7) should point to istio.io. The third
+and subsequent entries should point to archive.istio.io.
+
+1. Commit the previous two edits to GitHub.
+
+1. In the **release** branch you created, edit the file `_data/istio.yml`. Set the `preliminary` field to `false`.
+
+1. Commit the previous edit to GitHub.
+
+1. Go to the Google Search Console and create a new search engine that searches the `archive.istio.io/V<major>.<minor>`
+directory. This search engine will be used to perform version-specific searches on archive.istio.io.
+
+1. In the **previous release's** branch (in this case release-0.6), edit the file `_data/istio.yml`. Set the
+`archive` field to true, the `archive_date` field to the current date, and the `search_engine_id` field
+to the ID of the search engine you created in the prior step.
+
+1. Switch to the istio/admin-sites repo.
+
+1. Navigate to the archive.istio.io directory.
+
+1. Edit the `build.sh` script to add the newest archive version (in this case
+release-0.6) to the `TOBUILD` variable.
+
+1. Commit the previous edit to GitHub.
+
+1. Run the `build.sh` script.
+
+1. Once the script completes, run `firebase deploy`. This will update archive.istio.io to contain the
+right set of archives, based on the above steps.
+
+1. Navigate to the current.istio.io directory.
+
+1. Edit the `build.sh` script to set the `BRANCH` variable to the current release branch (in this case release-0.7)
+
+1. Run the `build.sh` script.
+
+1. Once the script completes, run `firebase deploy`. This will update the content of istio.io to reflect what is the new release
+branch you created.
+
+Once all this is done, browse the three sites (preliminary.istio.io, istio.io, and archive.istio.io) to make sure
+everything looks good.
diff --git a/Rakefile b/Rakefile
index 3582bd8fc57a1..3cfb235b8ea97 100644
--- a/Rakefile
+++ b/Rakefile
@@ -1,7 +1,9 @@
require 'html-proofer'
task :test do
- sh "bundle exec jekyll build --incremental"
+ sh "rm -fr _rakesite"
+ sh "mkdir _rakesite"
+ sh "bundle exec jekyll build --config _config.yml,_rake_config_override.yml"
typhoeus_configuration = {
:timeout => 30,
# :verbose => true
@@ -17,5 +19,5 @@ task :test do
:url_ignore => [/localhost|github\.com\/istio\/istio\.github\.io\/edit\/master\//],
:typhoeus => typhoeus_configuration,
}
- HTMLProofer.check_directory("./_site", options).run
+ HTMLProofer.check_directory("./_rakesite", options).run
end
diff --git a/_about/contribute/creating-a-pull-request.md b/_about/contribute/creating-a-pull-request.md
index 8c05dc8c67660..59b0eebf86976 100644
--- a/_about/contribute/creating-a-pull-request.md
+++ b/_about/contribute/creating-a-pull-request.md
@@ -1,23 +1,21 @@
---
title: Creating a Pull Request
-overview: Shows you how to create a GitHub pull request in order to submit your docs for approval.
+description: Shows you how to create a GitHub pull request in order to submit your docs for approval.
-order: 20
+weight: 20
-layout: about
-type: markdown
redirect_from: /docs/welcome/contribute/creating-a-pull-request.html
---
To contribute to Istio documentation, create a pull request against the
-[istio/istio.github.io](https://github.com/istio/istio.github.io){: target="_blank"}
+[istio/istio.github.io](https://github.com/istio/istio.github.io)
repository. This page shows the steps necessary to create a pull request.
## Before you begin
-1. Create a [GitHub account](https://github.com){: target="_blank"}.
+1. Create a [GitHub account](https://github.com).
-1. Sign the [Contributor License Agreement](https://github.com/istio/community/blob/master/CONTRIBUTING.md#contributor-license-agreements)
+1. Sign the [Contributor License Agreement](https://github.com/istio/community/blob/master/CONTRIBUTING.md#contributor-license-agreements).
Documentation will be published under the [Apache 2.0](https://github.com/istio/istio.github.io/blob/master/LICENSE) license.
@@ -26,7 +24,7 @@ Documentation will be published under the [Apache 2.0](https://github.com/istio/
Before you can edit documentation, you need to create a fork of Istio's documentation GitHub repository:
1. Go to the
-[istio/istio.github.io](https://github.com/istio/istio.github.io){: target="_blank"}
+[istio/istio.github.io](https://github.com/istio/istio.github.io)
repository.
1. In the upper-right corner, click **Fork**. This creates a copy of Istio's
@@ -57,3 +55,6 @@ repository. This opens a page that shows the status of your pull request.
1. During the next few days, check your pull request for reviewer comments.
If needed, revise your pull request by committing changes to your
new branch in your fork.
+
+> Once your changes have been committed, they will show up immediately on [preliminary.istio.io](https://preliminary.istio.io), but
+will only show up on [istio.io](https://istio.io) the next time we produce a new release, which happens around once a month.
diff --git a/_about/contribute/editing.md b/_about/contribute/editing.md
index 78eaa56d8d10c..28d7b8fbab598 100644
--- a/_about/contribute/editing.md
+++ b/_about/contribute/editing.md
@@ -1,17 +1,18 @@
---
title: Editing Docs
-overview: Lets you start editing this site's documentation.
-
-order: 10
+description: Lets you start editing this site's documentation.
+
+weight: 10
-layout: about
-type: markdown
redirect_from: /docs/welcome/contribute/editing.html
---
Click the button below to visit the GitHub repository for this whole web site. You can then click the
-**Fork** button in the upper-right area of the screen to
+**Fork** button in the upper-right area of the screen to
create a copy of our site in your GitHub account called a _fork_. Make any changes you want in your fork, and when you
are ready to send those changes to us, go to the index page for your fork and click **New Pull Request** to let us know about it.
Browse this site's source code
+
+> Once your changes have been committed, they will show up immediately on [preliminary.istio.io](https://preliminary.istio.io), but
+will only show up on [istio.io](https://istio.io) the next time we produce a new release, which happens around once a month.
diff --git a/_about/contribute/index.md b/_about/contribute/index.md
index 7a81d0c780592..16c808dc5be06 100644
--- a/_about/contribute/index.md
+++ b/_about/contribute/index.md
@@ -1,13 +1,11 @@
---
title: Contributing to the Docs
-overview: Learn how to contribute to improve and expand the Istio documentation.
+description: Learn how to contribute to improve and expand the Istio documentation.
-order: 100
+weight: 100
-layout: about
-type: markdown
toc: false
redirect_from: /docs/welcome/contribute/index.html
---
-{% include section-index.html docs=site.docs %}
+{% include section-index.html docs=site.about %}
diff --git a/_about/contribute/reviewing-doc-issues.md b/_about/contribute/reviewing-doc-issues.md
index f1d17af5928b7..5c2ef7df21bf5 100644
--- a/_about/contribute/reviewing-doc-issues.md
+++ b/_about/contribute/reviewing-doc-issues.md
@@ -1,16 +1,14 @@
---
title: Doc Issues
-overview: Explains the process involved in accepting documentation updates.
-
-order: 60
+description: Explains the process involved in accepting documentation updates.
+
+weight: 60
-layout: about
-type: markdown
redirect_from: /docs/welcome/contribute/reviewing-doc-issues.html
---
This page explains how documentation issues are reviewed and prioritized for the
-[istio/istio.github.io](https://github.com/istio/istio.github.io){: target="_blank"} repository.
+[istio/istio.github.io](https://github.com/istio/istio.github.io) repository.
The purpose is to provide a way to organize issues and make it easier to contribute to
Istio documentation. The following should be used as the standard way of prioritizing,
labeling, and interacting with issues.
@@ -25,7 +23,7 @@ the issue with your reasoning for the change.
P1
Major content errors affecting more than 1 page
-
Broken code sample on a heavily trafficked page
+
Broken code sample on a heavily trafficked page
Errors on a “getting started” page
Well known or highly publicized customer pain points
Automation issues
@@ -52,7 +50,7 @@ the issue with your reasoning for the change.
## Handling special issue types
-If a single problem has one or more issues open for it, the problem should be consolidated into a single issue. You should decide which issue to keep open
+If a single problem has one or more issues open for it, the problem should be consolidated into a single issue. You should decide which issue to keep open
(or open a new issue), port over all relevant information, link related issues, and close all the other issues that describe the same problem. Only having
a single issue to work on will help reduce confusion and avoid duplicating work on the same problem.
diff --git a/_about/contribute/staging-your-changes.md b/_about/contribute/staging-your-changes.md
index 7e56e0e5dee40..92b95be05d2f6 100644
--- a/_about/contribute/staging-your-changes.md
+++ b/_about/contribute/staging-your-changes.md
@@ -1,11 +1,9 @@
---
title: Staging Your Changes
-overview: Explains how to test your changes locally before submitting them.
+description: Explains how to test your changes locally before submitting them.
-order: 40
+weight: 40
-layout: about
-type: markdown
redirect_from: /docs/welcome/contribute/staging-your-changes.html
---
diff --git a/_about/contribute/style-guide.md b/_about/contribute/style-guide.md
index 83f27e3d4542b..b842769742991 100644
--- a/_about/contribute/style-guide.md
+++ b/_about/contribute/style-guide.md
@@ -1,11 +1,9 @@
---
title: Style Guide
-overview: Explains the dos and donts of writing Istio docs.
-
-order: 70
+description: Explains the dos and donts of writing Istio docs.
+
+weight: 70
-layout: about
-type: markdown
redirect_from: /docs/welcome/contribute/style-guide.html
---
@@ -29,18 +27,18 @@ objects use
[camelCase](https://en.wikipedia.org/wiki/Camel_case).
Don't split the API object name into separate words. For example, use
-PodTemplateList, not Pod Template List.
+`PodTemplateList`, not Pod Template List.
Refer to API objects without saying "object," unless omitting "object"
leads to an awkward construction.
|Do |Don't
|--------------------------------------------|------
-|The Pod has two Containers. |The pod has two containers.
-|The Deployment is responsible for ... |The Deployment object is responsible for ...
-|A PodList is a list of Pods. |A Pod List is a list of pods.
-|The two ContainerPorts ... |The two ContainerPort objects ...
-|The two ContainerStateTerminated objects ...|The two ContainerStateTerminateds ...
+|The `Pod` has two Containers. |The pod has two containers.
+|The `Deployment` is responsible for ... |The `Deployment` object is responsible for ...
+|A `PodList` is a list of Pods. |A Pod List is a list of pods.
+|The two `ContainerPorts` ... |The two `ContainerPort` objects ...
+|The two `ContainerStateTerminated` objects ...|The two `ContainerStateTerminated` ...
### Use angle brackets for placeholders
@@ -49,10 +47,10 @@ represents.
1. Display information about a pod:
- ```bash
- kubectl describe pod
+ ```command
+ $ kubectl describe pod
```
-
+
where `` is the name of one of your pods.
### Use **bold** for user interface elements
@@ -81,7 +79,7 @@ represents.
|Do | Don't
|----------------------------|------
-|The `kubectl run` command creates a Deployment.|The "kubectl run" command creates a Deployment.
+|The `kubectl run` command creates a `Deployment`.|The "kubectl run" command creates a `Deployment`.
|For declarative management, use `kubectl apply`.|For declarative management, use "kubectl apply".
### Use `code` style for object field names
@@ -97,19 +95,20 @@ For field values of type string or integer, use normal style without quotation m
|Do | Don't
|----------------------------------------------|------
-|Set the value of `imagePullPolicy` to Always. | Set the value of `imagePullPolicy` to "Always".|Set the value of `image` to nginx:1.8. | Set the value of `image` to `nginx:1.8`.
+|Set the value of `imagePullPolicy` to Always. | Set the value of `imagePullPolicy` to "Always".
+|Set the value of `image` to nginx:1.8. | Set the value of `image` to `nginx:1.8`.
|Set the value of the `replicas` field to 2. | Set the value of the `replicas` field to `2`.
### Only capitalize the first letter of headings
For any headings, only apply an uppercase letter to the first word of the heading,
-except is a word is a proper noun or an acronym.
+except if a word is a proper noun or an acronym.
|Do | Don't
|------------------------|-----
|Configuring rate limits | Configuring Rate Limits
|Using Envoy for ingress | Using envoy for ingress
-|Using HTTPS | Using https
+|Using HTTPS | Using https
## Code snippet formatting
@@ -123,12 +122,8 @@ except is a word is a proper noun or an acronym.
Verify that the pod is running on your chosen node:
-```bash
-kubectl get pods --output=wide
-```
-The output is similar to this:
-
-```bash
+```command
+$ kubectl get pods --output=wide
NAME READY STATUS RESTARTS AGE IP NODE
nginx 1/1 Running 0 13s 10.200.0.4 worker0
```
@@ -150,7 +145,7 @@ Synonyms:
- “Sidecar” -- mostly restricted to conceptual docs
- “Proxy -- only if context is obvious
-Related Terms
+Related Terms:
- Proxy agent - This is a minor infrastructural component and should only show up in low-level detail documentation.
It is not a proper noun.
@@ -171,7 +166,7 @@ forms of configuration.
No dash, it's *load balancing* not *load-balancing*.
-### Service mesh
+### Service mesh
Not a proper noun. Use in place of service fabric.
@@ -208,7 +203,7 @@ Use simple and direct language. Avoid using unnecessary phrases, such as saying
|Do | Don't
|----------------------------|------
-|To create a ReplicaSet, ... | In order to create a ReplicaSet, ...
+|To create a `ReplicaSet`, ... | In order to create a `ReplicaSet`, ...
|See the configuration file. | Please see the configuration file.
|View the Pods. | With this next command, we'll view the Pods.
@@ -216,9 +211,17 @@ Use simple and direct language. Avoid using unnecessary phrases, such as saying
|Do | Don't
|---------------------------------------|------
-|You can create a Deployment by ... | We'll create a Deployment by ...
+|You can create a `Deployment` by ... | We'll create a `Deployment` by ...
|In the preceding output, you can see...| In the preceding output, we can see ...
+### Create useful links
+
+There are good hyperlinks, and bad hyperlinks. The common practice of calling links *here* or *click here* are examples of
+bad hyperlinks. Check out this excellent article explaining what makes a good hyperlink and try to keep these guidelines in
+mind when creating or reviewing site content.
+
+[Why “click here” is a terrible link, and what to write instead](http://stephanieleary.com/2015/05/why-click-here-is-a-terrible-link-and-what-to-write-instead/).
+
## Patterns to avoid
### Avoid using "we"
@@ -229,7 +232,7 @@ whether they're part of the "we" you're describing.
|Do | Don't
|------------------------------------------|------
|Version 1.4 includes ... | In version 1.4, we have added ...
-|Kubernetes provides a new feature for ... | We provide a new feature ...
+|Istio provides a new feature for ... | We provide a new feature ...
|This page teaches you how to use pods. | In this page, we are going to learn about pods.
### Avoid jargon and idioms
diff --git a/_about/contribute/writing-a-new-topic.md b/_about/contribute/writing-a-new-topic.md
index e5bd8e540d0ce..4e4b7cc0f744f 100644
--- a/_about/contribute/writing-a-new-topic.md
+++ b/_about/contribute/writing-a-new-topic.md
@@ -1,11 +1,9 @@
---
title: Writing a New Topic
-overview: Explains the mechanics of creating new documentation pages.
-
-order: 30
+description: Explains the mechanics of creating new documentation pages.
+
+weight: 30
-layout: about
-type: markdown
redirect_from: /docs/welcome/contribute/writing-a-new-topic.html
---
{% include home.html %}
@@ -25,7 +23,7 @@ is the best fit for your content:
Concept
-
A concept page explains some significant aspect of Istio. For example, a concept page might describe the
+
A concept page explains some significant aspect of Istio. For example, a concept page might describe the
Mixer's configuration model and explain some of its subtleties.
Typically, concept pages don't include sequences of steps, but instead provide links to
tasks that do.
@@ -55,10 +53,10 @@ is the best fit for your content:
Setup
A setup page is similar to a task page, except that it is focused on installation
- ectivities.
+ activities.
-
+
Blog Post
@@ -79,50 +77,47 @@ all in lower case.
## Updating the front matter
-Every documentation file needs to start with Jekyll
+Every documentation file needs to start with Jekyll
[front matter](https://jekyllrb.com/docs/frontmatter/).
The front matter is a block of YAML that is between the
triple-dashed lines at the top of each file. Here's the
chunk of front matter you should start with:
-```
+```yaml
---
title:
-overview:
-
-order:
-
-layout: docs
-type: markdown
+description:
+weight:
---
```
Copy the above at the start of your new markdown file and update
-the ``, `` and `` fields for your particular file. The available front
+the ``, `` and `` fields for your particular file. The available front
matter fields are:
|Field | Description
|---------------|------------
|`title` | The short title of the page
-|`overview` | A one-line description of what the topic is about
-|`order` | An integer used to determine the sort order of this page relative to other pages in the same directory.
+|`description` | A one-line description of what the topic is about
+|`weight` | An integer used to determine the sort order of this page relative to other pages in the same directory.
|`layout` | Indicates which of the Jekyll layouts this page uses
-|`index` | Indicates whether the page should appear in the doc's top nav tabs
-|`draft` | When true, prevents the page from shownig up in any navigation area
-|`publish_date` | For blog posts, indicates the date of publication of the post
+|`draft` | When true, prevents the page from showing up in any navigation area
+|`publishdate` | For blog posts, indicates the date of publication of the post
|`subtitle` | For blog posts, supplies an optional subtitle to be displayed below the main title
|`attribution` | For blog posts, supplies an optional author's name
+|`toc` | Set this to false to prevent the page from having a table of contents generated for it
+|`force_inline_toc` | Set this to true to force the generated table of contents to be inserted inline in the text instead of in a sidebar
## Choosing a directory
Depending on your page type, put your new file in a subdirectory of one of these:
-* _blog/
-* _docs/concepts/
-* _docs/guides/
-* _docs/reference/
-* _docs/setup/
-* _docs/tasks/
+- _blog/
+- _docs/concepts/
+- _docs/guides/
+- _docs/reference/
+- _docs/setup/
+- _docs/tasks/
You can put your file in an existing subdirectory, or you can create a new
subdirectory. For blog posts, put the file into a subdirectory for the current
@@ -135,24 +130,28 @@ Put image files in an `img` subdirectory of where you put your markdown file. Th
If you must use a PNG or JPEG file instead, and the file
was generated from an original SVG file, please include the
SVG file in the repository even if it isn't used in the web
-site itself. This is so we can update the imagery over time
+site itself. This is so we can update the imagery over time
if needed.
Within markdown, use the following sequence to add the image:
```html
{% raw %}
-{% include figure.html width='75%' ratio='69.52%'
- img='./img/myfile.svg'
- alt='Alternate text to display when the image is not available'
- title='A tooltip displayed when hovering over the image'
- caption='A caption displayed under the image'
+{% include image.html width="75%" ratio="69.52%"
+ link="./img/myfile.svg"
+ alt="Alternate text to display when the image is not available"
+ title="A tooltip displayed when hovering over the image"
+ caption="A caption displayed under the image"
%}
{% endraw %}
```
-You need to fill in all the values. The width represents the percentage of space used by the image
-relative to the surrounding text. The ratio is (image height / image width) * 100.
+The `width`, `ratio`, `link` and `caption` values are required. If the `title` value isn't
+supplied, it'll default to the same as `caption`. If the `alt` value is not supplied, it'll
+default to `title` or if that's not defined, to `caption`.
+
+`width` represents the percentage of space used by the image
+relative to the surrounding text. `ratio` is (image height / image width) * 100.
## Linking to other pages
@@ -181,10 +180,10 @@ current hierarchy:
{% raw %}[see here]({{home}}/docs/adir/afile.html){% endraw %}
```
- In order to use \{\{home\}\} in a file,
+ In order to use \{\{home\}\} in a file,
you need to make sure that the file contains the following
line of boilerplate right after the block of front matter:
-
+
```markdown
...
---
@@ -197,7 +196,7 @@ current hierarchy:
You can embed blocks of preformatted content using the normal markdown technique:
-
```
+
```plain
func HelloWorld() {
fmt.Println("Hello World")
}
@@ -206,14 +205,14 @@ func HelloWorld() {
The above produces this kind of output:
-```
+```plain
func HelloWorld() {
fmt.Println("Hello World")
}
```
-In general, you should indicate the nature of the content in the preformatted block. You do this
-by appending a name after the initial set of tick marks
+You must indicate the nature of the content in the preformatted block by appending a name after the initial set of tick
+marks:
```go
func HelloWorld() {
@@ -230,8 +229,76 @@ func HelloWorld() {
}
```
-You can use `markdown`, `yaml`, `json`, `java`, `javascript`, `c`, `cpp`, `csharp`, `go`, `html`, `protobuf`,
-`perl`, `docker`, and `bash`.
+You can use `markdown`, `yaml`, `json`, `java`, `javascript`, `c`, `cpp`, `csharp`, `go`, `html`, `protobuf`,
+`perl`, `docker`, and `bash`, along with `command` and its variants described below.
+
+### Showing commands and command output
+
+If you want to show one or more bash command-lines with some output, you use the `command` indicator:
+
+
```command
+$ echo "Hello"
+Hello
+```
+
+
+which produces:
+
+```command
+$ echo "Hello"
+Hello
+```
+
+You can have as many command-lines as you want, but only one chunk of output is recognized.
+
+
+
+which yields:
+
+```command
+$ echo "Hello" >file.txt
+$ cat file.txt
+Hello
+```
+
+You can also use line continuation in your command-lines:
+
+```command
+$ echo "Hello" \
+ >file.txt
+$ echo "There" >>file.txt
+$ cat file.txt
+Hello
+There
+```
+
+If the output of the command is JSON or YAML, you can use `command-output-as-json` and `command-output-as-yaml`
+instead of merely `command` in order to apply syntax coloring to the command's output.
+
+## Displaying file content
+
+You can pull in an external file and display its content as a preformatted block. This is handy to display a
+config file or a test file. To do so, you use a Jekyll include statement such as:
+
+```html
+{% raw %}{% include file-content.html url='https://raw.githubusercontent.com/istio/istio/master/Makefile' %}{% endraw %}
+```
+
+which produces the following result:
+
+{% include file-content.html url='https://raw.githubusercontent.com/istio/istio/master/Makefile' %}
+
+If the file is from a different origin site, CORS should be enabled on that site. Note that the
+GitHub raw content site (raw.githubusercontent.com) is CORS
+enabled so it may be used here.
+
+Note that unlike normal preformatted blocks, dynamically loaded preformatted blocks unfortunately
+do not get syntax colored.
## Adding redirects
@@ -241,44 +308,40 @@ redirects to the site very easily.
In the page that is the target of the redirect (where you'd like users to land), you simply add the
following to the front-matter:
-```
+```plain
redirect_from:
```
For example
-```
+```plain
---
-title: Frequantly Asked Questions
-overview: Questions Asked Frequently
+title: Frequently Asked Questions
+description: Questions Asked Frequently
-order: 12
+weight: 12
-layout: docs
-type: markdown
redirect_from: /faq
---
-```
+```
With the above in a page saved as _help/faq.md, the user will be able to access the page by going
to istio.io/help/faq as normal, as well as istio.io/faq.
You can also add many redirects like so:
-
-```
+
+```plain
---
-title: Frequantly Asked Questions
-overview: Questions Asked Frequently
+title: Frequently Asked Questions
+description: Questions Asked Frequently
-order: 12
+weight: 12
-layout: docs
-type: markdown
redirect_from:
- /faq
- /faq2
- /faq3
---
-```
+```
diff --git a/_about/feature-stages.md b/_about/feature-stages.md
index 5e0d73f1534db..cb7b1934dea89 100644
--- a/_about/feature-stages.md
+++ b/_about/feature-stages.md
@@ -1,11 +1,9 @@
---
title: Feature Status
-overview: List of features and their release stages.
+description: List of features and their release stages.
-order: 10
+weight: 10
-layout: about
-type: markdown
redirect_from:
- "/docs/reference/release-roadmap.html"
- "/docs/reference/feature-stages.html"
@@ -13,28 +11,26 @@ redirect_from:
---
{% include home.html %}
-Starting with 0.3, Istio releases are delivered on a monthly cadence. You can download the current version by visiting our
-[release page](https://github.com/istio/istio/releases).
-
-Please note that the phases (alpha, beta, and stable) are applied to individual features
+This page lists the relative maturity and support
+level of every Istio feature. Please note that the phases (Alpha, Beta, and Stable) are applied to individual features
within the project, not to the project as a whole. Here is a high level description of what these labels means:
-## Feature Phase Definition
+## Feature phase definitions
-| | Alpha | Beta | Stable
+| | Alpha | Beta | Stable
|-------------------|-------------------|-------------------|-------------------
-| **Purpose** | Demo-able, works end-to-end but has limitations | Usable in production, not a toy anymore | Dependable, production hardened
+| **Purpose** | Demo-able, works end-to-end but has limitations | Usable in production, not a toy anymore | Dependable, production hardened
| **API** | No guarantees on backward compatibility | APIs are versioned | Dependable, production-worthy. APIs are versioned, with automated version conversion for backward compatibility
-| **Performance** | Not quantified or guaranteed | Not quantified or guaranteed | Perf (latency/scale) is quantified, documented, with guarantees against regression
+| **Performance** | Not quantified or guaranteed | Not quantified or guaranteed | Performance (latency/scale) is quantified, documented, with guarantees against regression
| **Deprecation Policy** | None | Weak - 3 months | Dependable, Firm. 1 year notice will be provided before changes
## Istio features
Below is our list of existing features and their current phases. This information will be updated after every monthly release.
-### Traffic Management
+### Traffic management
-| Feature | Phase
+| Feature | Phase
|-------------------|-------------------
| [Protocols: HTTP 1.1](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/http_connection_management.html#http-protocols) | Beta
| [Protocols: HTTP 2.0](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/http_connection_management.html#http-protocols) | Alpha
@@ -47,60 +43,63 @@ Below is our list of existing features and their current phases. This informatio
| [Routing Rules: Circuit Break]({{home}}/docs/tasks/traffic-management/request-routing.html) | Alpha
| [Routing Rules: Header Rewrite]({{home}}/docs/tasks/traffic-management/request-routing.html) | Alpha
| [Routing Rules: Traffic Splitting]({{home}}/docs/tasks/traffic-management/request-routing.html) | Alpha
-| [Memquota Implementation and Integration]({{home}}/docs/tasks/telemetry/metrics-logs.html) | Alpha
+| Improved Routing Rules: Composite Service | Alpha
+| [Quota / Redis Rate Limiting (Adapter and Server)]({{home}}/docs/tasks/policy-enforcement/rate-limiting.html) | Alpha
+| [Memquota Implementation and Integration]({{home}}/docs/tasks/telemetry/metrics-logs.html) | Stable
| [Ingress TLS]({{home}}/docs/tasks/traffic-management/ingress.html) | Alpha
+| Egress Policy and Telemetry | Alpha
### Observability
-
-| Feature | Phase
+| Feature | Phase
|-------------------|-------------------
-| [Prometheus Integration]({{home}}/docs/guides/telemetry.html) | Beta
-| [Local Logging (STDIO)]({{home}}/docs/guides/telemetry.html) | Beta
-| [Statsd Integration]({{home}}/docs/reference/config/adapters/statsd.html) | Stable
-| [Service Dashboard in Grafana]({{home}}/docs/tasks/telemetry/using-istio-dashboard.html) | Beta
-| [Stackdriver Integration]({{home}}/docs/reference/config/adapters/stackdriver.html) | Alpha
-| [Service Graph]({{home}}/docs/tasks/telemetry/servicegraph.html) | Alpha
-| [Distributed Tracing to Zipkin / Jaeger]({{home}}/docs/tasks/telemetry/distributed-tracing.html) | Alpha
-| [Istio Component Dashboard in Grafana]({{home}}/docs/tasks/telemetry/using-istio-dashboard.html) - **New to 0.5** | Alpha
-
+| [Prometheus Integration]({{home}}/docs/guides/telemetry.html) | Beta
+| [Local Logging (STDIO)]({{home}}/docs/guides/telemetry.html) | Stable
+| [Statsd Integration]({{home}}/docs/reference/config/adapters/statsd.html) | Stable
+| [Service Dashboard in Grafana]({{home}}/docs/tasks/telemetry/using-istio-dashboard.html) | Beta
+| [Stackdriver Integration]({{home}}/docs/reference/config/adapters/stackdriver.html) | Alpha
+| [Service Graph]({{home}}/docs/tasks/telemetry/servicegraph.html) | Alpha
+| [Distributed Tracing to Zipkin / Jaeger]({{home}}/docs/tasks/telemetry/distributed-tracing.html) | Alpha
+| [Istio Component Dashboard in Grafana]({{home}}/docs/tasks/telemetry/using-istio-dashboard.html) | Beta
+| Service Tracing | Alpha
### Security
-
-| Feature | Phase
+| Feature | Phase
|-------------------|-------------------
-| [Deny Checker]({{home}}/docs/reference/config/adapters/denier.html) | Beta
-| [List Checker]({{home}}/docs/reference/config/adapters/list.html) | Beta
-| [Kubernetes: Service Credential Distribution]({{home}}/docs/concepts/security/mutual-tls.html) | Beta
+| [Deny Checker]({{home}}/docs/reference/config/adapters/denier.html) | Stable
+| [List Checker]({{home}}/docs/reference/config/adapters/list.html) | Stable
+| [Kubernetes: Service Credential Distribution]({{home}}/docs/concepts/security/mutual-tls.html) | Stable
| [Pluggable Key/Cert Support for Istio CA]({{home}}/docs/tasks/security/plugin-ca-cert.html) | Stable
-| [Service-to-service mutual TLS]({{home}}/docs/concepts/security/mutual-tls.html) | Beta
+| [Service-to-service mutual TLS]({{home}}/docs/concepts/security/mutual-tls.html) | Stable
| [Incremental Enablement of service-to-service mutual TLS]({{home}}/docs/tasks/security/per-service-mtls.html) | Alpha
| [VM: Service Credential Distribution]({{home}}/docs/concepts/security/mutual-tls.html) | Alpha
| [OPA Checker](https://github.com/istio/istio/blob/41a8aa4f75f31bf0c1911d844a18da4cff8ac584/mixer/adapter/opa/README.md) | Alpha
-
-
+| RBAC Mixer Adapter | Alpha
+| API Keys | Alpha
### Core
-
-| Feature | Phase
+| Feature | Phase
|-------------------|-------------------
| [Kubernetes: Envoy Installation and Traffic Interception]({{home}}/docs/setup/kubernetes/) | Beta
| [Kubernetes: Istio Control Plane Installation]({{home}}/docs/setup/kubernetes/) | Beta
| [Pilot Integration into Kubernetes Service Discovery]({{home}}/docs/setup/kubernetes/) | Stable
-| [Attribute Expression Language]({{home}}/docs/reference/config/mixer/expression-language.html) | Beta
-| [Mixer Adapter Authoring Model]({{home}}/blog/2017/adapter-model.html) | Beta
+| [Attribute Expression Language]({{home}}/docs/reference/config/mixer/expression-language.html) | Stable
+| [Mixer Adapter Authoring Model]({{home}}/blog/2017/adapter-model.html) | Stable
| [VM: Envoy Installation, Traffic Interception and Service Registration]({{home}}/docs/guides/integrating-vms.html) | Alpha
| [VM: Istio Control Plane Installation and Upgrade (Galley, Mixer, Pilot, CA)](https://github.com/istio/istio/issues/2083) | Alpha
-| [Kubernetes: Istio Control Plane Upgrade]({{home}}/docs/setup/kubernetes/) | Alpha
-| [Pilot Integration into Consul]({{home}}/docs/setup/consul/quick-start.html) | Alpha
-| [Pilot Integration into Eureka]({{home}}/docs/setup/consul/quick-start.html) | Alpha
+| VM: Ansible Envoy Installation, Interception and Registration | Alpha
+| [Kubernetes: Istio Control Plane Upgrade]({{home}}/docs/setup/kubernetes/) | Beta
+| [Pilot Integration into Consul]({{home}}/docs/setup/consul/quick-start.html) | Alpha
+| [Pilot Integration into Eureka]({{home}}/docs/setup/consul/quick-start.html) | Alpha
| [Pilot Integration into Cloud Foundry Service Discovery]({{home}}/docs/setup/consul/quick-start.html) | Alpha
-| [Basic Config Resource Validation](https://github.com/istio/istio/issues/1894) | Alpha
-
-
-
+| [Basic Config Resource Validation](https://github.com/istio/istio/issues/1894) | Alpha
+| Mixer Telemetry Collection (Tracing, Logging, Monitoring) | Alpha
+| Custom Mixer Build Model | Alpha
+| Enable API attributes using an IDL | Alpha
+| [Helm]({{home}}/docs/setup/kubernetes/helm-install.html) | Alpha
+| [Multicluster Mesh]({{home}}/docs/setup/kubernetes/multicluster-install.html) | Alpha
>
-Please get in touch by joining our [community]({{home}}/community) if there are features you'd like to see in our future releases!
+Please get in touch by joining our [community]({{home}}/community.html) if there are features you'd like to see in our future releases!
diff --git a/_about/index.md b/_about/index.md
index bc5acba7ed98a..d0410be0c0db7 100644
--- a/_about/index.md
+++ b/_about/index.md
@@ -1,12 +1,20 @@
---
title: About Istio
-overview: All about Istio.
+description: All about Istio.
-order: 15
+weight: 15
-layout: about
-type: markdown
toc: false
---
+{% include home.html %}
-{% include section-index.html docs=site.about %}
+Get a bit more in-depth info about the Istio project.
+
+- [What is Istio?]({{home}}/about/intro.html). Get some context about what problems Istio is designed to solve.
+
+- [Release Notes]({{home}}/about/notes/). Learn about the latest features, improvements, and bug fixes.
+
+- [Feature Status]({{home}}/about/feature-stages.html). Get a detailed list of Istio's individual features and their relative
+maturity and support level.
+
+- [Contributing to the Docs]({{home}}/about/contribute/). Learn how you can help contribute to improve Istio's documentation.
diff --git a/_about/intro.md b/_about/intro.md
index 658c44e69fd7c..0c8197f6cc57a 100644
--- a/_about/intro.md
+++ b/_about/intro.md
@@ -1,13 +1,10 @@
---
title: What is Istio?
-overview: Context about what problems Istio is designed to solve.
+description: Context about what problems Istio is designed to solve.
-order: 0
+weight: 1
-layout: about
-type: markdown
toc: false
-redirect_from: /about.html
---
Istio is an open platform that provides a uniform way to connect, manage,
@@ -26,11 +23,11 @@ rate limits and quotas.
- Automatic metrics, logs, and traces for all traffic within a cluster,
including cluster ingress and egress.
-- Secure service-to-service authentication with strong identity assertions
-between services in a cluster.
+- Secure service-to-service communication in a cluster with strong
+identity-based authentication and authorization.
Istio can be deployed on [Kubernetes](https://kubernetes.io),
[Nomad](https://nomadproject.io) with [Consul](https://www.consul.io/). We
plan to add support for additional platforms such as
[Cloud Foundry](https://www.cloudfoundry.org/),
-and [Apache Mesos](http://mesos.apache.org/) in the near future.
+and [Apache Mesos](https://mesos.apache.org/) in the near future.
diff --git a/_about/notes/0.1.md b/_about/notes/0.1.md
index 8c21c7494438c..fda82fa1c6fbf 100644
--- a/_about/notes/0.1.md
+++ b/_about/notes/0.1.md
@@ -1,10 +1,8 @@
---
title: Istio 0.1
-order: 100
+weight: 100
-layout: about
-type: markdown
toc: false
redirect_from: /docs/welcome/notes/0.1.html
---
diff --git a/_about/notes/0.2.md b/_about/notes/0.2.md
index dd8f907a4d0f3..5271b83fe6e5b 100644
--- a/_about/notes/0.2.md
+++ b/_about/notes/0.2.md
@@ -1,43 +1,43 @@
---
title: Istio 0.2
-order: 99
+weight: 99
-layout: about
-type: markdown
redirect_from: /docs/welcome/notes/0.2.html
---
## General
- **Updated Config Model**. Istio now uses the Kubernetes [Custom Resource](https://kubernetes.io/docs/concepts/api-extension/custom-resources/)
-model to describe and store its configuration. When running in Kubernetes, configuration can now be optionally managed using the `kubectl`
+model to describe and store its configuration. When running in Kubernetes, configuration can now be optionally managed using the `kubectl`
command.
-- **Multiple Namespace Support**. Istio control plane components are now in the dedicated "istio-system" namespace. Istio can manage
+- **Multiple Namespace Support**. Istio control plane components are now in the dedicated "istio-system" namespace. Istio can manage
services in other non-system namespaces.
- **Mesh Expansion**. Initial support for adding non-Kubernetes services (in the form of VMs and/or physical machines) to a mesh. This is an early version of
this feature and has some limitations (such as requiring a flat network across containers and VMs).
-- **Multi-Environment Support**. Initial support for using Istio in conjunction with other service registries
-including Consul and Eureka.
+- **Multi-Environment Support**. Initial support for using Istio in conjunction with other service registries
+including Consul and Eureka.
-- **Automatic injection of sidecars**. Istio sidecar can automatically be injected into a Pod upon deployment using the [Initializers](https://kubernetes.io/docs/admin/extensible-admission-controllers/#what-are-initializers) alpha feature in Kubernetes.
+- **Automatic injection of sidecars**. Istio sidecar can automatically be injected into a Pod upon deployment using the
+[Initializers](https://kubernetes.io/docs/admin/extensible-admission-controllers/#what-are-initializers) alpha feature in Kubernetes.
-## Perf and quality
+## Performance and quality
-There have been many performance and reliability improvements throughout the system. We don’t consider Istio 0.2 ready for production yet, but we’ve made excellent progress in that direction. Here are a few items of note:
+There have been many performance and reliability improvements throughout the system. We don’t consider Istio 0.2 ready for production yet, but
+we’ve made excellent progress in that direction. Here are a few items of note:
-- **Caching Client**. The Mixer client library used by Envoy now provides caching for Check calls and batching for Report calls, considerably reducing
+- **Caching Client**. The Mixer client library used by Envoy now provides caching for Check calls and batching for Report calls, considerably reducing
end-to-end overhead.
- **Avoid Hot Restarts**. The need to hot-restart Envoy has been mostly eliminated through effective use of LDS/RDS/CDS/EDS.
- **Reduced Memory Use**. Significantly reduced the size of the sidecar helper agent, from 50Mb to 7Mb.
-- **Improved Mixer Latency**. Mixer now clearly delineates configuration-time vs. request-time computations, which avoids doing extra setup work at
-request-time for initial requests and thus delivers a smoother average latency. Better resource caching also contributes to better end-to-end perf.
+- **Improved Mixer Latency**. Mixer now clearly delineates configuration-time vs. request-time computations, which avoids doing extra setup work at
+request-time for initial requests and thus delivers a smoother average latency. Better resource caching also contributes to better end-to-end performance.
- **Reduced Latency for Egress Traffic**. We now forward traffic to external services directly from the sidecar.
@@ -55,15 +55,15 @@ Jaeger tracing.
- **Ingress Policies**. In addition to east-west traffic supported in 0.1. policies can now be applied to north-south traffic.
-- **Support for TCP Services**. In addition to the HTTP-level policy controls available in 0.1, 0.2 introduces policy controls for
+- **Support for TCP Services**. In addition to the HTTP-level policy controls available in 0.1, 0.2 introduces policy controls for
TCP services.
-- **New Mixer API**. The API that Envoy uses to interact with Mixer has been completely redesigned for increased robustness, flexibility, and to support
+- **New Mixer API**. The API that Envoy uses to interact with Mixer has been completely redesigned for increased robustness, flexibility, and to support
rich proxy-side caching and batching for increased performance.
-- **New Mixer Adapter Model**. A new adapter composition model makes it easier to extend Mixer by adding whole new classes of adapters via templates. This
+- **New Mixer Adapter Model**. A new adapter composition model makes it easier to extend Mixer by adding whole new classes of adapters via templates. This
new model will serve as the foundational building block for many features in the future. See the
-[Adapter Developer's Guide](https://github.com/istio/istio/blob/master/mixer/doc/adapters.md) to learn how
+[Adapter Developer's Guide](https://github.com/istio/istio/wiki/Mixer-Adapter-Dev-Guide) to learn how
to write adapters.
- **Improved Mixer Build Model**. It’s now easier to build a Mixer binary that includes custom adapters.
@@ -84,10 +84,15 @@ identity provisioning. This agent runs on each node (VM / physical machine) and
- **Bring Your Own CA Certificates**. Allows users to provide their own key and certificate for Istio CA.
- **Persistent CA Key/Certificate Storage**. Istio CA now stores signing key/certificates in
-persistent storage to facilitate CA restarts.
+persistent storage to facilitate CA restarts.
## Known issues
-- **User may get periodical 404 when accessing the application**: We have noticed that Envoy doesn't get routes properly occasionally thus a 404 is returned to the user. We are actively working on this [issue](https://github.com/istio/istio/issues/1038).
-- **Istio Ingress or Egress reports ready before Pilot is actually ready**: You can check the istio-ingress and istio-egress pods status in the `istio-system` namespace and wait a few seconds after all the Istio pods reach ready status. We are actively working on this [issue](https://github.com/istio/istio/pull/1055).
+- **User may get periodical 404 when accessing the application**: We have noticed that Envoy doesn't get routes properly occasionally
+thus a 404 is returned to the user. We are actively working on this [issue](https://github.com/istio/istio/issues/1038).
+
+- **Istio Ingress or Egress reports ready before Pilot is actually ready**: You can check the istio-ingress and istio-egress pods status
+in the `istio-system` namespace and wait a few seconds after all the Istio pods reach ready status. We are actively working on this
+[issue](https://github.com/istio/istio/pull/1055).
+
- **A service with Istio Auth enabled can't communicate with a service without Istio**: This limitation will be removed in the near future.
diff --git a/_about/notes/0.3.md b/_about/notes/0.3.md
index eb4162949b66d..524eb9c287c27 100644
--- a/_about/notes/0.3.md
+++ b/_about/notes/0.3.md
@@ -1,13 +1,13 @@
---
title: Istio 0.3
-order: 98
+weight: 98
-layout: about
-type: markdown
redirect_from: /docs/welcome/notes/0.3.html
---
+{% include home.html %}
+
## General
Starting with 0.3, Istio is switching to a monthly release cadence. We hope this will help accelerate our ability
@@ -40,6 +40,5 @@ significant drop in average latency for authorization checks.
- **Config Validation**. Mixer does more extensive validation of configuration state in order to catch problems earlier.
We expect to invest more in this area in coming releases.
-
If you're into the nitty-gritty details, you can see our more detailed low-level
release notes [here](https://github.com/istio/istio/wiki/v0.3.0).
diff --git a/_about/notes/0.4.md b/_about/notes/0.4.md
index dbf6f5bff80f9..668ca439c2327 100644
--- a/_about/notes/0.4.md
+++ b/_about/notes/0.4.md
@@ -1,17 +1,15 @@
---
title: Istio 0.4
-order: 97
+weight: 97
-layout: about
-type: markdown
toc: false
redirect_from: /docs/welcome/notes/0.4.html
---
{% include home.html %}
-This release has only got a few weeks' worth of changes, as we stabilize our monthly release process.
-In addition to the usual pile of bug fixes and perf improvements, this release includes:
+This release has only got a few weeks' worth of changes, as we stabilize our monthly release process.
+In addition to the usual pile of bug fixes and performance improvements, this release includes:
- **Cloud Foundry**. Added minimum Pilot support for the [Cloud Foundry](https://www.cloudfoundry.org) platform, making it
possible for Pilot to discover CF services and service instances.
diff --git a/_about/notes/0.5.md b/_about/notes/0.5.md
index c9872ebcd3481..7adb9f5be8216 100644
--- a/_about/notes/0.5.md
+++ b/_about/notes/0.5.md
@@ -1,14 +1,12 @@
---
title: Istio 0.5
-order: 96
+weight: 96
-layout: about
-type: markdown
---
{% include home.html %}
-In addition to the usual pile of bug fixes and perf improvements, this release includes the new or
+In addition to the usual pile of bug fixes and performance improvements, this release includes the new or
updated features detailed below.
## Networking
@@ -17,22 +15,24 @@ updated features detailed below.
the components you want (e.g, Pilot+Ingress only as the minimal Istio install). Refer to the `istioctl` CLI tool for generating a
information on customized Istio deployments.
-- **Automatic Proxy Injection**. We leverage Kubernetes 1.9's new [mutating webhook feature](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.9.md#api-machinery) to provide automatic
+- **Automatic Proxy Injection**. We leverage Kubernetes 1.9's new
+[mutating webhook feature](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.9.md#api-machinery) to provide automatic
pod-level proxy injection. Automatic injection requires Kubernetes 1.9 or beyond and
-therefore doesn't work on older versions. The alpha initializer mechanism is no longer supported. [Learn more]({{home}}/docs/setup/kubernetes/sidecar-injection.html#automatic-sidecar-injection)
+therefore doesn't work on older versions. The alpha initializer mechanism is no longer supported.
+[Learn more]({{home}}/docs/setup/kubernetes/sidecar-injection.html#automatic-sidecar-injection)
- **Revised Traffic Rules**. Based on user feedback, we have made significant changes to Istio's traffic management
(routing rules, destination rules, etc.). We would love your continuing feedback while we polish this in the coming weeks.
## Mixer adapters
-- **Open Policy Agent**. Mixer now has an authorization adapter implementing the [open policy agent](http://www.openpolicyagent.org) model,
+- **Open Policy Agent**. Mixer now has an authorization adapter implementing the [open policy agent](https://www.openpolicyagent.org) model,
providing a flexible fine-grained access control mechanism. [Learn more](https://docs.google.com/document/d/1U2XFmah7tYdmC5lWkk3D43VMAAQ0xkBatKmohf90ICA/edit#heading=h.fmlgl8m03gfy)
- **Istio RBAC**. Mixer now has a role-based access control adapter.
[Learn more]({{home}}/docs/concepts/security/rbac.html)
-- **Fluentd**. Mixer now has an adapter for log collection through [fluentd](https://www.fluentd.org).
+- **Fluentd**. Mixer now has an adapter for log collection through [fluentd](https://www.fluentd.org).
[Learn more]({{home}}/docs/tasks/telemetry/fluentd.html)
- **Stdio**. The stdio adapter now lets you log to files with support for log rotation & backup, along with a host
@@ -51,10 +51,10 @@ of controls.
## Other
- **Release-Mode Binaries**. We switched release and installation default to release for improved
-performance and security.
+performance and security.
- **Component Logging**. Istio components now offer a rich set of command-line options to control local logging, including
-common support for log rotation.
+common support for log rotation.
- **Consistent Version Reporting**. Istio components now offer a consistent command-line interface to report their version information.
diff --git a/_about/notes/0.6.md b/_about/notes/0.6.md
new file mode 100644
index 0000000000000..c527fc1d85b78
--- /dev/null
+++ b/_about/notes/0.6.md
@@ -0,0 +1,42 @@
+---
+title: Istio 0.6
+
+weight: 95
+
+---
+{% include home.html %}
+
+In addition to the usual pile of bug fixes and performance improvements, this release includes the new or
+updated features detailed below.
+
+## Networking
+
+- **Custom Envoy Config**. Pilot now supports ferrying custom Envoy config to the
+proxy. [Learn more](https://github.com/mandarjog/istioluawebhook)
+
+## Mixer adapters
+
+- **SolarWinds**. Mixer can now interface to AppOptics and Papertrail.
+[Learn more]({{home}}/docs/reference/config/adapters/solarwinds.html)
+
+- **Redisquota**. Mixer now supports a Redis-based adapter for rate limit tracking.
+[Learn more]({{home}}/docs/reference/config/adapters/redisquota.html)
+
+- **Datadog**. Mixer now provides an adapter to deliver metric data to a Datadog agent.
+[Learn more]({{home}}/docs/reference/config/adapters/datadog.html)
+
+## Other
+
+- **Separate Check & Report Clusters**. When configuring Envoy, it's now possible to use different clusters
+for Mixer instances that are used for Mixer's Check functionality from those used for Mixer's Report
+functionality. This may be useful in large deployments for better scaling of Mixer instances.
+
+- **Monitoring Dashboards**. There are now preliminary Mixer & Pilot monitoring dashboards in Grafana.
+
+- **Servicegraph Visualization**. Servicegraph has a new visualization. [Learn more]({{home}}/docs/tasks/telemetry/servicegraph.html).
+
+- **Liveness and Readiness Probes**. Istio components now provide canonical liveness and readiness
+probe support to help ensure mesh infrastructure health. [Learn more]({{home}}/docs/tasks/security/health-check.html)
+
+- **Egress Policy and Telemetry**. Istio can monitor traffic to external services defined by EgressRule or External Service. Istio can also apply
+Mixer policies on this traffic.
diff --git a/_about/notes/0.7.md b/_about/notes/0.7.md
new file mode 100644
index 0000000000000..b5c061b96c4d1
--- /dev/null
+++ b/_about/notes/0.7.md
@@ -0,0 +1,22 @@
+---
+title: Istio 0.7
+
+weight: 94
+
+---
+{% include home.html %}
+
+For this release, we focused on improving our build and test infrastructures and increasing the
+quality of our tests. As a result, there are no new features for this month.
+
+However, this release does include a large number of bug fixes and performance improvements.
+
+Please note that this release includes preliminary support for the new v1alpha3 traffic management
+functionality. This functionality is still in a great deal of flux and there may be some breaking
+changes in 0.8. So if you feel like exploring, please go right ahead, but expect that this may
+change in 0.8 and beyond.
+
+Known Issues:
+
+Our [helm chart](https://istio.io/docs/setup/kubernetes/helm.html)
+currently requires some workarounds to apply the chart correctly; see issue [4701](https://github.com/istio/istio/issues/4701) for details.
diff --git a/_about/notes/0.8.md b/_about/notes/0.8.md
new file mode 100644
index 0000000000000..c1329a6bebd67
--- /dev/null
+++ b/_about/notes/0.8.md
@@ -0,0 +1,61 @@
+---
+title: Istio 0.8
+
+weight: 93
+---
+{% include home.html %}
+
+In addition to the usual pile of bug fixes and performance improvements, this release includes the new or
+updated features detailed below.
+
+## Networking
+
+- **Revamped Traffic Management Model**. We're finally ready to take the wraps off our
+new [traffic management configuration model]({{home}}/blog/2018/v1alpha3-routing.html).
+The new model adds many new features and addresses usability issues
+with the prior model. There is a conversion tool built into `istioctl` to help migrate your config from
+the old model. [Learn more about the new traffic management model]({{home}}/docs/tasks/traffic-management-v1alpha3/).
+
+- **Envoy V2**. Users can choose to inject Envoy V2 as the sidecar. In this mode, Pilot uses Envoy's new API to push configuration to the data plane. This new approach
+increases effective scalability and should eliminate spurious 404 errors. [TBD: docs on how to control this?]
+
+- **Gateway for Ingress/Egress**. We no longer support combining Kubernetes Ingress specs with Istio route rules, as it has led to several
+bugs and reliability issues. Istio now
+supports a platform independent ingress/egress Gateway that works across Kubernetes and Cloud Foundry and works seamlessly with the routing rules
+[TBD: doc link]
+
+- **Constrained Inbound Ports**. We now restrict the inbound ports in a pod to the ones declared by the apps running inside that pod.
+
+## Security
+
+- **Introducing Citadel**. We've finally given a name to our security component. What was
+formerly known as Istio-Auth or Istio-CA is now called Citadel.
+
+- **Multicluster Support**. We support per-cluster Citadel in multicluster deployments such that all Citadels share the same root cert
+and workloads can authenticate each other across the mesh.
+
+- **Authentication Policy**. We've introduced authentication policy that can be used to configure service-to-service
+authentication (mutual TLS) and end user authentication. This is the recommended way for enabling mutual TLS
+(over the existing config flag and service annotations). [Learn more]({{home}}/docs/tasks/security/authn-policy.html).
+
+## Telemetry
+
+- **Self-Reporting**. Mixer and Pilot now produce telemetry that flows through the normal
+Istio telemetry pipeline, just like services in the mesh.
+
+## Setup
+
+- **A la Carte Istio**. Istio has a rich set of features, however you don't need to install or consume them all together. By using
+Helm or `istioctl gen-deploy`, users can install only the features they want. For example, users can install Pilot only and enjoy traffic
+management functionality without dealing with Mixer or Citadel.
+Learn more about [customization through Helm](https://istio.io/docs/setup/kubernetes/helm-install.html#customization-with-helm)
+and about [`istioctl gen-deploy`](https://istio.io/docs/reference/commands/istioctl.html#istioctl%20gen-deploy).
+
+## Mixer adapters
+
+- **CloudWatch**. Mixer can now report metrics to AWS CloudWatch.
+[Learn more]({{home}}/docs/reference/config/adapters/cloudwatch.html)
+
+## Known issues with 0.8
+
+- A gateway with virtual services pointing to a headless service won't work ([Issue #5005](https://github.com/istio/istio/issues/5005)).
diff --git a/_about/notes/index.md b/_about/notes/index.md
index 3e6416ef60b84..9ee2f80c85a29 100644
--- a/_about/notes/index.md
+++ b/_about/notes/index.md
@@ -1,31 +1,32 @@
---
title: Release Notes
-overview: Istio releases information.
+description: Description of features and improvements for every Istio release.
-order: 5
+weight: 5
-layout: about
-type: markdown
redirect_from:
- "/docs/reference/release-notes.html"
- "/release-notes"
- "/docs/welcome/notes/index.html"
-toc: false
+ - "/docs/references/notes"
+toc: false
---
+{% include section-index.html docs=site.about %}
-{% include section-index.html docs=site.docs %}
+The latest Istio monthly release is {{site.data.istio.version}} ([release notes]({{site.data.istio.version}}.html)). You can
+[download {{site.data.istio.version}}](https://github.com/istio/istio/releases) with:
+```command
+$ curl -L https://git.io/getLatestIstio | sh -
+```
-- The [latest](https://github.com/istio/istio/releases) Istio monthly release is {{site.data.istio.version}}. It is downloaded when the following is used(*):
- ```
- curl -L https://git.io/getLatestIstio | sh -
- ```
+The most recent stable release is 0.2.12. You can [download 0.2.12](https://github.com/istio/istio/releases/tag/0.2.12) with:
-- The most recent 'stable' release is [0.2.12](https://github.com/istio/istio/releases/tag/0.2.12), the matching docs are [archive.istio.io/v0.2/docs/](https://archive.istio.io/v0.2/docs/)
- ```
- curl -L https://git.io/getIstio | sh -
- ```
+```command
+$ curl -L https://git.io/getIstio | sh -
+```
-We typically wait to 'bake' the latest release for several weeks and ensure it is more stable than the previous one before promoting it to stable.
+[Archived documentation for the 0.2.12 release](https://archive.istio.io/v0.2/docs/).
-> (*) Note: security conscious users should examine the output of the curl command before piping it to a shell.
+> As we don't control the `git.io` domain, please examine the output of the `curl` command before piping it to a shell if running in any
+sensitive or non-sandboxed environment.
diff --git a/_blog/2017/0.1-announcement.md b/_blog/2017/0.1-announcement.md
index ad49fd5cd1714..871a64e6afa23 100644
--- a/_blog/2017/0.1-announcement.md
+++ b/_blog/2017/0.1-announcement.md
@@ -1,14 +1,12 @@
---
-title: "Introducing Istio"
-overview: Istio 0.1 announcement
-publish_date: May 24, 2017
+title: Introducing Istio
+description: Istio 0.1 announcement
+publishdate: 2018-05-24
subtitle: A robust service mesh for microservices
attribution: The Istio Team
-order: 100
+weight: 100
-layout: blog
-type: markdown
redirect_from:
- "/blog/istio-service-mesh-for-microservices.html"
- "/blog/0.1-announcement.html"
@@ -24,7 +22,8 @@ Writing reliable, loosely coupled, production-grade applications based on micros
Inconsistent attempts at solving these challenges, cobbled together from libraries, scripts and Stack Overflow snippets leads to solutions that vary wildly across languages and runtimes, have poor observability characteristics and can often end up compromising security.
-One solution is to standardize implementations on a common RPC library like [gRPC](http://grpc.io), but this can be costly for organizations to adopt wholesale and leaves out brownfield applications which may be practically impossible to change. Operators need a flexible toolkit to make their microservices secure, compliant, trackable and highly available, and developers need the ability to experiment with different features in production, or deploy canary releases, without impacting the system as a whole.
+One solution is to standardize implementations on a common RPC library like [gRPC](https://grpc.io), but this can be costly for organizations to adopt wholesale
+and leaves out brownfield applications which may be practically impossible to change. Operators need a flexible toolkit to make their microservices secure, compliant, trackable and highly available, and developers need the ability to experiment with different features in production, or deploy canary releases, without impacting the system as a whole.
## Solution: Service Mesh
@@ -36,21 +35,17 @@ Google, IBM and Lyft joined forces to create Istio from a desire to provide a re
**Fleet-wide Visibility**: Failures happen, and operators need tools to stay on top of the health of clusters and their graphs of microservices. Istio produces detailed monitoring data about application and network behaviors that is rendered using [Prometheus](https://prometheus.io/) & [Grafana](https://github.com/grafana/grafana), and can be easily extended to send metrics and logs to any collection, aggregation and querying system. Istio enables analysis of performance hotspots and diagnosis of distributed failure modes with [Zipkin](https://github.com/openzipkin/zipkin) tracing.
-{% include figure.html width='100%' ratio='55.42%'
- img='./img/istio_grafana_dashboard-new.png'
- alt='Grafana Dashboard with Response Size'
- title='Grafana Dashboard with Response Size'
- caption='Grafana Dashboard with Response Size'
+{% include image.html width="100%" ratio="55.42%"
+ link="./img/istio_grafana_dashboard-new.png"
+ caption="Grafana Dashboard with Response Size"
%}
-{% include figure.html width='100%' ratio='29.91%'
- img='./img/istio_zipkin_dashboard.png'
- alt='Zipkin Dashboard'
- title='Zipkin Dashboard'
- caption='Zipkin Dashboard'
+{% include image.html width="100%" ratio="29.91%"
+ link="./img/istio_zipkin_dashboard.png"
+ caption="Zipkin Dashboard"
%}
-**Resiliency and efficiency**: When developing microservices, operators need to assume that the network will be unreliable. Operators can use retries, load balancing, flow-control (HTTP/2), and circuit-breaking to compensate for some of the common failure modes due to an unreliable network. Istio provides a uniform approach to configuring these features, making it easier to operate a highly resilient service mesh.
+**Resiliency and efficiency**: When developing microservices, operators need to assume that the network will be unreliable. Operators can use retries, load balancing, flow-control (HTTP/2), and circuit-breaking to compensate for some of the common failure modes due to an unreliable network. Istio provides a uniform approach to configuring these features, making it easier to operate a highly resilient service mesh.
**Developer productivity**: Istio provides a significant boost to developer productivity by letting them focus on building service features in their language of choice, while Istio handles resiliency and networking challenges in a uniform way. Developers are freed from having to bake solutions to distributed systems problems into their code. Istio further improves productivity by providing common functionality supporting A/B testing, canarying, and fault injection.
@@ -58,14 +53,14 @@ Google, IBM and Lyft joined forces to create Istio from a desire to provide a re
**Secure by default**: It is a common fallacy of distributed computing that the network is secure. Istio enables operators to authenticate and secure all communication between services using a mutual TLS connection, without burdening the developer or the operator with cumbersome certificate management tasks. Our security framework is aligned with the emerging [SPIFFE](https://spiffe.github.io/) specification, and is based on similar systems that have been tested extensively inside Google.
-**Incremental Adoption**: We designed Istio to be completely transparent to the services running in the mesh, allowing teams to incrementally adopt features of Istio over time. Adopters can start with enabling fleet-wide visibility and once they’re comfortable with Istio in their environment they can switch on other features as needed.
+**Incremental Adoption**: We designed Istio to be completely transparent to the services running in the mesh, allowing teams to incrementally adopt features of Istio over time. Adopters can start with enabling fleet-wide visibility and once they’re comfortable with Istio in their environment they can switch on other features as needed.
## Join us in this journey
-Istio is a completely open development project. Today we are releasing version 0.1, which works in a Kubernetes cluster, and we plan to have major new
-releases every 3 months, including support for additional environments. Our goal is to enable developers and operators to rollout and operate microservices
-with agility, complete visibility of the underlying network, and uniform control and security in all environments. We look forward to working with the Istio
-community and our partners towards these goals, following our [roadmap]({{home}}/docs/reference/release-roadmap.html).
+Istio is a completely open development project. Today we are releasing version 0.1, which works in a Kubernetes cluster, and we plan to have major new
+releases every 3 months, including support for additional environments. Our goal is to enable developers and operators to rollout and operate microservices
+with agility, complete visibility of the underlying network, and uniform control and security in all environments. We look forward to working with the Istio
+community and our partners towards these goals, following our [roadmap]({{home}}/docs/reference/release-roadmap.html).
Visit [here](https://github.com/istio/istio/releases) to get the latest released bits.
@@ -74,9 +69,9 @@ View the [presentation]({{home}}/talks/istio_talk_gluecon_2017.pdf) from GlueCon
## Community
We are excited to see early commitment to support the project from many companies in the community:
-[Red Hat](https://blog.openshift.com/red-hat-istio-launch/) with Red Hat Openshift and OpenShift Application Runtimes,
+[Red Hat](https://blog.openshift.com/red-hat-istio-launch/) with Red Hat OpenShift and OpenShift Application Runtimes,
Pivotal with [Pivotal Cloud Foundry](https://content.pivotal.io/blog/pivotal-and-istio-advancing-the-ecosystem-for-microservices-in-the-enterprise),
-Weaveworks with [Weave Cloud](https://www.weave.works/blog/istio-weave-cloud/) and Weave Net 2.0,
+WeaveWorks with [Weave Cloud](https://www.weave.works/blog/istio-weave-cloud/) and Weave Net 2.0,
[Tigera](https://www.projectcalico.org/welcoming-istio-to-the-kubernetes-networking-community) with the Project Calico Network Policy Engine
and [Datawire](https://www.datawire.io/istio-and-datawire-ecosystem/) with the Ambassador project. We hope to see many more companies join us in
this journey.
@@ -86,12 +81,12 @@ To get involved, connect with us via any of these channels:
* [istio.io]({{home}}) for documentation and examples.
* The [istio-users@googlegroups.com](https://groups.google.com/forum/#!forum/istio-users) mailing list for general discussions,
-or [istio-announce@googlegroups.com](https://groups.google.com/forum/#!forum/istio-announce) for key announcements regarding the project.
+or [istio-announce@googlegroups.com](https://groups.google.com/forum/#!forum/istio-announce) for key announcements regarding the project.
* [Stack Overflow](https://stackoverflow.com/questions/tagged/istio) for curated questions and answers
-* [GitHub](http://github.com/istio/issues/issues) for filing issues
+* [GitHub](https://github.com/istio/issues/issues) for filing issues
-* [@IstioMesh](https://twitter.com/IstioMesh) on Twitter
+* [@IstioMesh](https://twitter.com/IstioMesh) on Twitter
From everyone working on Istio, welcome aboard!
diff --git a/_blog/2017/0.1-auth.md b/_blog/2017/0.1-auth.md
index 0c3906f8994ba..0e2158381f946 100644
--- a/_blog/2017/0.1-auth.md
+++ b/_blog/2017/0.1-auth.md
@@ -1,14 +1,12 @@
---
-title: "Using Istio to Improve End-to-End Security"
-overview: Istio Auth 0.1 announcement
-publish_date: May 25, 2017
+title: Using Istio to Improve End-to-End Security
+description: Istio Auth 0.1 announcement
+publishdate: 2017-05-25
subtitle: Secure by default service to service communications
attribution: The Istio Team
-order: 99
+weight: 99
-layout: blog
-type: markdown
redirect_from:
- "/blog/0.1-auth.html"
- "/blog/istio-auth-for-microservices.html"
@@ -16,23 +14,23 @@ redirect_from:
{% include home.html %}
Conventional network security approaches fail to address security threats to distributed applications deployed in dynamic production environments. Today, we describe how Istio Auth enables enterprises to transform their security posture from just protecting the edge to consistently securing all inter-service communications deep within their applications. With Istio Auth, developers and operators can protect services with sensitive data against unauthorized insider access and they can achieve this without any changes to the application code!
-
-Istio Auth is the security component of the broader [Istio platform]({{home}}/). It incorporates the learnings of securing millions of microservice
+
+Istio Auth is the security component of the broader [Istio platform]({{home}}/). It incorporates the learnings of securing millions of microservice
endpoints in Google’s production environment.
## Background
Modern application architectures are increasingly based on shared services that are deployed and scaled dynamically on cloud platforms. Traditional network edge security (e.g. firewall) is too coarse-grained and allows access from unintended clients. An example of a security risk is stolen authentication tokens that can be replayed from another client. This is a major risk for companies with sensitive data that are concerned about insider threats. Other network security approaches like IP whitelists have to be statically defined, are hard to manage at scale, and are unsuitable for dynamic production environments.
-Thus, security administrators need a tool that enables them to consistently, and by default, secure all communication between services across diverse production environments.
+Thus, security administrators need a tool that enables them to consistently, and by default, secure all communication between services across diverse production environments.
## Solution: strong service identity and authentication
-Google has, over the years, developed architecture and technology to uniformly secure millions of microservice endpoints in its production environment against
-external
-attacks and insider threats. Key security principles include trusting the endpoints and not the network, strong mutual authentication based on service identity and service level authorization. Istio Auth is based on the same principles.
+Google has, over the years, developed architecture and technology to uniformly secure millions of microservice endpoints in its production environment against
+external
+attacks and insider threats. Key security principles include trusting the endpoints and not the network, strong mutual authentication based on service identity and service level authorization. Istio Auth is based on the same principles.
-The version 0.1 release of Istio Auth runs on Kubernetes and provides the following features:
+The version 0.1 release of Istio Auth runs on Kubernetes and provides the following features:
* Strong identity assertion between services
@@ -46,11 +44,9 @@ Istio Auth is based on industry standards like mutual TLS and X.509. Furthermore
The diagram below provides an overview of the Istio Auth service authentication architecture on Kubernetes.
-{% include figure.html width='100%' ratio='56.25%'
- img='./img/istio_auth_overview.svg'
- alt='Istio Auth Overview'
- title='Istio Auth Overview'
- caption='Istio Auth Overview'
+{% include image.html width="100%" ratio="56.25%"
+ link="./img/istio_auth_overview.svg"
+ caption="Istio Auth Overview"
%}
The above diagram illustrates three key security features:
@@ -67,7 +63,7 @@ Istio Auth uses [Kubernetes service accounts](https://kubernetes.io/docs/tasks/c
### Communication security
-Service-to-service communication is tunneled through high performance client side and server side [Envoy](https://envoyproxy.github.io/envoy/) proxies. The communication between the proxies is secured using mutual TLS. The benefit of using mutual TLS is that the service identity is not expressed as a bearer token that can be stolen or replayed from another source. Istio Auth also introduces the concept of Secure Naming to protect from a server spoofing attacks - the client side proxy verifies that the authenticated server's service account is allowed to run the named service.
+Service-to-service communication is tunneled through high performance client side and server side [Envoy](https://envoyproxy.github.io/envoy/) proxies. The communication between the proxies is secured using mutual TLS. The benefit of using mutual TLS is that the service identity is not expressed as a bearer token that can be stolen or replayed from another source. Istio Auth also introduces the concept of Secure Naming to protect from server spoofing attacks - the client side proxy verifies that the authenticated server's service account is allowed to run the named service.
### Key management and distribution
@@ -77,17 +73,15 @@ Istio Auth provides a per-cluster CA (Certificate Authority) and automated key &
* Distributes keys and certificates to the appropriate pods using [Kubernetes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/).
-* Rotates keys and certificates periodically.
+* Rotates keys and certificates periodically.
* Revokes a specific key and certificate pair when necessary (future).
-
+
The following diagram explains the end to end Istio Auth authentication workflow on Kubernetes:
-{% include figure.html width='100%' ratio='56.25%'
- img='./img/istio_auth_workflow.svg'
- alt='Istio Auth Workflow'
- title='Istio Auth Workflow'
- caption='Istio Auth Workflow'
+{% include image.html width="100%" ratio="56.25%"
+ link="./img/istio_auth_workflow.svg"
+ caption="Istio Auth Workflow"
%}
Istio Auth is part of the broader security story for containers. Red Hat, a partner on the development of Kubernetes, has identified [10 Layers](https://www.redhat.com/en/resources/container-security-openshift-cloud-devops-whitepaper) of container security. Istio and Istio Auth addresses two of these layers: "Network Isolation" and "API and Service Endpoint Management". As cluster federation evolves on Kubernetes and other platforms, our intent is for Istio to secure communications across services spanning multiple federated clusters.
@@ -95,14 +89,14 @@ Istio Auth is part of the broader security story for containers. Red Hat, a part
## Benefits of Istio Auth
**Defense in depth**: When used in conjunction with Kubernetes (or infrastructure) network policies, users achieve higher levels of confidence, knowing that pod-to-pod or service-to-service communication is secured both at network and application layers.
-
-**Secure by default**: When used with Istio’s proxy and centralized policy engine, Istio Auth can be configured during deployment with minimal or no application change. Administrators and operators can thus ensure that service communications are secured by default and that they can enforce these policies consistently across diverse protocols and runtimes.
-
-**Strong service authentication**: Istio Auth secures service communication using mutual TLS to ensure that the service identity is not expressed as a bearer token that can be stolen or replayed from another source. This ensures that services with sensitive data can only be accessed from strongly authenticated and authorized clients.
+
+**Secure by default**: When used with Istio’s proxy and centralized policy engine, Istio Auth can be configured during deployment with minimal or no application change. Administrators and operators can thus ensure that service communications are secured by default and that they can enforce these policies consistently across diverse protocols and runtimes.
+
+**Strong service authentication**: Istio Auth secures service communication using mutual TLS to ensure that the service identity is not expressed as a bearer token that can be stolen or replayed from another source. This ensures that services with sensitive data can only be accessed from strongly authenticated and authorized clients.
## Join us in this journey
Istio Auth is the first step towards providing a full stack of capabilities to protect services with sensitive data from external attacks and insider
-threats. While the initial version runs on Kubernetes, our goal is to enable Istio Auth to secure services across diverse production environments. We encourage the
-community to [join us](https://github.com/istio/istio/blob/master/security) in making robust service security easy and ubiquitous across different application
-stacks and runtime platforms.
+threats. While the initial version runs on Kubernetes, our goal is to enable Istio Auth to secure services across diverse production environments. We encourage the
+community to [join us](https://github.com/istio/istio/blob/master/security) in making robust service security easy and ubiquitous across different application
+stacks and runtime platforms.
diff --git a/_blog/2017/0.1-canary.md b/_blog/2017/0.1-canary.md
index bcf45c083a987..361f1d53f77e1 100644
--- a/_blog/2017/0.1-canary.md
+++ b/_blog/2017/0.1-canary.md
@@ -1,17 +1,17 @@
---
-title: "Canary Deployments using Istio"
-overview: Using Istio to create autoscaled canary deployments
-publish_date: June 14, 2017
+title: Canary Deployments using Istio
+description: Using Istio to create autoscaled canary deployments
+publishdate: 2017-06-14
subtitle:
attribution: Frank Budinsky
-order: 98
+weight: 98
-layout: blog
-type: markdown
redirect_from: "/blog/canary-deployments-using-istio.html"
---
+{% include home.html %}
+
One of the benefits of the [Istio]({{home}}) project is that it provides the control needed to deploy canary services. The idea behind canary deployment (or rollout) is to introduce a new version of a service by first testing it using a small percentage of user traffic, and then if all goes well, increase, possibly gradually in increments, the percentage while simultaneously phasing out the old version. If anything goes wrong along the way, we abort and rollback to the previous version. In its simplest form, the traffic sent to the canary version is a randomly selected percentage of requests, but in more sophisticated schemes it can be based on the region, user, or other properties of the request.
Depending on your level of expertise in this area, you may wonder why Istio's support for canary deployment is even needed, given that platforms like Kubernetes already provide a way to do [version rollout](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#updating-a-deployment) and [canary deployment](https://kubernetes.io/docs/concepts/cluster-administration/manage-deployment/#canary-deployments). Problem solved, right? Well, not exactly. Although doing a rollout this way works in simple cases, it’s very limited, especially in large scale cloud environments receiving lots of (and especially varying amounts of) traffic, where autoscaling is needed.
@@ -28,8 +28,8 @@ Whether we use one deployment or two, canary management using deployment feature
With Istio, traffic routing and replica deployment are two completely independent functions. The number of pods implementing services are free to scale up and down based on traffic load, completely orthogonal to the control of version traffic routing. This makes managing a canary version in the presence of autoscaling a much simpler problem. Autoscalers may, in fact, respond to load variations resulting from traffic routing changes, but they are nevertheless functioning independently and no differently than when loads change for other reasons.
-Istio’s [routing rules]({{home}}/docs/concepts/traffic-management/rules-configuration.html) also provide other important advantages; you can easily control
-fine grain traffic percentages (e.g., route 1% of traffic without requiring 100 pods) and you can control traffic using other criteria (e.g., route traffic for specific users to the canary version). To illustrate, let’s look at deploying the **helloworld** service and see how simple the problem becomes.
+Istio’s [routing rules]({{home}}/docs/concepts/traffic-management/rules-configuration.html) also provide other important advantages; you can easily control
+fine grain traffic percentages (e.g., route 1% of traffic without requiring 100 pods) and you can control traffic using other criteria (e.g., route traffic for specific users to the canary version). To illustrate, let’s look at deploying the **helloworld** service and see how simple the problem becomes.
We begin by defining the **helloworld** Service, just like any other Kubernetes service, something like this:
@@ -88,7 +88,7 @@ rule to control the traffic distribution. For example if we want to send 10% of
[istioctl]({{home}}/docs/reference/commands/istioctl.html) command to set a routing rule something like this:
```bash
-$ cat < Usage of AWS `nlb` on kubernetes is an alpha feature and not recommended for production clusters.
+
+## IAM Policy
+
+You need to apply a policy to the master role in order to be able to provision a network load balancer.
+
+1. In AWS `iam` console click on policies and click on create a new one:
+{% include image.html width="80%" ratio="60%"
+ link="./img/createpolicystart.png"
+ caption="Create a new policy"
+ %}
+1. Select `json`:
+{% include image.html width="80%" ratio="60%"
+ link="./img/createpolicyjson.png"
+ caption="Select json"
+ %}
+1. Copy/paste the text below:
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "kopsK8sNLBMasterPermsRestrictive",
+ "Effect": "Allow",
+ "Action": [
+ "ec2:DescribeVpcs",
+ "elasticloadbalancing:AddTags",
+ "elasticloadbalancing:CreateListener",
+ "elasticloadbalancing:CreateTargetGroup",
+ "elasticloadbalancing:DeleteListener",
+ "elasticloadbalancing:DeleteTargetGroup",
+ "elasticloadbalancing:DescribeListeners",
+ "elasticloadbalancing:DescribeLoadBalancerPolicies",
+ "elasticloadbalancing:DescribeTargetGroups",
+ "elasticloadbalancing:DescribeTargetHealth",
+ "elasticloadbalancing:ModifyListener",
+ "elasticloadbalancing:ModifyTargetGroup",
+ "elasticloadbalancing:RegisterTargets",
+ "elasticloadbalancing:SetLoadBalancerPoliciesOfListener"
+ ],
+ "Resource": [
+ "*"
+ ]
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "ec2:DescribeVpcs",
+ "ec2:DescribeRegions"
+ ],
+ "Resource": "*"
+ }
+ ]
+}
+```
+1. Click review policy, fill all fields and click create policy:
+{% include image.html width="80%" ratio="60%"
+ link="./img/create_policy.png"
+ caption="Validate policy"
+ %}
+1. Click on roles, select your master role nodes, and click attach policy:
+{% include image.html width="100%" ratio="35%"
+ link="./img/roles_summary.png"
+ caption="Attach policy"
+ %}
+1. Your policy is now attached to your master node.
+
+## Rewrite Istio Ingress Service
+
+You need to rewrite the ingress service with the following:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: istio-ingress
+ namespace: istio-system
+ labels:
+ istio: ingress
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
+spec:
+ externalTrafficPolicy: Local
+ ports:
+ - port: 80
+ protocol: TCP
+ targetPort: 80
+ name: http
+ - port: 443
+ protocol: TCP
+ targetPort: 443
+ name: https
+ selector:
+ istio: ingress
+ type: LoadBalancer
+```
+
+## What's next
+
+Kubernetes [service networking](https://kubernetes.io/docs/concepts/services-networking/service/) should be consulted if further information is needed.
diff --git a/_blog/2018/egress-https.md b/_blog/2018/egress-https.md
index 6382a4242b189..732ae0a902902 100644
--- a/_blog/2018/egress-https.md
+++ b/_blog/2018/egress-https.md
@@ -1,14 +1,12 @@
---
-title: "Consuming External Web Services"
-overview: Describes a simple scenario based on Istio Bookinfo sample
-publish_date: January 31, 2018
+title: Consuming External Web Services
+description: Describes a simple scenario based on Istio Bookinfo sample
+publishdate: 2018-01-31
subtitle: Egress Rules for HTTPS traffic
attribution: Vadim Eisenberg
-order: 93
+weight: 93
-layout: blog
-type: markdown
redirect_from: "/blog/egress-https.html"
---
{% include home.html %}
@@ -20,9 +18,10 @@ In this blog post, I modify the [Istio Bookinfo Sample Application]({{home}}/doc
## Bookinfo sample application with external details web service
### Initial setting
+
To demonstrate the scenario of consuming an external web service, I start with a Kubernetes cluster with [Istio installed]({{home}}/docs/setup/kubernetes/quick-start.html#installation-steps). Then I deploy [Istio Bookinfo Sample Application]({{home}}/docs/guides/bookinfo.html). This application uses the _details_ microservice to fetch book details, such as the number of pages and the publisher. The original _details_ microservice provides the book details without consulting any external service.
-The example commands in this blog post work with Istio version 0.2+, with or without [Mutual TLS]({{home}}/docs/concepts/security/mutual-tls.html) enabled.
+The example commands in this blog post work with Istio 0.2+, with or without [Mutual TLS]({{home}}/docs/concepts/security/mutual-tls.html) enabled.
The Bookinfo configuration files required for the scenario of this post appear starting from [Istio release version 0.5](https://github.com/istio/istio/releases/tag/0.5.0).
The Bookinfo configuration files reside in the `samples/bookinfo/kube` directory of the Istio release archive.
@@ -30,27 +29,24 @@ The Bookinfo configuration files reside in the `samples/bookinfo/kube` directory
Here is a copy of the end-to-end architecture of the application from the original [Bookinfo Guide]({{home}}/docs/guides/bookinfo.html).
{% assign url = home | append: "/docs/guides/img/bookinfo/withistio.svg" %}
-{% include figure.html width='80%' ratio='59.08%'
- img=url
- alt='The Original Bookinfo Application'
- title='The Original Bookinfo Application'
- caption='The Original Bookinfo Application'
+{% include image.html width="80%" ratio="59.08%"
+ link=url
+ caption="The Original Bookinfo Application"
%}
### Bookinfo with details version 2
+
Let's add a new version of the _details_ microservice, _v2_, that fetches the book details from [Google Books APIs](https://developers.google.com/books/docs/v1/getting_started).
-```bash
-kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo-details-v2.yaml)
+```command
+$ kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo-details-v2.yaml)
```
The updated architecture of the application now looks as follows:
-{% include figure.html width='80%' ratio='65.16%'
- img='./img/bookinfo-details-v2.svg'
- alt='The Bookinfo Application with details V2'
- title='The Bookinfo Application with details V2'
- caption='The Bookinfo Application with details V2'
+{% include image.html width="80%" ratio="65.16%"
+ link="./img/bookinfo-details-v2.svg"
+ caption="The Bookinfo Application with details V2"
%}
Note that the Google Books web service is outside the Istio service mesh, the boundary of which is marked by a dashed line.
@@ -77,11 +73,9 @@ Let's access the web page of the application, after [determining the ingress IP
Oops... Instead of the book details we have the _Error fetching product details_ message displayed:
-{% include figure.html width='80%' ratio='36.01%'
- img='./img/errorFetchingBookDetails.png'
- alt='The Error Fetching Product Details Message'
- title='The Error Fetching Product Details Message'
- caption='The Error Fetching Product Details Message'
+{% include image.html width="80%" ratio="36.01%"
+ link="./img/errorFetchingBookDetails.png"
+ caption="The Error Fetching Product Details Message"
%}
The good news is that our application did not crash. With a good microservice design, we do not have **failure propagation**. In our case, the failing _details_ microservice does not cause the _productpage_ microservice to fail. Most of the functionality of the application is still provided, despite the failure in the _details_ microservice. We have **graceful service degradation**: as you can see, the reviews and the ratings are displayed correctly, and the application is still useful.
@@ -89,7 +83,9 @@ The good news is that our application did not crash. With a good microservice de
So what might have gone wrong? Ah... The answer is that I forgot to enable traffic from inside the mesh to an external service, in this case to the Google Books web service. By default, the Istio sidecar proxies ([Envoy proxies](https://www.envoyproxy.io)) **block all the traffic to destinations outside the cluster**. To enable such traffic, we must define an [egress rule]({{home}}/docs/reference/config/istio.routing.v1alpha1.html#EgressRule).
### Egress rule for Google Books web service
+
No worries, let's define an **egress rule** and fix our application:
+
```bash
cat < --port
```
_**OR**_
When using the `mysql` client and a local MySQL database, I would run:
- ```bash
- curl -s https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/src/mysql/mysqldb-init.sql |
+ ```command
+ $ curl -s https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/src/mysql/mysqldb-init.sql | \
mysql -u root -p
```
-2. I then create a user with the name _bookinfo_ and grant it _SELECT_ privilege on the `test.ratings` table:
- ```bash
- mysqlsh --sql --ssl-mode=REQUIRED -u admin -p --host --port \
+1. I then create a user with the name _bookinfo_ and grant it _SELECT_ privilege on the `test.ratings` table:
+ ```command
+ $ mysqlsh --sql --ssl-mode=REQUIRED -u admin -p --host --port \
-e "CREATE USER 'bookinfo' IDENTIFIED BY ''; GRANT SELECT ON test.ratings to 'bookinfo';"
```
_**OR**_
For `mysql` and the local database, the command would be:
- ```bash
- mysql -u root -p -e \
+ ```command
+ $ mysql -u root -p -e \
"CREATE USER 'bookinfo' IDENTIFIED BY ''; GRANT SELECT ON test.ratings to 'bookinfo';"
```
Here I apply the [principle of least privilege](https://en.wikipedia.org/wiki/Principle_of_least_privilege). This means that I do not use my _admin_ user in the Bookinfo application. Instead, I create a special user for the Bookinfo application , _bookinfo_, with minimal privileges. In this case, the _bookinfo_ user only has the `SELECT` privilege on a single table.
After running the command to create the user, I will clean my bash history by checking the number of the last command and running `history -d `. I don't want the password of the new user to be stored in the bash history. If I'm using `mysql`, I'll remove the last command from `~/.mysql_history` file as well. Read more about password protection of the newly created user in [MySQL documentation](https://dev.mysql.com/doc/refman/5.5/en/create-user.html).
-3. I inspect the created ratings to see that everything worked as expected:
- ```bash
- mysqlsh --sql --ssl-mode=REQUIRED -u bookinfo -p --host --port \
+1. I inspect the created ratings to see that everything worked as expected:
+ ```command
+ $ mysqlsh --sql --ssl-mode=REQUIRED -u bookinfo -p --host --port \
-e "select * from test.ratings;"
- ```
- ```bash
Enter password:
+----------+--------+
| ReviewID | Rating |
@@ -70,10 +68,8 @@ For this task I set up an instance of [MySQL](https://www.mysql.com). You can us
_**OR**_
For `mysql` and the local database:
- ```bash
- mysql -u bookinfo -p -e "select * from test.ratings;"
- ```
- ```bash
+ ```command
+ $ mysql -u bookinfo -p -e "select * from test.ratings;"
Enter password:
+----------+--------+
| ReviewID | Rating |
@@ -83,12 +79,10 @@ For this task I set up an instance of [MySQL](https://www.mysql.com). You can us
+----------+--------+
```
-4. I set the ratings temporarily to 1 to provide a visual clue when our database is used by the Bookinfo _ratings_ service:
- ```bash
- mysqlsh --sql --ssl-mode=REQUIRED -u admin -p --host --port \
+1. I set the ratings temporarily to 1 to provide a visual clue when our database is used by the Bookinfo _ratings_ service:
+ ```command
+ $ mysqlsh --sql --ssl-mode=REQUIRED -u admin -p --host --port \
-e "update test.ratings set rating=1; select * from test.ratings;"
- ```
- ```bash
Enter password:
+----------+--------+
| ReviewID | Rating |
@@ -101,10 +95,8 @@ For this task I set up an instance of [MySQL](https://www.mysql.com). You can us
_**OR**_
For `mysql` and the local database:
- ```bash
- mysql -u root -p -e "update test.ratings set rating=1; select * from test.ratings;"
- ```
- ```bash
+ ```command
+ $ mysql -u root -p -e "update test.ratings set rating=1; select * from test.ratings;"
Enter password:
+----------+--------+
| ReviewID | Rating |
@@ -118,21 +110,21 @@ For this task I set up an instance of [MySQL](https://www.mysql.com). You can us
Now I am ready to deploy a version of the Bookinfo application that will use my database.
### Initial setting of Bookinfo application
+
To demonstrate the scenario of using an external database, I start with a Kubernetes cluster with [Istio installed]({{home}}/docs/setup/kubernetes/quick-start.html#installation-steps). Then I deploy the [Istio Bookinfo sample application]({{home}}/docs/guides/bookinfo.html). This application uses the _ratings_ microservice to fetch book ratings, a number between 1 and 5. The ratings are displayed as stars for each review. There are several versions of the _ratings_ microservice. Some use [MongoDB](https://www.mongodb.com), others use [MySQL](https://www.mysql.com) as their database.
-The example commands in this blog post work with Istio version 0.3+, with or without [Mutual TLS]({{home}}/docs/concepts/security/mutual-tls.html) enabled.
+The example commands in this blog post work with Istio 0.3+, with or without [Mutual TLS]({{home}}/docs/concepts/security/mutual-tls.html) enabled.
As a reminder, here is the end-to-end architecture of the application from the [Bookinfo Guide]({{home}}/docs/guides/bookinfo.html).
{% assign url = home | append: "/docs/guides/img/bookinfo/withistio.svg" %}
-{% include figure.html width='80%' ratio='59.08%'
- img=url
- alt='The original Bookinfo application'
- title='The original Bookinfo application'
- caption='The original Bookinfo application'
+{% include image.html width="80%" ratio="59.08%"
+ link=url
+ caption="The original Bookinfo application"
%}
### Use the database for ratings data in Bookinfo application
+
1. I modify the deployment spec of a version of the _ratings_ microservice that uses a MySQL database, to use my database instance. The spec is in `samples/bookinfo/kube/bookinfo-ratings-v2-mysql.yaml` of an Istio release archive. I edit the following lines:
```yaml
@@ -147,46 +139,40 @@ As a reminder, here is the end-to-end architecture of the application from the [
```
I replace the values in the snippet above, specifying the database host, port, user, and password. Note that the correct way to work with passwords in container's environment variables in Kubernetes is [to use secrets](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-environment-variables). For this example task only, I write the password directly in the deployment spec. **Do not do it** in a real environment! I also assume everyone realizes that `"password"` should not be used as a password...
-2. I apply the modified spec to deploy the version of the _ratings_ microservice, _v2-mysql_, that will use my database.
+1. I apply the modified spec to deploy the version of the _ratings_ microservice, _v2-mysql_, that will use my database.
- ```bash
- kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo-ratings-v2-mysql.yaml)
- ```
- ```bash
+ ```command
+ $ kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo-ratings-v2-mysql.yaml)
deployment "ratings-v2-mysql" created
```
-3. I route all the traffic destined to the _reviews_ service to its _v3_ version. I do this to ensure that the _reviews_ service always calls the _ratings_ service. In addition, I route all the traffic destined to the _ratings_ service to _ratings v2-mysql_ that uses my database. I add routing for both services above by adding two [route rules]({{home}}/docs/reference/config/istio.routing.v1alpha1.html). These rules are specified in `samples/bookinfo/kube/route-rule-ratings-mysql.yaml` of an Istio release archive.
+1. I route all the traffic destined to the _reviews_ service to its _v3_ version. I do this to ensure that the _reviews_ service always calls the _ratings_
+service. In addition, I route all the traffic destined to the _ratings_ service to _ratings v2-mysql_ that uses my database. I add routing for both services above by adding two [route rules]({{home}}/docs/reference/config/istio.routing.v1alpha1.html). These rules are specified in `samples/bookinfo/kube/route-rule-ratings-mysql.yaml` of an Istio release archive.
- ```bash
- istioctl create -f samples/bookinfo/kube/route-rule-ratings-mysql.yaml
- ```
- ```bash
+ ```command
+ $ istioctl create -f samples/bookinfo/kube/route-rule-ratings-mysql.yaml
Created config route-rule/default/ratings-test-v2-mysql at revision 1918799
Created config route-rule/default/reviews-test-ratings-v2 at revision 1918800
```
The updated architecture appears below. Note that the blue arrows inside the mesh mark the traffic configured according to the route rules we added. According to the route rules, the traffic is sent to _reviews v3_ and _ratings v2-mysql_.
-{% include figure.html width='80%' ratio='59.31%'
- img='./img/bookinfo-ratings-v2-mysql-external.svg'
- alt='The Bookinfo application with ratings v2-mysql and an external MySQL database'
- title='The Bookinfo application with ratings v2-mysql and an external MySQL database'
- caption='The Bookinfo application with ratings v2-mysql and an external MySQL database'
+{% include image.html width="80%" ratio="59.31%"
+ link="./img/bookinfo-ratings-v2-mysql-external.svg"
+ caption="The Bookinfo application with ratings v2-mysql and an external MySQL database"
%}
Note that the MySQL database is outside the Istio service mesh, or more precisely outside the Kubernetes cluster. The boundary of the service mesh is marked by a dashed line.
### Access the webpage
+
Let's access the webpage of the application, after [determining the ingress IP and port]({{home}}/docs/guides/bookinfo.html#determining-the-ingress-ip-and-port).
We have a problem... Instead of the rating stars, the message _"Ratings service is currently unavailable"_ is currently displayed below each review:
-{% include figure.html width='80%' ratio='36.19%'
- img='./img/errorFetchingBookRating.png'
- alt='The Ratings service error messages'
- title='The Ratings service error messages'
- caption='The Ratings service error messages'
+{% include image.html width="80%" ratio="36.19%"
+ link="./img/errorFetchingBookRating.png"
+ caption="The Ratings service error messages"
%}
As in [Consuming External Web Services]({{home}}/blog/2018/egress-https.html), we experience **graceful service degradation**, which is good. The application did not crash due to the error in the _ratings_ microservice. The webpage of the application correctly displayed the book information, the details, and the reviews, just without the rating stars.
@@ -194,6 +180,7 @@ As in [Consuming External Web Services]({{home}}/blog/2018/egress-https.html), w
We have the same problem as in [Consuming External Web Services]({{home}}/blog/2018/egress-https.html), namely all the traffic outside the Kubernetes cluster, both TCP and HTTP, is blocked by default by the sidecar proxies. To enable such traffic for TCP, an egress rule for TCP must be defined.
### Egress rule for an external MySQL instance
+
TCP egress rules come to our rescue. I copy the following YAML spec to a text file (let's call it `egress-rule-mysql.yaml`) and edit it to specify the IP of my database instance and its port.
```yaml
@@ -211,21 +198,18 @@ spec:
```
Then I run `istioctl` to add the egress rule to the service mesh:
-```bash
-istioctl create -f egress-rule-mysql.yaml
-```
-```bash
+
+```command
+$ istioctl create -f egress-rule-mysql.yaml
Created config egress-rule/default/mysql at revision 1954425
```
Note that for a TCP egress rule, we specify `tcp` as the protocol of a port of the rule. Also note that we use an IP of the external service instead of its domain name. I will talk more about TCP egress rules [below](#egress-rules-for-tcp-traffic). For now, let's verify that the egress rule we added fixed the problem. Let's access the webpage and see if the stars are back.
It worked! Accessing the web page of the application displays the ratings without error:
-{% include figure.html width='80%' ratio='36.69%'
- img='./img/externalMySQLRatings.png'
- alt='Book Ratings Displayed Correctly'
- title='Book Ratings Displayed Correctly'
- caption='Book Ratings Displayed Correctly'
+{% include image.html width="80%" ratio="36.69%"
+ link="./img/externalMySQLRatings.png"
+ caption="Book Ratings Displayed Correctly"
%}
Note that we see a one-star rating for both displayed reviews, as expected. I changed the ratings to be one star to provide us with a visual clue that our external database is indeed being used.
@@ -233,13 +217,17 @@ Note that we see a one-star rating for both displayed reviews, as expected. I ch
As with egress rules for HTTP/HTTPS, we can delete and create egress rules for TCP using `istioctl`, dynamically.
## Motivation for egress TCP traffic control
+
Some in-mesh Istio applications must access external services, for example legacy systems. In many cases, the access is not performed over HTTP or HTTPS protocols. Other TCP protocols are used, such as database-specific protocols like [MongoDB Wire Protocol](https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/) and [MySQL Client/Server Protocol](https://dev.mysql.com/doc/internals/en/client-server-protocol.html) to communicate with external databases.
Note that in case of access to external HTTPS services, as described in the [Control Egress TCP Traffic]({{home}}/docs/tasks/traffic-management/egress.html) task, an application must issue HTTP requests to the external service. The Envoy sidecar proxy attached to the pod or the VM, will intercept the requests and open an HTTPS connection to the external service. The traffic will be unencrypted inside the pod or the VM, but it will leave the pod or the VM encrypted.
However, sometimes this approach cannot work due to the following reasons:
+
* The code of the application is configured to use an HTTPS URL and cannot be changed
+
* The code of the application uses some library to access the external service and that library uses HTTPS only
+
* There are compliance requirements that do not allow unencrypted traffic, even if the traffic is unencrypted only inside the pod or the VM
In this case, HTTPS can be treated by Istio as _opaque TCP_ and can be handled in the same way as other TCP non-HTTP protocols.
@@ -247,6 +235,7 @@ In this case, HTTPS can be treated by Istio as _opaque TCP_ and can be handled i
Next let's see how we define egress rules for TCP traffic.
## Egress rules for TCP traffic
+
The egress rules for enabling TCP traffic to a specific port must specify `TCP` as the protocol of the port. Additionally, for the [MongoDB Wire Protocol](https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/), the protocol can be specified as `MONGO`, instead of `TCP`.
For the `destination.service` field of the rule, an IP or a block of IPs in [CIDR](https://tools.ietf.org/html/rfc2317) notation must be used.
@@ -258,6 +247,7 @@ Note that all the IPs of an external service are not always known. To enable TCP
Also note that the IPs of an external service are not always static, for example in the case of [CDNs](https://en.wikipedia.org/wiki/Content_delivery_network). Sometimes the IPs are static most of the time, but can be changed from time to time, for example due to infrastructure changes. In these cases, if the range of the possible IPs is known, you should specify the range by CIDR blocks (even by multiple egress rules if needed). As an example, see the approach we used in the case of `wikipedia.org`, described in [Control Egress TCP Traffic Task]({{home}}/docs/tasks/traffic-management/egress-tcp.html). If the range of the possible IPs is not known, egress rules for TCP cannot be used and [the external services must be called directly]({{home}}/docs/tasks/traffic-management/egress.html#calling-external-services-directly), circumventing the sidecar proxies.
## Relation to mesh expansion
+
Note that the scenario described in this post is different from the mesh expansion scenario, described in the
[Integrating Virtual Machines]({{home}}/docs/guides/integrating-vms.html) guide. In that scenario, a MySQL instance runs on an external
(outside the cluster) machine (a bare metal or a VM), integrated with the Istio service mesh. The MySQL service becomes a first-class citizen of the mesh with all the beneficial features of Istio applicable. Among other things, the service becomes addressable by a local cluster domain name, for example by `mysqldb.vm.svc.cluster.local`, and the communication to it can be secured by
@@ -266,53 +256,53 @@ service must be registered with Istio. To enable such integration, Istio compone
installed on the machine and the Istio control plane (_Pilot_, _Mixer_, _CA_) must be accessible from it. See the
[Istio Mesh Expansion]({{home}}/docs/setup/kubernetes/mesh-expansion.html) instructions for more details.
-In our case, the MySQL instance can run on any machine or can be provisioned as a service by a cloud provider. There is no requirement to integrate the machine with Istio. The Istio contol plane does not have to be accessible from the machine. In the case of MySQL as a service, the machine which MySQL runs on may be not accessible and installing on it the required components may be impossible. In our case, the MySQL instance is addressable by its global domain name, which could be beneficial if the consuming applications expect to use that domain name. This is especially relevant when that expected domain name cannot be changed in the deployment configuration of the consuming applications.
+In our case, the MySQL instance can run on any machine or can be provisioned as a service by a cloud provider. There is no requirement to integrate the machine
+with Istio. The Istio control plane does not have to be accessible from the machine. In the case of MySQL as a service, the machine which MySQL runs on may be not accessible and installing on it the required components may be impossible. In our case, the MySQL instance is addressable by its global domain name, which could be beneficial if the consuming applications expect to use that domain name. This is especially relevant when that expected domain name cannot be changed in the deployment configuration of the consuming applications.
## Cleanup
+
1. Drop the _test_ database and the _bookinfo_ user:
- ```bash
- mysqlsh --sql --ssl-mode=REQUIRED -u admin -p --host --port \
+ ```command
+ $ mysqlsh --sql --ssl-mode=REQUIRED -u admin -p --host --port \
-e "drop database test; drop user bookinfo;"
```
_**OR**_
For `mysql` and the local database:
- ```bash
- mysql -u root -p -e "drop database test; drop user bookinfo;"
+ ```command
+ $ mysql -u root -p -e "drop database test; drop user bookinfo;"
```
-2. Remove the route rules:
- ```bash
- istioctl delete -f samples/bookinfo/kube/route-rule-ratings-mysql.yaml
- ```
- ```bash
+1. Remove the route rules:
+ ```command
+ $ istioctl delete -f samples/bookinfo/kube/route-rule-ratings-mysql.yaml
Deleted config: route-rule/default/ratings-test-v2-mysql
Deleted config: route-rule/default/reviews-test-ratings-v2
```
-3. Undeploy _ratings v2-mysql_:
- ```bash
- kubectl delete -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo-ratings-v2-mysql.yaml)
- ```
- ```bash
+1. Undeploy _ratings v2-mysql_:
+ ```command
+ $ kubectl delete -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo-ratings-v2-mysql.yaml)
deployment "ratings-v2-mysql" deleted
```
-4. Delete the egress rule:
- ```bash
- istioctl delete egressrule mysql -n default
- ```
- ```bash
+1. Delete the egress rule:
+ ```command
+ $ istioctl delete egressrule mysql -n default
Deleted config: egressrule mysql
```
## Future work
+
In my next blog posts, I will show examples of combining route rules and egress rules, and also examples of accessing external services via Kubernetes _ExternalName_ services.
## Conclusion
+
In this blog post, I demonstrated how the microservices in an Istio service mesh can consume external services via TCP. By default, Istio blocks all the traffic, TCP and HTTP, to the hosts outside the cluster. To enable such traffic for TCP, TCP egress rules must be created for the service mesh.
## What's next
To read more about Istio egress traffic control:
+
* for TCP, see [Control Egress TCP Traffic Task]({{home}}/docs/tasks/traffic-management/egress-tcp.html)
+
* for HTTP/HTTPS, see [Control Egress Traffic Task]({{home}}/docs/tasks/traffic-management/egress.html)
diff --git a/_blog/2018/img/attach_policies.png b/_blog/2018/img/attach_policies.png
new file mode 100644
index 0000000000000..7307239aa3498
Binary files /dev/null and b/_blog/2018/img/attach_policies.png differ
diff --git a/_blog/2018/img/create_policy.png b/_blog/2018/img/create_policy.png
new file mode 100644
index 0000000000000..792c7d18631e9
Binary files /dev/null and b/_blog/2018/img/create_policy.png differ
diff --git a/_blog/2018/img/createpolicyjson.png b/_blog/2018/img/createpolicyjson.png
new file mode 100644
index 0000000000000..26766f1d751d4
Binary files /dev/null and b/_blog/2018/img/createpolicyjson.png differ
diff --git a/_blog/2018/img/createpolicystart.png b/_blog/2018/img/createpolicystart.png
new file mode 100644
index 0000000000000..430ed3b23e31c
Binary files /dev/null and b/_blog/2018/img/createpolicystart.png differ
diff --git a/_blog/2018/img/gateways.svg b/_blog/2018/img/gateways.svg
new file mode 100644
index 0000000000000..9f57b6a49579a
--- /dev/null
+++ b/_blog/2018/img/gateways.svg
@@ -0,0 +1,202 @@
+
\ No newline at end of file
diff --git a/_blog/2018/img/policies.png b/_blog/2018/img/policies.png
new file mode 100644
index 0000000000000..bda30f41980b2
Binary files /dev/null and b/_blog/2018/img/policies.png differ
diff --git a/_blog/2018/img/roles_summary.png b/_blog/2018/img/roles_summary.png
new file mode 100644
index 0000000000000..943b1ebcfbbff
Binary files /dev/null and b/_blog/2018/img/roles_summary.png differ
diff --git a/_blog/2018/img/virtualservices-destrules.svg b/_blog/2018/img/virtualservices-destrules.svg
new file mode 100644
index 0000000000000..aa213df7f07f6
--- /dev/null
+++ b/_blog/2018/img/virtualservices-destrules.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/_blog/2018/index.md b/_blog/2018/index.md
index e5fc78630b381..89268583c41e0 100644
--- a/_blog/2018/index.md
+++ b/_blog/2018/index.md
@@ -1,11 +1,9 @@
---
title: 2018 Posts
-overview: Blog posts for 2018
+description: Blog posts for 2018
-order: 10
+weight: 10
-layout: blog
-type: markdown
toc: false
---
diff --git a/_blog/2018/soft-multitenancy.md b/_blog/2018/soft-multitenancy.md
new file mode 100644
index 0000000000000..b049a0df98a55
--- /dev/null
+++ b/_blog/2018/soft-multitenancy.md
@@ -0,0 +1,352 @@
+---
+title: Istio Soft Multi-tenancy Support
+description: Using Kubernetes namespace and RBAC to create an Istio soft multi-tenancy environment
+publishdate: 2018-04-19
+subtitle: Using multiple Istio control planes and RBAC to create multi-tenancy
+attribution: John Joyce and Rich Curran
+
+weight: 90
+
+redirect_from: "/blog/soft-multitenancy.html"
+---
+{% include home.html %}
+Multi-tenancy is commonly used in many environments across many different applications,
+but the implementation details and functionality provided on a per tenant basis does not
+follow one model in all environments. The [Kubernetes multi-tenancy working group](
+https://github.com/kubernetes/community/blob/master/wg-multitenancy/README.md)
+is working to define the multi-tenant use cases and functionality that should be available
+within Kubernetes. However, from their work so far it is clear that only "soft multi-tenancy"
+is possible due to the inability to fully protect against malicious containers or workloads
+gaining access to other tenant's pods or kernel resources.
+
+## Soft multi-tenancy
+
+For this blog, "soft multi-tenancy" is defined as having a single Kubernetes control plane
+with multiple Istio control planes and multiple meshes, one control plane and one mesh
+per tenant. The cluster administrator gets control and visibility across all the Istio
+control planes, while the tenant administrator only gets control of a specific Istio
+instance. Separation between the tenants is provided by Kubernetes namespaces and RBAC.
+
+One use case for this deployment model is a shared corporate infrastructure where malicious
+actions are not expected, but a clean separation of the tenants is still required.
+
+Potential future Istio multi-tenant deployment models are described at the bottom of this
+blog.
+
+>Note: This blog is a high-level description of how to deploy Istio in a
+limited multi-tenancy environment. The [docs]({{home}}/docs/) section will be updated
+when official multi-tenancy support is provided.
+
+## Deployment
+
+### Multiple Istio control planes
+
+Deploying multiple Istio control planes starts by replacing all `namespace` references
+in a manifest file with the desired namespace. Using istio.yaml as an example, if two tenant
+level Istio control planes are required, the first can use the istio.yaml default name of
+*istio-system* and a second control plane can be created by generating a new yaml file with
+a different namespace. As an example, the following command creates a yaml file with
+the Istio namespace of *istio-system1*.
+
+```command
+$ cat istio.yaml | sed s/istio-system/istio-system1/g > istio-system1.yaml
+```
+
+The istio yaml file contains the details of the Istio control plane deployment, including the
+pods that make up the control plane (mixer, pilot, ingress, CA). Deploying the two Istio
+control plane yaml files:
+
+```command
+$ kubectl apply -f install/kubernetes/istio.yaml
+$ kubectl apply -f install/kubernetes/istio-system1.yaml
+```
+Results in two Istio control planes running in two namespaces.
+
+```command
+$ kubectl get pods --all-namespaces
+NAMESPACE NAME READY STATUS RESTARTS AGE
+istio-system istio-ca-ffbb75c6f-98w6x 1/1 Running 0 15d
+istio-system istio-ingress-68d65fc5c6-dnvfl 1/1 Running 0 15d
+istio-system istio-mixer-5b9f8dffb5-8875r 3/3 Running 0 15d
+istio-system istio-pilot-678fc976c8-b8tv6 2/2 Running 0 15d
+istio-system1 istio-ca-5f496fdbcd-lqhlk 1/1 Running 0 15d
+istio-system1 istio-ingress-68d65fc5c6-2vldg 1/1 Running 0 15d
+istio-system1 istio-mixer-7d4f7b9968-66z44 3/3 Running 0 15d
+istio-system1 istio-pilot-5bb6b7669c-779vb 2/2 Running 0 15d
+```
+The Istio [sidecar]({{home}}/docs/setup/kubernetes/sidecar-injection.html) and
+[addons]({{home}}/docs/tasks/telemetry/), if required, manifests must also
+be deployed to match the configured `namespace` in use by the tenant's Istio control plane.
+
+The execution of these two yaml files is the responsibility of the cluster
+administrator, not the tenant level administrator. Additional RBAC restrictions will also
+need to be configured and applied by the cluster administrator, limiting the tenant
+administrator to only the assigned namespace.
+
+### Split common and namespace specific resources
+
+The manifest files in the Istio repositories create both common resources that would
+be used by all Istio control planes as well as resources that are replicated per control
+plane. Although it is a simple matter to deploy multiple control planes by replacing the
+*istio-system* namespace references as described above, a better approach is to split the
+manifests into a common part that is deployed once for all tenants and a tenant
+specific part. For the [Custom Resource Definitions](https://kubernetes.io/docs/concepts/api-extension/custom-resources/#customresourcedefinitions), the roles and the role
+bindings should be separated out from the provided Istio manifests. Additionally, the
+roles and role bindings in the provided Istio manifests are probably unsuitable for a
+multi-tenant environment and should be modified or augmented as described in the next
+section.
+
+### Kubernetes RBAC for Istio control plane resources
+
+To restrict a tenant administrator to a single Istio namespace, the cluster
+administrator would create a manifest containing, at a minimum, a `Role` and `RoleBinding`
+similar to the one below. In this example, a tenant administrator named *sales-admin*
+is limited to the namespace *istio-system1*. A completed manifest would contain many
+more `apiGroups` under the `Role` providing resource access to the tenant administrator.
+
+```yaml
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ namespace: istio-system1
+ name: ns-access-for-sales-admin-istio-system1
+rules:
+- apiGroups: [""] # "" indicates the core API group
+ resources: ["*"]
+ verbs: ["*"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: access-all-istio-system1
+ namespace: istio-system1
+subjects:
+- kind: User
+ name: sales-admin
+ apiGroup: rbac.authorization.k8s.io
+roleRef:
+ kind: Role
+ name: ns-access-for-sales-admin-istio-system1
+ apiGroup: rbac.authorization.k8s.io
+```
+
+### Watching specific namespaces for service discovery
+
+In addition to creating RBAC rules limiting the tenant administrator's access to a specific
+Istio control plane, the Istio manifest must be updated to specify the application namespace
+that Pilot should watch for creation of its xDS cache. This is done by starting the Pilot
+component with the additional command line arguments `--appNamespace, ns-1`, where *ns-1*
+is the namespace that the tenant’s application will be deployed in. An example snippet from
+the istio-system1.yaml file is included below.
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: istio-pilot
+ namespace: istio-system1
+ annotations:
+ sidecar.istio.io/inject: "false"
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ istio: pilot
+ spec:
+ serviceAccountName: istio-pilot-service-account
+ containers:
+ - name: discovery
+ image: docker.io//pilot:
+ imagePullPolicy: IfNotPresent
+ args: ["discovery", "-v", "2", "--admission-service", "istio-pilot", "--appNamespace", "ns-1"]
+ ports:
+ - containerPort: 8080
+ - containerPort: 443
+```
+
+### Deploying the tenant application in a namespace
+
+Now that the cluster administrator has created the tenant's namespace (ex. *istio-system1*) and
+Pilot's service discovery has been configured to watch for a specific application
+namespace (ex. *ns-1*), create the application manifests to deploy in that tenant's specific
+namespace. For example:
+
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: ns-1
+```
+And add the namespace reference to each resource type included in the application's manifest
+file. For example:
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: details
+ labels:
+ app: details
+ namespace: ns-1
+```
+Although not shown, the application namespaces will also have RBAC settings limiting access
+to certain resources. These RBAC settings could be set by the cluster administrator and/or
+the tenant administrator.
+
+### Using `istioctl` in a multi-tenant environment
+
+When defining [route rules]({{home}}/docs/reference/config/istio.routing.v1alpha1.html#RouteRule)
+or [destination policies]({{home}}/docs/reference/config/istio.routing.v1alpha1.html#DestinationPolicy),
+it is necessary to ensure that the `istioctl` command is scoped to
+the namespace the Istio control plane is running in to ensure the resource is created
+in the proper namespace. Additionally, the rule itself must be scoped to the tenant's namespace
+so that it will be applied properly to that tenant's mesh. The *-i* option is used to create
+(or get or describe) the rule in the namespace that the Istio control plane is deployed in.
+The *-n* option will scope the rule to the tenant's mesh and should be set to the namespace that
+the tenant's app is deployed in. Note that the *-n* option can be skipped on the command line if
+the .yaml file for the resource scopes it properly instead.
+
+For example, the following command would be required to add a route rule to the *istio-system1*
+namespace:
+```command
+$ istioctl -i istio-system1 create -n ns-1 -f route_rule_v2.yaml
+```
+And can be displayed using the command:
+```command
+$ istioctl -i istio-system1 -n ns-1 get routerule
+NAME KIND NAMESPACE
+details-Default RouteRule.v1alpha2.config.istio.io ns-1
+productpage-default RouteRule.v1alpha2.config.istio.io ns-1
+ratings-default RouteRule.v1alpha2.config.istio.io ns-1
+reviews-default RouteRule.v1alpha2.config.istio.io ns-1
+```
+
+See the [Multiple Istio control planes]({{home}}/blog/2018/soft-multitenancy.html#multiple-istio-control-planes) section of this document for more details on `namespace` requirements in a
+multi-tenant environment.
+
+### Test results
+
+Following the instructions above, a cluster administrator can create an environment limiting,
+via RBAC and namespaces, what a tenant administrator can deploy.
+
+After deployment, accessing the Istio control plane pods assigned to a specific tenant
+administrator is permitted:
+
+```command
+$ kubectl get pods -n istio-system
+NAME READY STATUS RESTARTS AGE
+grafana-78d649479f-8pqk9 1/1 Running 0 1d
+istio-ca-ffbb75c6f-98w6x 1/1 Running 0 1d
+istio-ingress-68d65fc5c6-dnvfl 1/1 Running 0 1d
+istio-mixer-5b9f8dffb5-8875r 3/3 Running 0 1d
+istio-pilot-678fc976c8-b8tv6 2/2 Running 0 1d
+istio-sidecar-injector-7587bd559d-5tgk6 1/1 Running 0 1d
+prometheus-cf8456855-hdcq7 1/1 Running 0 1d
+servicegraph-75ff8f7c95-wcjs7 1/1 Running 0 1d
+```
+However, accessing all the cluster's pods is not permitted:
+
+```command
+$ kubectl get pods --all-namespaces
+Error from server (Forbidden): pods is forbidden: User "dev-admin" cannot list pods at the cluster scope
+```
+
+And neither is accessing another tenant's namespace:
+
+```command
+$ kubectl get pods -n istio-system1
+Error from server (Forbidden): pods is forbidden: User "dev-admin" cannot list pods in the namespace "istio-system1"
+```
+
+The tenant administrator can deploy applications in the application namespace configured for
+that tenant. As an example, updating the [Bookinfo]({{home}}/docs/guides/bookinfo.html)
+manifests and then deploying under the tenant's application namespace of *ns-0*, listing the
+pods in use by this tenant's namespace is permitted:
+
+```command
+$ kubectl get pods -n ns-0
+NAME READY STATUS RESTARTS AGE
+details-v1-64b86cd49-b7rkr 2/2 Running 0 1d
+productpage-v1-84f77f8747-rf2mt 2/2 Running 0 1d
+ratings-v1-5f46655b57-5b4c5 2/2 Running 0 1d
+reviews-v1-ff6bdb95b-pm5lb 2/2 Running 0 1d
+reviews-v2-5799558d68-b989t 2/2 Running 0 1d
+reviews-v3-58ff7d665b-lw5j9 2/2 Running 0 1d
+```
+
+But accessing another tenant's application namespace is not:
+
+```command
+$ kubectl get pods -n ns-1
+Error from server (Forbidden): pods is forbidden: User "dev-admin" cannot list pods in the namespace "ns-1"
+```
+
+If the [addon tools]({{home}}/docs/tasks/telemetry/), for example
+[prometheus]({{home}}/docs/tasks/telemetry/querying-metrics.html), are deployed
+(also limited by an Istio `namespace`) the statistical results returned would represent only
+that traffic seen from that tenant's application namespace.
+
+## Conclusion
+
+The evaluation performed indicates Istio has sufficient capabilities and security to meet a
+small number of multi-tenant use cases. It also shows that Istio and Kubernetes __cannot__
+provide sufficient capabilities and security for other use cases, especially those use
+cases that require complete security and isolation between untrusted tenants. The improvements
+required to reach a more secure model of security and isolation require work in container
+technology, ex. Kubernetes, rather than improvements in Istio capabilities.
+
+## Issues
+
+* The CA (Certificate Authority) and mixer Istio pod logs from one tenant's Istio control
+plane (ex. *istio-system* `namespace`) contained 'info' messages from a second tenant's
+Istio control plane (ex. *istio-system1* `namespace`).
+
+## Challenges with other multi-tenancy models
+
+Other multi-tenancy deployment models were considered:
+1. A single mesh with multiple applications, one for each tenant on the mesh. The cluster
+administrator gets control and visibility mesh wide and across all applications, while the
+tenant administrator only gets control of a specific application.
+1. A single Istio control plane with multiple meshes, one mesh per tenant. The cluster
+administrator gets control and visibility across the entire Istio control plane and all
+meshes, while the tenant administrator only gets control of a specific mesh.
+1. A single cloud environment (cluster controlled), but multiple Kubernetes control planes
+(tenant controlled).
+
+These options either can't be properly supported without code changes or don't fully
+address the use cases.
+
+Current Istio capabilities are poorly suited to support the first model as it lacks
+sufficient RBAC capabilities to support cluster versus tenant operations. Additionally,
+having multiple tenants under one mesh is too insecure with the current mesh model and the
+way Istio drives configuration to the envoy proxies.
+
+Regarding the second option, the current Istio paradigm assumes a single mesh per Istio control
+plane. The needed changes to support this model are substantial. They would require
+finer grained scoping of resources and security domains based on namespaces, as well as,
+additional Istio RBAC changes. This model will likely be addressed by future work, but is not
+currently possible.
+
+The third model doesn’t satisfy most use cases, as most cluster administrators prefer
+a common Kubernetes control plane which they provide as a
+[PaaS](https://en.wikipedia.org/wiki/Platform_as_a_service) to their tenants.
+
+## Future work
+
+Allowing a single Istio control plane to control multiple meshes would be an obvious next
+feature. An additional improvement is to provide a single mesh that can host different
+tenants with some level of isolation and security between the tenants. This could be done
+by partitioning within a single control plane using the same logical notion of namespace as
+Kubernetes. A [document](https://docs.google.com/document/d/14Hb07gSrfVt5KX9qNi7FzzGwB_6WBpAnDpPG6QEEd9Q)
+has been started within the Istio community to define additional use cases and the
+Istio functionality required to support those use cases.
+
+## References
+
+* Video on Kubernetes multi-tenancy support, [Multi-Tenancy Support & Security Modeling with RBAC and Namespaces](https://www.youtube.com/watch?v=ahwCkJGItkU), and the [supporting slide deck](https://schd.ws/hosted_files/kccncna17/21/Multi-tenancy%20Support%20%26%20Security%20Modeling%20with%20RBAC%20and%20Namespaces.pdf).
+* Kubecon talk on security that discusses Kubernetes support for "Cooperative soft multi-tenancy", [Building for Trust: How to Secure Your Kubernetes](https://www.youtube.com/watch?v=YRR-kZub0cA).
+* Kubernetes documentation on [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/) and [namespaces](https://kubernetes.io/docs/tasks/administer-cluster/namespaces-walkthrough/).
+* Kubecon slide deck on [Multi-tenancy Deep Dive](https://schd.ws/hosted_files/kccncna17/a9/kubecon-multitenancy.pdf).
+* Google document on [Multi-tenancy models for Kubernetes](https://docs.google.com/document/d/15w1_fesSUZHv-vwjiYa9vN_uyc--PySRoLKTuDhimjc/edit#heading=h.3dawx97e3hz6). (Requires permission)
+* Cloud Foundry WIP document, [Multi-cloud and Multi-tenancy](https://docs.google.com/document/d/14Hb07gSrfVt5KX9qNi7FzzGwB_6WBpAnDpPG6QEEd9Q)
+* [Istio Auto Multi-Tenancy 101](https://docs.google.com/document/d/12F183NIRAwj2hprx-a-51ByLeNqbJxK16X06vwH5OWE/edit#heading=h.x0f9qplja3q)
diff --git a/_blog/2018/traffic-mirroring.md b/_blog/2018/traffic-mirroring.md
index c60bbc7bd0300..ffd2fdbee82e0 100644
--- a/_blog/2018/traffic-mirroring.md
+++ b/_blog/2018/traffic-mirroring.md
@@ -1,23 +1,20 @@
---
-title: "Traffic mirroring with Istio for testing in production"
-overview: An introduction to safer, lower-risk deployments and release to production
-publish_date: February 8, 2018
+title: Traffic Mirroring with Istio for Testing in Production
+description: An introduction to safer, lower-risk deployments and release to production
+publishdate: 2018-02-08
subtitle: Routing rules for HTTP traffic
attribution: Christian Posta
-order: 91
+weight: 91
-layout: blog
-type: markdown
redirect_from: "/blog/traffic-mirroring.html"
---
{% include home.html %}
-Trying to enumerate all the possible combinations of test cases for testing services in non-production/test environments can be daunting. In some cases, you'll find that all of the effort that goes into cataloging these use cases doesn't match up to real production use cases. Ideally, we could use live production use cases and traffic to help illuminate all of the feature areas of the service under test that we might miss in more contrived testing environments.
+Trying to enumerate all the possible combinations of test cases for testing services in non-production/test environments can be daunting. In some cases, you'll find that all of the effort that goes into cataloging these use cases doesn't match up to real production use cases. Ideally, we could use live production use cases and traffic to help illuminate all of the feature areas of the service under test that we might miss in more contrived testing environments.
Istio can help here. With the release of [Istio 0.5.0]({{home}}/about/notes/0.5.html), Istio can mirror traffic to help test your services. You can write route rules similar to the following to enable traffic mirroring:
-
```yaml
apiVersion: config.istio.io/v1alpha2
kind: RouteRule
@@ -31,14 +28,14 @@ spec:
- labels:
version: v1
weight: 100
- - labels:
+ - labels:
version: v2
weight: 0
mirror:
name: httpbin
labels:
version: v2
-```
+```
A few things to note here:
@@ -46,4 +43,5 @@ A few things to note here:
* Responses to any mirrored traffic is ignored; traffic is mirrored as "fire-and-forget"
* You'll need to have the 0-weighted route to hint to Istio to create the proper Envoy cluster under the covers; [this should be ironed out in future releases](https://github.com/istio/istio/issues/3270).
-Learn more about mirroring by visiting the [Mirroring Task]({{home}}/docs/tasks/traffic-management/mirroring.html) and see a more [comprehensive treatment of this scenario on my blog](http://blog.christianposta.com/microservices/traffic-shadowing-with-istio-reduce-the-risk-of-code-release/).
\ No newline at end of file
+Learn more about mirroring by visiting the [Mirroring Task]({{home}}/docs/tasks/traffic-management/mirroring.html) and see a more
+[comprehensive treatment of this scenario on my blog](https://blog.christianposta.com/microservices/traffic-shadowing-with-istio-reduce-the-risk-of-code-release/).
diff --git a/_blog/2018/v1alpha3-routing.md b/_blog/2018/v1alpha3-routing.md
new file mode 100644
index 0000000000000..b050d7ff0c2c6
--- /dev/null
+++ b/_blog/2018/v1alpha3-routing.md
@@ -0,0 +1,435 @@
+---
+title: Introducing the Istio v1alpha3 routing API
+description: Introduction, motivation and design principles for the Istio v1alpha3 routing API.
+publishdate: 2018-04-25
+subtitle:
+attribution: Frank Budinsky (IBM) and Shriram Rajagopalan (VMware)
+
+weight: 88
+
+redirect_from: "/blog/v1alpha3-routing.html"
+---
+
+{% include home.html %}
+
+Up until now, Istio has provided a simple API for traffic management using four configuration resources:
+`RouteRule`, `DestinationPolicy`, `EgressRule`, and (Kubernetes) `Ingress`.
+With this API, users have been able to easily manage the flow of traffic in an Istio service mesh.
+The API has allowed users to route requests to specific versions of services, inject delays and failures for resilience
+testing, add timeouts and circuit breakers, and more, all without changing the application code itself.
+
+While this functionality has proven to be a very compelling part of Istio, user feedback has also shown that this API does
+have some shortcomings, specifically when using it to manage very large applications containing thousands of services, and
+when working with protocols other than HTTP. Furthermore, the use of Kubernetes `Ingress` resources to configure external
+traffic has proven to be woefully insufficient for our needs.
+
+To address these, and other concerns, a new traffic management API, a.k.a. `v1alpha3`, is being introduced, which will
+completely replace the previous API going forward. Although the `v1alpha3` model is fundamentally the same, it is not
+backward compatible and will require manual conversion from the old API. A
+[conversion tool]({{home}}/docs/reference/commands/istioctl.html#istioctl%20experimental%20convert-networking-config)
+is included in the next few releases of Istio to help with the transition.
+
+To justify this disruption, the `v1alpha3` API has gone through a long and painstaking community
+review process that has hopefully resulted in a greatly improved API that will stand the test of time. In this article,
+we will introduce the new configuration model and attempt to explain some of the motivation and design principles that
+influenced it.
+
+## Design principles
+
+A few key design principles played a role in the routing model redesign:
+
+* Explicitly model infrastructure as well as intent. For example, in addition to configuring an ingress gateway, the
+ component (controller) implementing it can also be specified.
+* The authoring model should be "producer oriented" and "host centric" as opposed to compositional. For example, all
+ rules associated with a particular host are configured together, instead of individually.
+* Clear separation of routing from post-routing behaviors.
+
+## Configuration resources in v1alpha3
+
+A typical mesh will have one or more load balancers (we call them gateways)
+that terminate TLS from external networks and allow traffic into the mesh.
+Traffic then flows through internal services via sidecar gateways.
+It is also common for applications to consume external
+services (e.g., Google Maps API). These may be called directly or, in certain deployments, all traffic
+exiting the mesh may be forced through dedicated egress gateways. The following diagram depicts
+this mental model.
+
+{% include image.html width="80%" ratio="35.20%"
+ link="./img/gateways.svg"
+ alt="Role of gateways in the mesh"
+ caption="Gateways in an Istio service mesh"
+ %}
+
+With the above setup in mind, `v1alpha3` introduces the following new
+configuration resources to control traffic routing into, within, and out of the mesh.
+
+1. `Gateway`
+1. `VirtualService`
+1. `DestinationRule`
+1. `ServiceEntry`
+
+`VirtualService`, `DestinationRule`, and `ServiceEntry` replace `RouteRule`,
+`DestinationPolicy`, and `EgressRule` respectively. The `Gateway` is a
+platform independent abstraction to model the traffic flowing into
+dedicated middleboxes.
+
+The figure below depicts the flow of control across configuration
+resources.
+
+{% include image.html width="80%" ratio="41.16%"
+ link="./img/virtualservices-destrules.svg"
+ caption="Relationship between different v1alpha3 elements"
+ %}
+
+### Gateway
+
+A [Gateway]({{home}}/docs/reference/config/istio.networking.v1alpha3.html#Gateway)
+configures a load balancer for HTTP/TCP traffic, regardless of
+where it will be running. Any number of gateways can exist within the mesh
+and multiple different gateway implementations can co-exist. In fact, a
+gateway configuration can be bound to a particular workload by specifying
+the set of workload (pod) labels as part of the configuration, allowing
+users to reuse off the shelf network appliances by writing a simple gateway
+controller.
+
+For ingress traffic management, you might ask: _Why not reuse Kubernetes Ingress APIs_?
+The Ingress APIs proved to be incapable of expressing Istio's routing needs.
+By trying to draw a common denominator across different HTTP proxies, the
+Ingress is only able to support the most basic HTTP routing and ends up
+pushing every other feature of modern proxies into non-portable
+annotations.
+
+Istio `Gateway` overcomes the `Ingress` shortcomings by separating the
+L4-L6 spec from L7. It only configures the L4-L6 functions (e.g., ports to
+expose, TLS configuration) that are uniformly implemented by all good L7
+proxies. Users can then use standard Istio rules to control HTTP
+requests as well as TCP traffic entering a `Gateway` by binding a
+`VirtualService` to it.
+
+For example, the following simple `Gateway` configures a load balancer
+to allow external https traffic for host `bookinfo.com` into the mesh:
+
+```yaml
+apiVersion: networking.istio.io/v1alpha3
+kind: Gateway
+metadata:
+ name: bookinfo-gateway
+spec:
+ servers:
+ - port:
+ number: 443
+ name: https
+ protocol: HTTPS
+ hosts:
+ - bookinfo.com
+ tls:
+ mode: SIMPLE
+ serverCertificate: /tmp/tls.crt
+ privateKey: /tmp/tls.key
+```
+
+To configure the corresponding routes, a `VirtualService` (described in the [following section](#virtualservice))
+must be defined for the same host and bound to the `Gateway` using
+the `gateways` field in the configuration:
+
+```yaml
+apiVersion: networking.istio.io/v1alpha3
+kind: VirtualService
+metadata:
+ name: bookinfo
+spec:
+ hosts:
+ - bookinfo.com
+ gateways:
+ - bookinfo-gateway # <---- bind to gateway
+ http:
+ - match:
+ - uri:
+ prefix: /reviews
+ route:
+ ...
+```
+
+The `Gateway` can be used to model an edge-proxy or a purely internal proxy
+as shown in the first figure. Irrespective of the location, all gateways
+can be configured and controlled in the same way.
+
+### VirtualService
+
+Replacing route rules with something called “virtual services” might seem peculiar at first, but in reality it’s
+fundamentally a much better name for what is being configured, especially after redesigning the API to address the
+scalability issues with the previous model.
+
+In effect, what has changed is that instead of configuring routing using a set of individual configuration resources
+(rules) for a particular destination service, each containing a precedence field to control the order of evaluation, we
+now configure the (virtual) destination itself, with all of its rules in an ordered list within a corresponding
+[VirtualService]({{home}}/docs/reference/config/istio.networking.v1alpha3.html#VirtualService) resource.
+For example, where previously we had two `RouteRule` resources for the
+[Bookinfo]({{home}}/docs/guides/bookinfo.html) application’s `reviews` service, like this:
+
+```yaml
+apiVersion: config.istio.io/v1alpha2
+kind: RouteRule
+metadata:
+ name: reviews-default
+spec:
+ destination:
+ name: reviews
+ precedence: 1
+ route:
+ - labels:
+ version: v1
+---
+apiVersion: config.istio.io/v1alpha2
+kind: RouteRule
+metadata:
+ name: reviews-test-v2
+spec:
+ destination:
+ name: reviews
+ precedence: 2
+ match:
+ request:
+ headers:
+ cookie:
+ regex: "^(.*?;)?(user=jason)(;.*)?$"
+ route:
+ - labels:
+ version: v2
+```
+
+In `v1alpha3`, we provide the same configuration in a single `VirtualService` resource:
+
+```yaml
+apiVersion: networking.istio.io/v1alpha3
+kind: VirtualService
+metadata:
+ name: reviews
+spec:
+ hosts:
+ - reviews
+ http:
+ - match:
+ - headers:
+ cookie:
+ regex: "^(.*?;)?(user=jason)(;.*)?$"
+ route:
+ - destination:
+ host: reviews
+ subset: v2
+ - route:
+ - destination:
+ host: reviews
+ subset: v1
+```
+
+As you can see, both of the rules for the `reviews` service are consolidated in one place, which at first may or may not
+seem preferable. However, if you look closer at this new model, you’ll see there are fundamental differences that make
+`v1alpha3` vastly more functional.
+
+First of all, notice that the destination service for the `VirtualService` is specified using a `hosts` field (repeated field, in fact) and is then again specified in a `destination` field of each of the route specifications. This is a
+very important difference from the previous model.
+
+A `VirtualService` describes the mapping from one or more user-addressable destinations to the actual destination workloads inside the mesh. In our example, they are the same, however, the user-addressed hosts can be any DNS
+names with optional wildcard prefix or CIDR prefix that will be used to address the service. This can be particularly
+useful in facilitating turning monoliths into a composite service built out of distinct microservices without requiring the
+consumers of the service to adapt to the transition.
+
+For example, the following rule allows users to address both the `reviews` and `ratings` services of the Bookinfo application
+as if they are parts of a bigger (virtual) service at `http://bookinfo.com/`:
+
+```yaml
+apiVersion: networking.istio.io/v1alpha3
+kind: VirtualService
+metadata:
+ name: bookinfo
+spec:
+ hosts:
+ - bookinfo.com
+ http:
+ - match:
+ - uri:
+ prefix: /reviews
+ route:
+ - destination:
+ host: reviews
+ - match:
+ - uri:
+ prefix: /ratings
+ route:
+ - destination:
+ host: ratings
+ ...
+```
+
+The hosts of a `VirtualService` do not actually have to be part of the service registry; they are simply virtual
+destinations. This allows users to model traffic for virtual hosts that do not have routable entries inside the mesh.
+These hosts can be exposed outside the mesh by binding the `VirtualService` to a `Gateway` configuration for the same host
+(as described in the [previous section](#gateway)).
+
+In addition to this fundamental restructuring, `VirtualService` includes several other important changes:
+
+1. Multiple match conditions can be expressed inside the `VirtualService` configuration, reducing the need for redundant
+ rules.
+1. Each service version has a name (called a service subset). The set of pods/VMs belonging to a subset is defined in a
+ `DestinationRule`, described in the following section.
+1. `VirtualService` hosts can be specified using wildcard DNS prefixes to create a single rule for all matching services.
+ For example, in Kubernetes, to apply the same rewrite rule for all services in the `foo` namespace, the `VirtualService`
+ would use `*.foo.svc.cluster.local` as the host.
+
+### DestinationRule
+
+A [DestinationRule]({{home}}/docs/reference/config/istio.networking.v1alpha3.html#DestinationRule)
+configures the set of policies to be applied while forwarding traffic to a service. They are
+intended to be authored by service owners, describing the circuit breakers, load balancer settings, TLS settings, etc.
+`DestinationRule` is more or less the same as its predecessor, `DestinationPolicy`, with the following exceptions:
+
+1. The `host` of a `DestinationRule` can include wildcard prefixes, allowing a single rule to be specified for many actual
+ services.
+1. A `DestinationRule` defines addressable `subsets` (i.e., named versions) of the corresponding destination host. These
+ subsets are used in `VirtualService` route specifications when sending traffic to specific versions of the service.
+ Naming versions this way allows us to cleanly refer to them across different virtual services, simplify the stats that
+ Istio proxies emit, and to encode subsets in SNI headers.
+
+A `DestinationRule` that configures policies and subsets for the reviews service might look something like this:
+
+```yaml
+apiVersion: networking.istio.io/v1alpha3
+kind: DestinationRule
+metadata:
+ name: reviews
+spec:
+ host: reviews
+ trafficPolicy:
+ loadBalancer:
+ simple: RANDOM
+ subsets:
+ - name: v1
+ labels:
+ version: v1
+ - name: v2
+ labels:
+ version: v2
+ trafficPolicy:
+ loadBalancer:
+ simple: ROUND_ROBIN
+ - name: v3
+ labels:
+ version: v3
+```
+
+Notice that, unlike `DestinationPolicy`, multiple policies (e.g., default and v2-specific) are specified in a single
+`DestinationRule` configuration.
+
+### ServiceEntry
+
+[ServiceEntry]({{home}}/docs/reference/config/istio.networking.v1alpha3.html#ServiceEntry)
+is used to add additional entries into the service registry that Istio maintains internally.
+It is most commonly used to allow one to model traffic to external dependencies of the mesh
+such as APIs consumed from the web or traffic to services in legacy infrastructure.
+
+Everything you could previously configure using an `EgressRule` can just as easily be done with a `ServiceEntry`.
+For example, access to a simple external service from inside the mesh can be enabled using a configuration
+something like this:
+
+```yaml
+apiVersion: networking.istio.io/v1alpha3
+kind: ServiceEntry
+metadata:
+ name: foo-ext
+spec:
+ hosts:
+ - foo.com
+ ports:
+ - number: 80
+ name: http
+ protocol: HTTP
+```
+
+That said, `ServiceEntry` has significantly more functionality than its predecessor.
+First of all, a `ServiceEntry` is not limited to external service configuration,
+it can be of two types: mesh-internal or mesh-external.
+Mesh-internal entries are like all other internal services but are used to explicitly add services
+to the mesh. They can be used to add services as part of expanding the service mesh to include unmanaged infrastructure
+(e.g., VMs added to a Kubernetes-based service mesh).
+Mesh-external entries represent services external to the mesh.
+For them, mTLS authentication is disabled and policy enforcement is performed on the client-side,
+instead of on the usual server-side for internal service requests.
+
+Because a `ServiceEntry` configuration simply adds a destination to the internal service registry, it can be
+used in conjunction with a `VirtualService` and/or `DestinationRule`, just like any other service in the registry.
+The following `DestinationRule`, for example, can be used to initiate mTLS connections for an external service:
+
+```yaml
+apiVersion: networking.istio.io/v1alpha3
+kind: DestinationRule
+metadata:
+ name: foo-ext
+spec:
+  host: foo.com
+ trafficPolicy:
+ tls:
+ mode: MUTUAL
+ clientCertificate: /etc/certs/myclientcert.pem
+ privateKey: /etc/certs/client_private_key.pem
+ caCertificates: /etc/certs/rootcacerts.pem
+```
+
+In addition to its expanded generality, `ServiceEntry` provides several other improvements over `EgressRule`
+including the following:
+
+1. A single `ServiceEntry` can configure multiple service endpoints, which previously would have required multiple
+ `EgressRules`.
+1. The resolution mode for the endpoints is now configurable (`NONE`, `STATIC`, or `DNS`).
+1. Additionally, we are working on addressing another pain point: the need to access secure external services over plain
+ text ports (e.g., `http://google.com:443`). This should be fixed in the coming weeks, allowing you to directly access
+ `https://google.com` from your application. Stay tuned for an Istio patch release (0.8.x) that addresses this limitation.
+
+## Creating and deleting v1alpha3 route rules
+
+Because all route rules for a given destination are now stored together as an ordered
+list in a single `VirtualService` resource, adding a second and subsequent rules for a particular destination
+is no longer done by creating a new (`RouteRule`) resource, but instead by updating the one-and-only `VirtualService`
+resource for the destination.
+
+old routing rules:
+```command
+$ istioctl create -f my-second-rule-for-destination-abc.yaml
+```
+`v1alpha3` routing rules:
+```command
+$ istioctl replace -f my-updated-rules-for-destination-abc.yaml
+```
+
+Deleting route rules other than the last one for a particular destination is also done using `istioctl replace`.
+
+When adding or removing routes that refer to service versions, the `subsets` will need to be updated in
+the service's corresponding `DestinationRule`.
+As you might have guessed, this is also done using `istioctl replace`.
+
+## Summary
+
+The Istio `v1alpha3` routing API has significantly more functionality than
+its predecessor, but unfortunately is not backwards compatible, requiring a
+one time manual conversion. The previous configuration resources,
+`RouteRule`, `DestinationPolicy`, and `EgressRule`, will not be supported
+from Istio 0.9 onwards. Kubernetes users can continue to use `Ingress` to
+configure their edge load balancers for basic routing. However, advanced
+routing features (e.g., traffic split across two versions) will require use
+of `Gateway`, a significantly more functional and highly
+recommended `Ingress` replacement.
+
+## Acknowledgments
+
+Credit for the routing model redesign and implementation work goes to the
+following people (in alphabetical order):
+
+* Frank Budinsky (IBM)
+* Zack Butcher (Google)
+* Greg Hanson (IBM)
+* Costin Manolache (Google)
+* Martin Ostrowski (Google)
+* Shriram Rajagopalan (VMware)
+* Louis Ryan (Google)
+* Isaiah Snell-Feikema (IBM)
+* Kuat Yessenov (Google)
diff --git a/_blog/index.html b/_blog/index.html
index 4bd0c37935237..ff36940b5e0c5 100644
--- a/_blog/index.html
+++ b/_blog/index.html
@@ -1,6 +1,6 @@
---
title: Istio Blog
-overview: The Istio blog
+description: The Istio blog
layout: compress
---
{% include latest_blog_post.html %}
diff --git a/_config.yml b/_config.yml
index 837eba754191f..4757003cb9c1f 100644
--- a/_config.yml
+++ b/_config.yml
@@ -5,6 +5,8 @@ kramdown:
auto_ids: true
input: GFM
hard_wrap: false
+ syntax_highlighter_opts:
+ disable : true
baseurl:
@@ -40,6 +42,32 @@ plugins:
- jekyll-redirect-from
- jekyll-sitemap
+defaults:
+ -
+ scope:
+ path: ""
+ type: "about"
+ values:
+ layout: "about"
+ -
+ scope:
+ path: ""
+ type: "docs"
+ values:
+ layout: "docs"
+ -
+ scope:
+ path: ""
+ type: "help"
+ values:
+ layout: "help"
+ -
+ scope:
+ path: ""
+ type: "blog"
+ values:
+ layout: "blog"
+
exclude:
- README.md
- LICENSE
@@ -63,6 +91,11 @@ exclude:
- repos/*.html
- repos/*.md
- vendor/
+ - js/misc.js
+ - js/styleSwitcher.js
+ - firebase.json
+ - _rakesite
+ - mdl_style.rb
repository:
istio/istio.github.io
diff --git a/_data/istio.yml b/_data/istio.yml
index 16433d647fbc6..8e022b57f023e 100644
--- a/_data/istio.yml
+++ b/_data/istio.yml
@@ -1,4 +1,5 @@
-version: 0.6 (preliminary)
+version: 0.8
+preliminary: true
archive: false
archive_date: DD-MMM-YYYY
search_engine_id: "013699703217164175118:veyyqmfmpj4"
diff --git a/_data/releases.yml b/_data/releases.yml
index 2dd152b30fae5..8a9eb2db15caf 100644
--- a/_data/releases.yml
+++ b/_data/releases.yml
@@ -1,7 +1,11 @@
-- name: 0.6 (preliminary)
+- name: 0.8
+ url: https://preliminary.istio.io
+- name: 0.7
+ url: https://istio.io
+- name: 0.6
url: https://archive.istio.io/v0.6
- name: 0.5
- url: https://istio.io
+ url: https://archive.istio.io/v0.5
- name: 0.4
url: https://archive.istio.io/v0.4
- name: 0.3
diff --git a/_docs/concepts/index.md b/_docs/concepts/index.md
index 1fc77af69f9ee..5134317c9a9ef 100644
--- a/_docs/concepts/index.md
+++ b/_docs/concepts/index.md
@@ -1,11 +1,9 @@
---
title: Concepts
-overview: Concepts help you learn about the different parts of the Istio system and the abstractions it uses.
+description: Concepts help you learn about the different parts of the Istio system and the abstractions it uses.
-order: 10
+weight: 10
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/concepts/policy-and-control/attributes.md b/_docs/concepts/policy-and-control/attributes.md
index 4b83c94cc4de5..c66c9e71a4129 100644
--- a/_docs/concepts/policy-and-control/attributes.md
+++ b/_docs/concepts/policy-and-control/attributes.md
@@ -1,11 +1,9 @@
---
title: Attributes
-overview: Explains the important notion of attributes, which is a central mechanism for how policies and control are applied to services within the mesh.
-
-order: 10
+description: Explains the important notion of attributes, which is a central mechanism for how policies and control are applied to services within the mesh.
+
+weight: 10
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -19,11 +17,13 @@ environment this traffic occurs in. An Istio attribute carries a specific piece
of information such as the error code of an API request, the latency of an API request, or the
original IP address of a TCP connection. For example:
- request.path: xyz/abc
- request.size: 234
- request.time: 12:34:56.789 04/17/2017
- source.ip: 192.168.0.1
- destination.service: example
+```plain
+request.path: xyz/abc
+request.size: 234
+request.time: 12:34:56.789 04/17/2017
+source.ip: 192.168.0.1
+destination.service: example
+```
## Attribute vocabulary
@@ -44,4 +44,4 @@ separator. For example, `request.size` and `source.ip`.
## Attribute types
Istio attributes are strongly typed. The supported attribute types are defined by
-[ValueType](https://github.com/istio/api/blob/master/mixer/v1/config/descriptor/value_type.proto).
+[ValueType](https://github.com/istio/api/blob/master/policy/v1beta1/value_type.proto).
diff --git a/_docs/concepts/policy-and-control/index.md b/_docs/concepts/policy-and-control/index.md
index c85f12f4afda3..11ee676ce62c1 100644
--- a/_docs/concepts/policy-and-control/index.md
+++ b/_docs/concepts/policy-and-control/index.md
@@ -1,11 +1,9 @@
---
title: Policies and Control
-overview: Introduces the policy control mechanisms.
+description: Introduces the policy control mechanisms.
-order: 40
+weight: 40
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/concepts/policy-and-control/mixer-config.md b/_docs/concepts/policy-and-control/mixer-config.md
index c851102948de0..b4fb99cf65de8 100644
--- a/_docs/concepts/policy-and-control/mixer-config.md
+++ b/_docs/concepts/policy-and-control/mixer-config.md
@@ -1,11 +1,9 @@
---
title: Mixer Configuration
-overview: An overview of the key concepts used to configure Mixer.
-
-order: 30
+description: An overview of the key concepts used to configure Mixer.
+
+weight: 30
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -45,11 +43,9 @@ The set of attributes determines which backend Mixer calls for a given request a
each is given. In order to hide the details of individual backends, Mixer uses modules
known as [*adapters*](./mixer.html#adapters).
-{% include figure.html width='60%' ratio='42.60%'
- img='./img/mixer-config/machine.svg'
- alt='Attribute Machine'
- title='Attribute Machine'
- caption='Attribute Machine'
+{% include image.html width="60%" ratio="42.60%"
+ link="./img/mixer-config/machine.svg"
+ caption="Attribute Machine"
%}
Mixer's configuration has the following central responsibilities:
@@ -81,18 +77,18 @@ metadata:
namespace: istio-system
spec:
# kind specific configuration.
-```
+```
- **apiVersion** - A constant for an Istio release.
- **kind** - A Mixer assigned unique "kind" for every adapter and template.
- **name** - The configuration resource name.
-- **namespace** - The namespace in which the configuration resource is applicable.
+- **namespace** - The namespace in which the configuration resource is applicable.
- **spec** - The `kind`-specific configuration.
### Handlers
[Adapters](./mixer.html#adapters) encapsulate the logic necessary to interface Mixer with specific external infrastructure
backends such as [Prometheus](https://prometheus.io), [New Relic](https://newrelic.com), or [Stackdriver](https://cloud.google.com/logging).
-Individual adapters generally need operational parameters in order to do their work. For example, a logging adapter may require
+Individual adapters generally need operational parameters in order to do their work. For example, a logging adapter may require
the IP address and port of the log sink.
Here is an example showing how to configure an adapter of kind = `listchecker`. The listchecker adapter checks an input value against a list.
@@ -109,7 +105,7 @@ spec:
blacklist: false
```
-`{metadata.name}.{kind}.{metadata.namespace}` is the fully qualified name of a handler. The fully qualified name of the above handler is
+`{metadata.name}.{kind}.{metadata.namespace}` is the fully qualified name of a handler. The fully qualified name of the above handler is
`staticversion.listchecker.istio-system` and it must be unique.
The schema of the data in the `spec` stanza depends on the specific adapter being configured.
@@ -189,8 +185,8 @@ spec:
instances:
- requestduration.metric.istio-system
```
-A rule contains a `match` predicate expression and a list of actions to perform if the predicate is true.
-An action specifies the list of instances to be delivered to a handler.
+A rule contains a `match` predicate expression and a list of actions to perform if the predicate is true.
+An action specifies the list of instances to be delivered to a handler.
A rule must use the fully qualified names of handlers and instances.
If the rule, handlers, and instances are all in the same namespace, the namespace suffix can be elided from the fully qualified name as seen in `handler.prometheus`.
@@ -221,7 +217,7 @@ destination_version: destination.labels["version"] | "unknown"
With the above, the `destination_version` label is assigned the value of `destination.labels["version"]`. However if that attribute
is not present, the literal `"unknown"` is used.
-The attributes that can be used in attribute expressions must be defined in an
+The attributes that can be used in attribute expressions must be defined in an
[*attribute manifest*](#manifests) for the deployment. Within the manifest, each attribute has
a type which represents the kind of data that the attribute carries. In the
same way, attribute expressions are also typed, and their type is derived from
@@ -244,9 +240,9 @@ Mixer goes through the following steps to arrive at the set of `actions`.
1. Extract the value of the identity attribute from the request.
-2. Extract the service namespace from the identity attribute.
+1. Extract the service namespace from the identity attribute.
-3. Evaluate the `match` predicate for all rules in the `configDefaultNamespace` and the service namespace.
+1. Evaluate the `match` predicate for all rules in the `configDefaultNamespace` and the service namespace.
The actions resulting from these steps are performed by Mixer.
@@ -291,4 +287,4 @@ configuration](https://github.com/istio/istio/blob/master/mixer/testdata/config)
## What's next
-* Read the [blog post]({{home}}/blog/mixer-adapter-model.html) describing Mixer's adapter model.
+- Read the [blog post]({{home}}/blog/mixer-adapter-model.html) describing Mixer's adapter model.
diff --git a/_docs/concepts/policy-and-control/mixer.md b/_docs/concepts/policy-and-control/mixer.md
index 977a09364b19d..ca4abf3f36410 100644
--- a/_docs/concepts/policy-and-control/mixer.md
+++ b/_docs/concepts/policy-and-control/mixer.md
@@ -1,13 +1,13 @@
---
title: Mixer
-overview: Architectural deep-dive into the design of Mixer, which provides the policy and control mechanisms within the service mesh.
-
-order: 20
+description: Architectural deep-dive into the design of Mixer, which provides the policy and control mechanisms within the service mesh.
+
+weight: 20
-layout: docs
-type: markdown
---
+{% include home.html %}
+
The page explains Mixer's role and general architecture.
## Background
@@ -29,11 +29,10 @@ Mixer is designed to change the boundaries between layers in order to reduce
systemic complexity, eliminating policy logic from service code and giving
control to operators instead.
-{% include figure.html width='60%' ratio='59%'
- img='./img/mixer/traffic.svg'
- alt='Showing the flow of traffic through Mixer.'
- title='Mixer Traffic Flow'
- caption='Mixer Traffic Flow'
+{% include image.html width="60%" ratio="59%"
+ link="./img/mixer/traffic.svg"
+ alt="Showing the flow of traffic through Mixer."
+ caption="Mixer Traffic Flow"
%}
Mixer provides three core features:
@@ -72,11 +71,10 @@ single consistent API, independent of the backends in use. The exact set of
adapters used at runtime is determined through configuration and can easily be
extended to target new or custom infrastructure backends.
-{% include figure.html width='35%' ratio='138%'
- img='./img/mixer/adapters.svg'
- alt='Showing Mixer with adapters.'
- title='Mixer and its Adapters'
- caption='Mixer and its Adapters'
+{% include image.html width="35%" ratio="138%"
+ link="./img/mixer/adapters.svg"
+ alt="Showing Mixer with adapters."
+ caption="Mixer and its Adapters"
%}
## Configuration state
@@ -88,12 +86,12 @@ operator is responsible for:
- Configuring a set of *handlers* for Mixer-generated data. Handlers are
configured adapters (adapters being binary plugins as described
- [below](#adapters)). Providing a `statsd` adapter with the IP address for a
+ [here](#adapters)). Providing a `statsd` adapter with the IP address for a
statsd backend is an example of handler configuration.
- Configuring a set of *instances* for Mixer to generate based on attributes and
literal values. They represent a chunk of data that adapter code will operate
- on. For example, an operator may configure Mixer to generate `request_count`
+ on. For example, an operator may configure Mixer to generate `requestcount`
metric values from attributes such as `destination.service` and
`response.code`.
@@ -136,13 +134,12 @@ phases:
parameters. The Adapter Dispatching phase invokes the adapters associated with
each aspect and passes them those parameters.
-{% include figure.html width='50%' ratio='144%'
- img='./img/mixer/phases.svg'
- alt='Phases of Mixer request processing.'
- title='Request Phases'
- caption='Request Phases'
+{% include image.html width="50%" ratio="144%"
+ link="./img/mixer/phases.svg"
+ alt="Phases of Mixer request processing."
+ caption="Request Phases"
%}
## What's next
-* Read the [blog post]({{home}}/blog/2017/adapter-model.html) describing Mixer's adapter model.
+- Read the [blog post]({{home}}/blog/2017/adapter-model.html) describing Mixer's adapter model.
diff --git a/_docs/concepts/security/authn-policy.md b/_docs/concepts/security/authn-policy.md
new file mode 100644
index 0000000000000..02aac8736b80b
--- /dev/null
+++ b/_docs/concepts/security/authn-policy.md
@@ -0,0 +1,87 @@
+---
+title: Istio Authentication Policy
+description: Describes Istio authentication policy
+
+weight: 10
+
+---
+{% include home.html %}
+
+Istio authentication policy enables operators to specify authentication requirements for a service (or services). Istio authentication policy is composed of two parts:
+
+* Peer: verifies the party, the direct client, that makes the connection. The common authentication mechanism for this is [mutual TLS]({{home}}/docs/concepts/security/mutual-tls.html). Istio is responsible for managing both client and server sides to enforce the policy.
+
+* Origin: verifies the party, the original client, that makes the request (e.g., end-users, devices, etc.). JWT is the only supported mechanism for origin authentication at the moment. Istio configures the server side to perform authentication, but doesn't enforce that the client side sends the required token.
+
+Identities from both authentication parts, if applicable, are output to the next layer (e.g authorization, Mixer). To simplify the authorization rules, the policy can also specify which identity (peer or origin) should be used as 'the principal'. By default, it is set to the peer's identity.
+
+## Architecture
+
+Authentication policies are saved in Istio config store (in 0.7, the storage implementation uses Kubernetes CRD), and distributed by control plane. Depending on the size of the mesh, config propagation may take a few seconds to a few minutes. During the transition, you can expect traffic lost or inconsistent authentication results.
+
+{% include image.html width="80%" ratio="100%"
+ link="./img/authn.svg"
+ caption="Istio authentication policy architecture"
+ %}
+
+Policy is scoped to namespaces, with (optional) target selector rules to narrow down the set of services (within the same namespace as the policy) on which the policy should be applied. This aligns with the ACL model based on Kubernetes RBAC. More specifically, only the admin of the namespace can set policies for services in that namespace.
+
+Authentication is implemented by the Istio sidecars. For example, with an Envoy sidecar, it is a combination of SSL setting and HTTP filters. If authentication fails, requests will be rejected (either with SSL handshake error code, or http 401, depending on the type of authentication mechanism). If authentication succeeds, the following authenticated attributes will be generated:
+
+* **source.principal**: peer principal. If peer authentication is not used, the attribute is not set.
+* **request.auth.principal**: depends on the policy principal binding, this could be peer principal (if USE_PEER) or origin principal (if USE_ORIGIN).
+* **request.auth.audiences**: reflect the audience (`aud`) claim within the origin JWT (JWT that is used for origin authentication)
+* **request.auth.presenter**: similarly, reflect the authorized presenter (`azp`) claim of the origin JWT.
+* **request.auth.claims**: all raw string claims from origin-JWT.
+
+Origin principal (principal from origin authentication) is not explicitly output. In general, it can always be reconstructed by joining (`iss`) and subject (`sub`) claims with a "/" separator (for example, if `iss` and `sub` claims are "*googleapis.com*" and "*123456*" respectively, then origin principal is "*googleapis.com/123456*"). On the other hand, if principal binding is USE_ORIGIN, **request.auth.principal** carries the same value as origin principal.
+
+## Anatomy of the policy
+
+### Target selectors
+
+Defines rules to find the service(s) on which the policy should be applied. If no rule is provided, the policy is matched to all services in the namespace, a so-called namespace-level policy (as opposed to service-level policies which have non-empty selector rules). Istio uses the service-level policy if available, otherwise it falls back to the namespace-level policy. If neither is defined, it uses the default policy based on service mesh config and/or service annotation, which can only set the mutual TLS setting (these are mechanisms before Istio 0.7 to configure mutual TLS for the Istio service mesh). See [testing Istio mutual TLS]({{home}}/docs/tasks/security/mutual-tls.html) and [per-service mutual TLS enablement]({{home}}/docs/tasks/security/per-service-mtls.html) for more details.
+
+Operators are responsible for avoiding conflicts, e.g create more than one service-level policy that matches to the same service(s) (or more than one namespace-level policy on the same namespace).
+
+Example: rule to select product-page service (on any port), and reviews:9000.
+
+```yaml
+ targets:
+ - name: product-page
+ - name: reviews
+ ports:
+ - number: 9000
+```
+
+### Peer authentication
+
+Defines authentication methods (and associated parameters) that are supported for peer authentication. It can list more than one method; only one of them needs to be satisfied for the authentication to pass. However, starting with the 0.7 release, only mutual TLS is supported. Omit this if peer authentication is not needed.
+
+Example of peer authentication using mutual TLS:
+
+```yaml
+ peers:
+ - mtls:
+```
+
+> Starting with Istio 0.7, the `mtls` setting doesn't require any parameters (hence a `- mtls: {}`, `- mtls:` or `- mtls: null` declaration is sufficient). In future, it may carry arguments to provide different mTLS implementations.
+
+### Origin authentication
+
+Defines authentication methods (and associated parameters) that are supported for origin authentication. Only JWT is supported for this, however, the policy can list multiple JWTs by different issuers. Similar to peer authentication, only one of the listed methods needs to be satisfied for the authentication to pass.
+
+```yaml
+origins:
+- jwt:
+ issuer: "https://accounts.google.com"
+ jwksUri: "https://www.googleapis.com/oauth2/v3/certs"
+```
+
+### Principal binding
+
+Defines what is the principal from the authentication. By default, this will be the peer's principal (and if peer authentication is not applied, it will be left unset). Policy writers can choose to overwrite it with USE_ORIGIN. In future, we will also support *conditional-binding* (e.g USE_PEER when peer is X, otherwise USE_ORIGIN)
+
+## What's next
+
+Try out the [Basic Istio authentication policy]({{home}}/docs/tasks/security/authn-policy.html) tutorial.
diff --git a/_docs/concepts/security/img/authn.svg b/_docs/concepts/security/img/authn.svg
new file mode 100644
index 0000000000000..2fd42df326fab
--- /dev/null
+++ b/_docs/concepts/security/img/authn.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/_docs/concepts/security/img/mutual-tls/auth.svg b/_docs/concepts/security/img/mutual-tls/auth.svg
index e8c28857eee4f..3b2da9fd9d4fc 100644
--- a/_docs/concepts/security/img/mutual-tls/auth.svg
+++ b/_docs/concepts/security/img/mutual-tls/auth.svg
@@ -1,4 +1 @@
-
-
-
-
+
\ No newline at end of file
diff --git a/_docs/concepts/security/index.md b/_docs/concepts/security/index.md
index 8b3170f4f3617..c1138c61d27c4 100644
--- a/_docs/concepts/security/index.md
+++ b/_docs/concepts/security/index.md
@@ -1,11 +1,9 @@
---
title: Security
-overview: Describes Istio's authorization and authentication functionality.
+description: Describes Istio's authorization and authentication functionality.
-order: 30
+weight: 30
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/concepts/security/mutual-tls.md b/_docs/concepts/security/mutual-tls.md
index fb02fd45b8735..b59fab9dc41d0 100644
--- a/_docs/concepts/security/mutual-tls.md
+++ b/_docs/concepts/security/mutual-tls.md
@@ -1,166 +1,166 @@
---
title: Mutual TLS Authentication
-overview: Describes Istio's mutual TLS authentication architecture which provides a strong service identity and secure communication channels between services.
-order: 10
+description: Describes Istio's mutual TLS authentication architecture which provides a strong service identity and secure communication channels between services.
+weight: 10
-layout: docs
-type: markdown
---
-## Overview
-
-Istio Auth's aim is to enhance the security of microservices and their communication without requiring service code changes. It is responsible for:
-
+{% include home.html %}
+## Overview
-* Providing each service with a strong identity that represents its role to enable interoperability across clusters and clouds
+Istio's aim is to enhance the security of microservices and their communication without requiring service code changes. It is responsible for:
-* Securing service to service communication and end-user to service communication
+* Providing each service with a strong identity that represents its role to enable interoperability across clusters and clouds
-* Providing a key management system to automate key and certificate generation, distribution, rotation, and revocation
+* Securing service to service communication and end-user to service communication
+* Providing a key management system to automate key and certificate generation, distribution, rotation, and revocation
## Architecture
-The diagram below shows Istio Auth's architecture, which includes three primary components: identity, key management, and communication security. This diagram describes how Istio Auth is used to secure the service-to-service communication between service 'frontend' running as the service account 'frontend-team' and service 'backend' running as the service account 'backend-team'. Istio supports services running on both Kubernetes containers and VM/bare-metal machines.
+The diagram below shows Istio's security-related architecture, which includes three primary components: identity, key management, and communication
+security. This diagram describes how Istio is used to secure the service-to-service communication between service 'frontend' running
+as the service account 'frontend-team' and service 'backend' running as the service account 'backend-team'. Istio supports services running
+on both Kubernetes containers and VM/bare-metal machines.
-{% include figure.html width='80%' ratio='56.25%'
- img='./img/mutual-tls/auth.svg'
- alt='Components making up the Istio auth model.'
- title='Istio Auth Architecture'
- caption='Istio Auth Architecture'
+{% include image.html width="80%" ratio="56.25%"
+ link="./img/mutual-tls/auth.svg"
+ alt="Components making up the Istio auth model."
+ caption="Istio Security Architecture"
%}
-As illustrated in the diagram, Istio Auth leverages secret volume mount to deliver keys/certs from Istio CA to Kubernetes containers. For services running on VM/bare-metal machines, we introduce a node agent, which is a process running on each VM/bare-metal machine. It generates the private key and CSR (certificate signing request) locally, sends CSR to Istio CA for signing, and delivers the generated certificate together with the private key to Envoy.
+As illustrated in the diagram, Istio leverages secret volume mount to deliver keys/certs from Citadel to Kubernetes containers. For services running on
+VM/bare-metal machines, we introduce a node agent, which is a process running on each VM/bare-metal machine. It generates the private key and CSR (certificate
+signing request) locally, sends CSR to Citadel for signing, and delivers the generated certificate together with the private key to Envoy.
## Components
### Identity
-Istio Auth uses [Kubernetes service accounts](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) to identify who runs the service:
+Istio uses [Kubernetes service accounts](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) to identify who runs the service:
+* A service account in Istio has the format "spiffe://\<_domain_\>/ns/\<_namespace_\>/sa/\<_serviceaccount_\>".
-* A service account in Istio has the format "spiffe://\<_domain_\>/ns/\<_namespace_>/sa/\<_serviceaccount_\>".
- * _domain_ is currently _cluster.local_. We will support customization of domain in the near future.
- * _namespace_ is the namespace of the Kubernetes service account.
- * _serviceaccount_ is the Kubernetes service account name.
+ * _domain_ is currently _cluster.local_. We will support customization of domain in the near future.
+ * _namespace_ is the namespace of the Kubernetes service account.
+ * _serviceaccount_ is the Kubernetes service account name.
-* A service account is **the identity (or role) a workload runs as**, which represents that workload's privileges. For systems requiring strong security, the amount of privilege for a workload should not be identified by a random string (i.e., service name, label, etc), or by the binary that is deployed.
+* A service account is **the identity (or role) a workload runs as**, which represents that workload's privileges. For systems requiring strong security, the
+amount of privilege for a workload should not be identified by a random string (i.e., service name, label, etc), or by the binary that is deployed.
- * For example, let's say we have a workload pulling data from a multi-tenant database. If Alice ran this workload, she will be able to pull a different set of data than if Bob ran this workload.
+ * For example, let's say we have a workload pulling data from a multi-tenant database. If Alice ran this workload, she will be able to pull
+ a different set of data than if Bob ran this workload.
-* Service accounts enable strong security policies by offering the flexibility to identify a machine, a user, a workload, or a group of workloads (different workloads can run as the same service account).
+* Service accounts enable strong security policies by offering the flexibility to identify a machine, a user, a workload, or a group of workloads (different
+workloads can run as the same service account).
-* The service account a workload runs as won't change during the lifetime of the workload.
+* The service account a workload runs as won't change during the lifetime of the workload.
-* Service account uniqueness can be ensured with domain name constraint
+* Service account uniqueness can be ensured with domain name constraint
### Communication security
Service-to-service communication is tunneled through the client side [Envoy](https://envoyproxy.github.io/envoy/) and the server side Envoy. End-to-end communication is secured by:
+* Local TCP connections between the service and Envoy
-* Local TCP connections between the service and Envoy
+* Mutual TLS connections between proxies
-* Mutual TLS connections between proxies
-
-* Secure Naming: during the handshake process, the client side Envoy checks that the service account provided by the server side certificate is allowed to run the target service
+* Secure Naming: during the handshake process, the client side Envoy checks that the service account provided by the server side certificate is allowed to run the target service
### Key management
-Istio v0.2 supports services running on both Kubernetes pods and VM/bare-metal machines. We use different key provisioning mechanisms for each scenario.
-
-For services running on Kubernetes pods, the per-cluster Istio CA (Certificate Authority) automates the key & certificate management process. It mainly performs four critical operations :
+Istio 0.2 supports services running on both Kubernetes pods and VM/bare-metal machines. We use different key provisioning mechanisms for each scenario.
+For services running on Kubernetes pods, the per-cluster Citadel (acting as Certificate Authority) automates the key & certificate management process. It mainly performs four critical operations:
-* Generate a [SPIFFE](https://spiffe.github.io/docs/svid) key and certificate pair for each service account
+* Generate a [SPIFFE](https://spiffe.github.io/docs/svid) key and certificate pair for each service account
-* Distribute a key and certificate pair to each pod according to the service account
+* Distribute a key and certificate pair to each pod according to the service account
-* Rotate keys and certificates periodically
+* Rotate keys and certificates periodically
-* Revoke a specific key and certificate pair when necessary
+* Revoke a specific key and certificate pair when necessary
-For services running on VM/bare-metal machines, the above four operations are performed by Istio CA together with node agents.
+For services running on VM/bare-metal machines, the above four operations are performed by Citadel together with node agents.
## Workflow
-The Istio Auth workflow consists of two phases, deployment and runtime. For the deployment phase, we discuss the two scenarios (i.e., in Kubernetes and VM/bare-metal machines) separately since they are different. Once the key and certificate are deployed, the runtime phase is the same for the two scenarios. We briefly cover the workflow in this section.
+The Istio Security workflow consists of two phases, deployment and runtime. For the deployment phase, we discuss the two
+scenarios (i.e., in Kubernetes and VM/bare-metal machines) separately since they are different. Once the key and
+certificate are deployed, the runtime phase is the same for the two scenarios. We briefly cover the workflow in this
+section.
### Deployment phase (Kubernetes Scenario)
+1. Citadel watches the Kubernetes API Server, creates a [SPIFFE](https://spiffe.github.io/docs/svid) key and certificate
+pair for each of the existing and new service accounts, and sends them to the API Server.
-1. Istio CA watches Kubernetes API Server, creates a [SPIFFE](https://spiffe.github.io/docs/svid) key and certificate pair for each of the existing and new service accounts, and sends them to API Server.
-
-1. When a pod is created, API Server mounts the key and certificate pair according to the service account using [Kubernetes secrets](https://kubernetes.io/docs/concepts/configuration/secret/).
+1. When a pod is created, API Server mounts the key and certificate pair according to the service account using [Kubernetes secrets](https://kubernetes.io/docs/concepts/configuration/secret/).
-1. [Pilot]({{home}}/docs/concepts/traffic-management/pilot.html) generates the config with proper key and certificate and secure naming information,
-which
- defines what service account(s) can run a certain service, and passes it to Envoy.
+1. [Pilot]({{home}}/docs/concepts/traffic-management/pilot.html) generates the config with proper key and certificate and secure naming information,
+which defines what service account(s) can run a certain service, and passes it to Envoy.
### Deployment phase (VM/bare-metal Machines Scenario)
+1. Citadel creates a gRPC service to take CSR request.
-1. Istio CA creates a gRPC service to take CSR request.
+1. Node agent creates the private key and CSR, sends the CSR to Citadel for signing.
-1. Node agent creates the private key and CSR, sends the CSR to Istio CA for signing.
+1. Citadel validates the credentials carried in the CSR, and signs the CSR to generate the certificate.
-1. Istio CA validates the credentials carried in the CSR, and signs the CSR to generate the certificate.
-
-1. Node agent puts the certificate received from CA and the private key to Envoy.
-
-1. The above CSR process repeats periodically for rotation.
+1. Node agent puts the certificate received from Citadel and the private key to Envoy.
+1. The above CSR process repeats periodically for rotation.
### Runtime phase
+1. The outbound traffic from a client service is rerouted to its local Envoy.
+1. The client side Envoy starts a mutual TLS handshake with the server side Envoy. During the handshake, it also does a secure naming check to verify that the service account presented in the server certificate can run the server service.
-1. The outbound traffic from a client service is rerouted to its local Envoy.
-
-1. The client side Envoy starts a mutual TLS handshake with the server side Envoy. During the handshake, it also does a secure naming check to verify that the service account presented in the server certificate can run the server service.
-
-1. The traffic is forwarded to the server side Envoy after mTLS connection is established, which is then forwarded to the server service through local TCP connections.
+1. The traffic is forwarded to the server side Envoy after mTLS connection is established, which is then forwarded to the server service through local TCP connections.
## Best practices
-In this section, we provide a few deployment guidelines and then discuss a real-world scenario.
+In this section, we provide a few deployment guidelines and then discuss a real-world scenario.
### Deployment guidelines
+* If there are multiple service operators (a.k.a. [SREs](https://en.wikipedia.org/wiki/Site_reliability_engineering)) deploying different services in a cluster (typically in a medium- or large-size cluster), we recommend creating a separate [namespace](https://kubernetes.io/docs/tasks/administer-cluster/namespaces-walkthrough/) for each SRE team to isolate their access. For example, you could create a "team1-ns" namespace for team1, and "team2-ns" namespace for team2, such that both teams won't be able to access each other's services.
-
-* If there are multiple service operators (a.k.a. [SREs](https://en.wikipedia.org/wiki/Site_reliability_engineering)) deploying different services in a cluster (typically in a medium- or large-size cluster), we recommend creating a separate [namespace](https://kubernetes.io/docs/tasks/administer-cluster/namespaces-walkthrough/) for each SRE team to isolate their access. For example, you could create a "team1-ns" namespace for team1, and "team2-ns" namespace for team2, such that both teams won't be able to access each other's services.
-
-* If Istio CA is compromised, all its managed keys and certificates in the cluster may be exposed. We *strongly* recommend running Istio CA on a dedicated namespace (for example, istio-ca-ns), which only cluster admins have access to.
+* If Citadel is compromised, all its managed keys and certificates in the cluster may be exposed. We *strongly* recommend running Citadel
+on a dedicated namespace (for example, istio-citadel-ns), which only cluster admins have access to.
### Example
Let's consider a 3-tier application with three services: photo-frontend, photo-backend, and datastore. Photo-frontend and photo-backend services are managed by the photo SRE team while the datastore service is managed by the datastore SRE team. Photo-frontend can access photo-backend, and photo-backend can access datastore. However, photo-frontend cannot access datastore.
-In this scenario, a cluster admin creates 3 namespaces: istio-ca-ns, photo-ns, and datastore-ns. Admin has access to all namespaces, and each team only has
-access to its own namespace. The photo SRE team creates 2 service accounts to run photo-frontend and photo-backend respectively in namespace photo-ns. The
-datastore SRE team creates 1 service account to run the datastore service in namespace datastore-ns. Moreover, we need to enforce the service access control
+In this scenario, a cluster admin creates 3 namespaces: istio-citadel-ns, photo-ns, and datastore-ns. Admin has access to all namespaces, and each team only has
+access to its own namespace. The photo SRE team creates 2 service accounts to run photo-frontend and photo-backend respectively in namespace photo-ns. The
+datastore SRE team creates 1 service account to run the datastore service in namespace datastore-ns. Moreover, we need to enforce the service access control
in [Istio Mixer]({{home}}/docs/concepts/policy-and-control/mixer.html) such that photo-frontend cannot access datastore.
-In this setup, Istio CA is able to provide keys and certificates management for all namespaces, and isolate microservice deployments from each other.
+In this setup, Citadel is able to provide keys and certificates management for all namespaces, and isolate
+microservice deployments from each other.
## Future work
-* Inter-cluster service-to-service authentication
+* Inter-cluster service-to-service authentication
-* Powerful authorization mechanisms: ABAC, RBAC, etc
+* Powerful authorization mechanisms: ABAC, RBAC, etc
-* Per-service auth enablement support
+* Per-service auth enablement support
-* Secure Istio components (Mixer, Pilot)
+* Secure Istio components (Mixer, Pilot)
-* End-user to service authentication using JWT/OAuth2/OpenID_Connect.
+* End-user to service authentication using JWT/OAuth2/OpenID_Connect.
-* Support GCP service account
+* Support GCP service account
-* Unix domain socket for local communication between service and Envoy
+* Unix domain socket for local communication between service and Envoy
-* Middle proxy support
+* Middle proxy support
-* Pluggable key management component
+* Pluggable key management component
diff --git a/_docs/concepts/security/rbac.md b/_docs/concepts/security/rbac.md
index 05ae247fec744..ead114865c56a 100644
--- a/_docs/concepts/security/rbac.md
+++ b/_docs/concepts/security/rbac.md
@@ -1,39 +1,41 @@
---
title: Istio Role-Based Access Control (RBAC)
-overview: Describes Istio RBAC which provides access control for services in Istio Mesh.
-order: 10
+description: Describes Istio RBAC which provides access control for services in Istio Mesh.
+weight: 20
-layout: docs
-type: markdown
---
{% include home.html %}
## Overview
-Istio Role-Based Access Control (RBAC) provides namespace-level, service-level, method-level access control for services in Istio Mesh.
+
+Istio Role-Based Access Control (RBAC) provides namespace-level, service-level, method-level access control for services in the Istio Mesh.
It features:
+
* Role-Based semantics, which is simple and easy to use.
+
* Service-to-service and endUser-to-Service authorization.
+
* Flexibility through custom properties support in roles and role-bindings.
## Architecture
-The diagram below shows Istio RBAC architecture. The admins specify Istio RBAC policies. The policies are saved in Istio config store.
+The diagram below shows the Istio RBAC architecture. Operators specify Istio RBAC policies. The policies are saved in
+the Istio config store.
-{% include figure.html width='80%' ratio='56.25%'
- img='./img/IstioRBAC.svg'
- alt='Istio RBAC'
- title='Istio RBAC Architecture'
- caption='Istio RBAC Architecture'
+{% include image.html width="80%" ratio="56.25%"
+ link="./img/IstioRBAC.svg"
+ alt="Istio RBAC"
+ caption="Istio RBAC Architecture"
%}
-Istio RBAC engine does two things:
+The Istio RBAC engine does two things:
* **Fetch RBAC policy.** Istio RBAC engine watches for changes on RBAC policy. It fetches the updated RBAC policy if it sees any changes.
* **Authorize Requests.** At runtime, when a request comes, the request context is passed to Istio RBAC engine. RBAC engine evaluates the
request context against the RBAC policies, and returns the authorization result (ALLOW or DENY).
-### Request Context
+### Request context
-In the current release, Istio RBAC engine is implemented as a [Mixer adapter]({{home}}/docs/concepts/policy-and-control/mixer.html#adapters).
+In the current release, the Istio RBAC engine is implemented as a [Mixer adapter]({{home}}/docs/concepts/policy-and-control/mixer.html#adapters).
The request context is provided as an instance of the
[authorization template](https://github.com/istio/istio/blob/master/mixer/template/authorization/template.proto). The request context
contains all the information about the request and the environment that an authorization module needs to know. In particular, it has two parts:
@@ -44,180 +46,206 @@ or any additional properties about the subject such as namespace, service name.
and any additional properties about the action.
Below we show an example "requestcontext".
-```rule
- apiVersion: "config.istio.io/v1alpha2"
- kind: authorization
- metadata:
- name: requestcontext
- namespace: istio-system
- spec:
- subject:
- user: request.auth.principal | ""
- groups: request.auth.principal | ""
- properties:
- service: source.service | ""
- namespace: source.namespace | ""
- action:
- namespace: destination.namespace | ""
- service: destination.service | ""
- method: request.method | ""
- path: request.path | ""
- properties:
- version: request.headers["version"] | ""
+```yaml
+apiVersion: "config.istio.io/v1alpha2"
+kind: authorization
+metadata:
+ name: requestcontext
+ namespace: istio-system
+spec:
+ subject:
+ user: source.user | ""
+ groups: ""
+ properties:
+ service: source.service | ""
+ namespace: source.namespace | ""
+ action:
+ namespace: destination.namespace | ""
+ service: destination.service | ""
+ method: request.method | ""
+ path: request.path | ""
+ properties:
+ version: request.headers["version"] | ""
```
-## Istio RBAC Policy
+## Istio RBAC policy
-Istio RBAC introduces ServiceRole and ServiceRoleBinding, both of which are defined as Kubernetes CustomResourceDefinition (CRD) objects.
+Istio RBAC introduces `ServiceRole` and `ServiceRoleBinding`, both of which are defined as Kubernetes CustomResourceDefinition (CRD) objects.
-* **ServiceRole** defines a role for access to services in the mesh.
-* **ServiceRoleBinding** grants a role to subjects (e.g., a user, a group, a service).
+* **`ServiceRole`** defines a role for access to services in the mesh.
+* **`ServiceRoleBinding`** grants a role to subjects (e.g., a user, a group, a service).
-### ServiceRole
+### `ServiceRole`
-A ServiceRole specification includes a list of rules. Each rule has the following standard fields:
+A `ServiceRole` specification includes a list of rules. Each rule has the following standard fields:
* **services**: A list of service names, which are matched against the `action.service` field of the "requestcontext".
* **methods**: A list of method names which are matched against the `action.method` field of the "requestcontext". In the above "requestcontext",
this is the HTTP or gRPC method. Note that gRPC methods are formatted in the form of "packageName.serviceName/methodName" (case sensitive).
* **paths**: A list of HTTP paths which are matched against the `action.path` field of the "requestcontext". It is ignored in gRPC case.
-A ServiceRole specification only applies to the **namespace** specified in `"metadata"` section. The "services" and "methods" are required
+A `ServiceRole` specification only applies to the **namespace** specified in `"metadata"` section. The "services" and "methods" are required
fields in a rule. "paths" is optional. If not specified or set to "*", it applies to "any" instance.
Here is an example of a simple role "service-admin", which has full access to all services in "default" namespace.
-```rule
- apiVersion: "config.istio.io/v1alpha2"
- kind: ServiceRole
- metadata:
- name: service-admin
- namespace: default
- spec:
- rules:
- - services: ["*"]
- methods: ["*"]
+```yaml
+apiVersion: "config.istio.io/v1alpha2"
+kind: ServiceRole
+metadata:
+ name: service-admin
+ namespace: default
+spec:
+ rules:
+ - services: ["*"]
+ methods: ["*"]
```
Here is another role "products-viewer", which has read ("GET" and "HEAD") access to service "products.default.svc.cluster.local"
in "default" namespace.
-```rule
- apiVersion: "config.istio.io/v1alpha2"
- kind: ServiceRole
- metadata:
- name: products-viewer
- namespace: default
- spec:
- rules:
- - services: ["products.default.svc.cluster.local"]
- methods: ["GET", "HEAD"]
+```yaml
+apiVersion: "config.istio.io/v1alpha2"
+kind: ServiceRole
+metadata:
+ name: products-viewer
+ namespace: default
+spec:
+ rules:
+ - services: ["products.default.svc.cluster.local"]
+ methods: ["GET", "HEAD"]
```
-In addition, we support **prefix match** and **suffix match** for all the fields in a rule. For example, you can define a "tester" role that
+In addition, we support **prefix matching** and **suffix matching** for all the fields in a rule. For example, you can define a "tester" role that
has the following permissions in "default" namespace:
* Full access to all services with prefix "test-" (e.g, "test-bookstore", "test-performance", "test-api.default.svc.cluster.local").
* Read ("GET") access to all paths with "/reviews" suffix (e.g, "/books/reviews", "/events/booksale/reviews", "/reviews")
in service "bookstore.default.svc.cluster.local".
-```rule
- apiVersion: "config.istio.io/v1alpha2"
- kind: ServiceRole
- metadata:
- name: tester
- namespace: default
- spec:
- rules:
- - services: ["test-*"]
- methods: ["*"]
- - services: ["bookstore.default.svc.cluster.local"]
- paths: ["*/reviews"]
- methods: ["GET"]
+```yaml
+apiVersion: "config.istio.io/v1alpha2"
+kind: ServiceRole
+metadata:
+ name: tester
+ namespace: default
+spec:
+ rules:
+ - services: ["test-*"]
+ methods: ["*"]
+ - services: ["bookstore.default.svc.cluster.local"]
+ paths: ["*/reviews"]
+ methods: ["GET"]
```
-In ServiceRole, the combination of "namespace"+"services"+"paths"+"methods" defines "how a service (services) is allowed to be accessed".
+In `ServiceRole`, the combination of "namespace"+"services"+"paths"+"methods" defines "how a service (services) is allowed to be accessed".
In some situations, you may need to specify additional constraints that a rule applies to. For example, a rule may only applies to a
certain "version" of a service, or only applies to services that are labeled "foo". You can easily specify these constraints using
custom fields.
-For example, the following ServiceRole definition extends the previous "products-viewer" role by adding a constraint on service "version"
+For example, the following `ServiceRole` definition extends the previous "products-viewer" role by adding a constraint on service "version"
being "v1" or "v2". Note that the "version" property is provided by `"action.properties.version"` in "requestcontext".
-```rule
- apiVersion: "config.istio.io/v1alpha2"
- kind: ServiceRole
- metadata:
- name: products-viewer-version
- namespace: default
- spec:
- rules:
- - services: ["products.default.svc.cluster.local"]
- methods: ["GET", "HEAD"]
- constraints:
- - key: "version"
- values: ["v1", "v2"]
+```yaml
+apiVersion: "config.istio.io/v1alpha2"
+kind: ServiceRole
+metadata:
+ name: products-viewer-version
+ namespace: default
+spec:
+ rules:
+ - services: ["products.default.svc.cluster.local"]
+ methods: ["GET", "HEAD"]
+ constraints:
+ - key: "version"
+ values: ["v1", "v2"]
```
-### ServiceRoleBinding
+### `ServiceRoleBinding`
-A ServiceRoleBinding specification includes two parts:
-* **roleRef** refers to a ServiceRole object **in the same namespace**.
+A `ServiceRoleBinding` specification includes two parts:
+* **roleRef** refers to a `ServiceRole` resource **in the same namespace**.
* A list of **subjects** that are assigned the role.
A subject can either be a "user", or a "group", or is represented with a set of "properties". Each entry ("user" or "group" or an entry
in "properties") must match one of fields ("user" or "groups" or an entry in "properties") in the "subject" part of the "requestcontext"
instance.
-Here is an example of ServiceRoleBinding object "test-binding-products", which binds two subjects to ServiceRole "product-viewer":
+Here is an example of `ServiceRoleBinding` resource "test-binding-products", which binds two subjects to `ServiceRole` "products-viewer":
* user "alice@yahoo.com".
* "reviews.abc.svc.cluster.local" service in "abc" namespace.
-```rule
- apiVersion: "config.istio.io/v1alpha2"
- kind: ServiceRoleBinding
- metadata:
- name: test-binding-products
- namespace: default
- spec:
- subjects:
- - user: "alice@yahoo.com"
- - properties:
- service: "reviews.abc.svc.cluster.local"
- namespace: "abc"
- roleRef:
- kind: ServiceRole
- name: "products-viewer"
+```yaml
+apiVersion: "config.istio.io/v1alpha2"
+kind: ServiceRoleBinding
+metadata:
+ name: test-binding-products
+ namespace: default
+spec:
+ subjects:
+ - user: "alice@yahoo.com"
+ - properties:
+ service: "reviews.abc.svc.cluster.local"
+ namespace: "abc"
+ roleRef:
+ kind: ServiceRole
+ name: "products-viewer"
+```
+
+In the case that you want to make a service (or services) publicly accessible, you can set the subject to `user: "*"`. This will assign a `ServiceRole`
+to all users/services.
+
+```yaml
+apiVersion: "config.istio.io/v1alpha2"
+kind: ServiceRoleBinding
+metadata:
+ name: binding-products-allusers
+ namespace: default
+spec:
+ subjects:
+ - user: "*"
+ roleRef:
+ kind: ServiceRole
+ name: "products-viewer"
```
## Enabling Istio RBAC
Istio RBAC can be enabled by adding the following Mixer adapter rule. The rule has two parts. The first part defines a RBAC handler.
-The `"config_store_url"` parameter specifies where RBAC engine fetches RBAC policies. The default value for "config_store_url" is `"k8s://"`,
-which means Kubernetes API server. Alternatively, if you are testing RBAC policy locally, you may set it to a local directory such as
-`"fs:///tmp/testdata/configroot"`.
+It has two parameters, `"config_store_url"` and `"cache_duration"`.
+* The `"config_store_url"` parameter specifies where RBAC engine fetches RBAC policies. The default value for `"config_store_url"` is
+`"k8s://"`, which means Kubernetes API server. Alternatively, if you are testing RBAC policy locally, you may set it to a local directory
+such as `"fs:///tmp/testdata/configroot"`.
+* The `"cache_duration"` parameter specifies the duration for which the authorization results may be cached on Mixer client (i.e., Istio proxy).
+The default value for `"cache_duration"` is 1 minute.
The second part defines a rule, which specifies that the RBAC handler should be invoked with the "requestcontext" instance [defined
earlier in the document](#request-context).
-```rule
- apiVersion: "config.istio.io/v1alpha2"
- kind: rbac
- metadata:
- name: handler
- namespace: istio-system
- spec:
- config_store_url: "k8s://"
-
- ---
- apiVersion: "config.istio.io/v1alpha2"
- kind: rule
- metadata:
- name: rbaccheck
- namespace: istio-system
- spec:
- actions:
- # handler and instance names default to the rule's namespace.
- - handler: handler.rbac
- instances:
- - requestcontext.authorization
- ---
+In the following example, Istio RBAC is enabled for the "default" namespace, and the cache duration is set to 30 seconds.
+
+```yaml
+apiVersion: "config.istio.io/v1alpha2"
+kind: rbac
+metadata:
+ name: handler
+ namespace: istio-system
+spec:
+ config_store_url: "k8s://"
+ cache_duration: "30s"
+---
+apiVersion: "config.istio.io/v1alpha2"
+kind: rule
+metadata:
+ name: rbaccheck
+ namespace: istio-system
+spec:
+ match: destination.namespace == "default"
+ actions:
+ # handler and instance names default to the rule's namespace.
+ - handler: handler.rbac
+ instances:
+ - requestcontext.authorization
```
+
+## What's next
+
+Try out the [Istio RBAC with Bookinfo]({{home}}/docs/tasks/security/role-based-access-control.html) sample.
diff --git a/_docs/concepts/traffic-management/fault-injection.md b/_docs/concepts/traffic-management/fault-injection.md
index bfde97b87be7a..b8b11b5b31e93 100644
--- a/_docs/concepts/traffic-management/fault-injection.md
+++ b/_docs/concepts/traffic-management/fault-injection.md
@@ -1,14 +1,12 @@
---
title: Fault Injection
-overview: Introduces the idea of systematic fault injection that can be used to uncover conflicting failure recovery policies across services.
-
-order: 40
+description: Introduces the idea of systematic fault injection that can be used to uncover conflicting failure recovery policies across services.
+
+weight: 40
-layout: docs
-type: markdown
toc: false
---
-
+
While Envoy sidecar/proxy provides a host of
[failure recovery mechanisms](./handling-failures.html) to services running
on Istio, it is still
@@ -25,12 +23,12 @@ regardless of network level failures, and that more meaningful failures can
be injected at the application layer (e.g., HTTP error codes) to exercise
the resilience of an application.
-Operators can configure faults to be injected into requests that match
+Operators can configure faults to be injected into requests that match
specific criteria. Operators can further restrict the percentage of
requests that should be subjected to faults. Two types of faults can be
injected: delays and aborts. Delays are timing failures, mimicking
increased network latency, or an overloaded upstream service. Aborts are
-crash failures that mimick failures in upstream services. Aborts usually
+crash failures that mimic failures in upstream services. Aborts usually
manifest in the form of HTTP error codes, or TCP connection failures.
-Refer to [Istio's traffic management rules](./rules-configuration.html) for more details.
\ No newline at end of file
+Refer to [Istio's traffic management rules](./rules-configuration.html) for more details.
diff --git a/_docs/concepts/traffic-management/handling-failures.md b/_docs/concepts/traffic-management/handling-failures.md
index b4e97c6f526f7..fb619e6b17870 100644
--- a/_docs/concepts/traffic-management/handling-failures.md
+++ b/_docs/concepts/traffic-management/handling-failures.md
@@ -1,11 +1,9 @@
---
title: Handling Failures
-overview: An overview of failure recovery capabilities in Envoy that can be leveraged by unmodified applications to improve robustness and prevent cascading failures.
+description: An overview of failure recovery capabilities in Envoy that can be leveraged by unmodified applications to improve robustness and prevent cascading failures.
-order: 30
+weight: 30
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -14,18 +12,22 @@ that can be taken advantage of by the services in an application. Features
include:
1. Timeouts
-2. Bounded retries with timeout budgets and variable jitter between retries
-3. Limits on number of concurrent connections and requests to upstream services
-4. Active (periodic) health checks on each member of the load balancing pool
-5. Fine-grained circuit breakers (passive health checks) -- applied per
- instance in the load balancing pool
+
+1. Bounded retries with timeout budgets and variable jitter between retries
+
+1. Limits on number of concurrent connections and requests to upstream services
+
+1. Active (periodic) health checks on each member of the load balancing pool
+
+1. Fine-grained circuit breakers (passive health checks) -- applied per
+instance in the load balancing pool
These features can be dynamically configured at runtime through
[Istio's traffic management rules](./rules-configuration.html).
The jitter between retries minimizes the impact of retries on an overloaded
upstream service, while timeout budgets ensure that the calling service
-gets a response (success/failure) within a predictable timeframe.
+gets a response (success/failure) within a predictable time frame.
A combination of active and passive health checks (4 and 5 above)
minimizes the chances of accessing an unhealthy instance in the load
@@ -37,7 +39,7 @@ mesh, minimizing the request failures and impact on latency.
Together, these features enable the service mesh to tolerate failing nodes
and prevent localized failures from cascading instability to other nodes.
-## Fine tuning
+## Fine tuning
Istio's traffic management rules allow
operators to set global defaults for failure recovery per
@@ -46,13 +48,12 @@ service/version. However, consumers of a service can also override
and
[retry]({{home}}/docs/reference/config/istio.routing.v1alpha1.html#HTTPRetry)
defaults by providing request-level overrides through special HTTP headers.
-With the Envoy proxy implementation, the headers are "x-envoy-upstream-rq-timeout-ms" and
-"x-envoy-max-retries", respectively.
-
+With the Envoy proxy implementation, the headers are `x-envoy-upstream-rq-timeout-ms` and
+`x-envoy-max-retries`, respectively.
## FAQ
-_1. Do applications still handle failures when running in Istio?_
+Q: *Do applications still handle failures when running in Istio?*
Yes. Istio improves the reliability and availability of services in the
mesh. However, **applications need to handle the failure (errors)
@@ -61,15 +62,15 @@ a load balancing pool have failed, Envoy will return HTTP 503. It is the
responsibility of the application to implement any fallback logic that is
needed to handle the HTTP 503 error code from an upstream service.
-_2. Will Envoy's failure recovery features break applications that already
-use fault tolerance libraries (e.g., [Hystrix](https://github.com/Netflix/Hystrix))?_
+Q: *Will Envoy's failure recovery features break applications that already
+use fault tolerance libraries (e.g. [Hystrix](https://github.com/Netflix/Hystrix))?*
No. Envoy is completely transparent to the application. A failure response
returned by Envoy would not be distinguishable from a failure response
returned by the upstream service to which the call was made.
-_3. How will failures be handled when using application-level libraries and
-Envoy at the same time?_
+Q: *How will failures be handled when using application-level libraries and
+Envoy at the same time?*
Given two failure recovery policies for the same destination service (e.g.,
two timeouts -- one set in Envoy and another in application's library), **the
diff --git a/_docs/concepts/traffic-management/index.md b/_docs/concepts/traffic-management/index.md
index 4940f53f0ac21..db6114be6502e 100644
--- a/_docs/concepts/traffic-management/index.md
+++ b/_docs/concepts/traffic-management/index.md
@@ -1,11 +1,9 @@
---
title: Traffic Management
-overview: Describes the various Istio features focused on traffic routing and control.
+description: Describes the various Istio features focused on traffic routing and control.
-order: 20
+weight: 20
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/concepts/traffic-management/load-balancing.md b/_docs/concepts/traffic-management/load-balancing.md
index c7525614fca23..ead3ffdb37591 100644
--- a/_docs/concepts/traffic-management/load-balancing.md
+++ b/_docs/concepts/traffic-management/load-balancing.md
@@ -1,11 +1,9 @@
---
title: Discovery & Load Balancing
-overview: Describes how traffic is load balanced across instances of a service in the mesh.
-
-order: 25
+description: Describes how traffic is load balanced across instances of a service in the mesh.
+
+weight: 25
-layout: docs
-type: markdown
toc: false
---
@@ -22,14 +20,12 @@ applications.
**Service Discovery:** Pilot consumes information from the service
registry and provides a platform-agnostic service discovery
-interface. Envoy instances in the mesh perform service discovery and
+interface. Envoy instances in the mesh perform service discovery and
dynamically update their load balancing pools accordingly.
-{% include figure.html width='80%' ratio='74.79%'
- img='./img/pilot/LoadBalancing.svg'
- alt='Discovery and Load Balancing'
- title='Discovery and Load Balancing'
- caption='Discovery and Load Balancing'
+{% include image.html width="80%" ratio="74.79%"
+ link="./img/pilot/LoadBalancing.svg"
+ caption="Discovery and Load Balancing"
%}
As illustrated in the figure above, services in the mesh access each other
@@ -40,7 +36,6 @@ the load balancing pool. While Envoy supports several
Istio currently allows three load balancing modes:
round robin, random, and weighted least request.
-
In addition to load balancing, Envoy periodically checks the health of each
instance in the pool. Envoy follows a circuit breaker style pattern to
classify instances as unhealthy or healthy based on their failure rates for
diff --git a/_docs/concepts/traffic-management/overview.md b/_docs/concepts/traffic-management/overview.md
index e2002fd2b724e..8497c2867fc21 100644
--- a/_docs/concepts/traffic-management/overview.md
+++ b/_docs/concepts/traffic-management/overview.md
@@ -1,13 +1,13 @@
---
title: Overview
-overview: Provides a conceptual overview of traffic management in Istio and the features it enables.
-
-order: 0
+description: Provides a conceptual overview of traffic management in Istio and the features it enables.
+
+weight: 1
-layout: docs
-type: markdown
---
+{% include home.html %}
+
This page provides an overview of how traffic management works
in Istio, including the benefits of its traffic management
principles. It assumes that you've already read [What Is Istio?]({{home}}/docs/concepts/what-is-istio/overview.html)
@@ -43,11 +43,9 @@ of traffic for a particular service to go to a canary version irrespective
of the size of the canary deployment, or send traffic to a particular version
depending on the content of the request.
-{% include figure.html width='85%' ratio='69.52%'
- img='./img/pilot/TrafficManagementOverview.svg'
- alt='Traffic Management with Istio'
- title='Traffic Management with Istio'
- caption='Traffic Management with Istio'
+{% include image.html width="85%" ratio="69.52%"
+ link="./img/pilot/TrafficManagementOverview.svg"
+ caption="Traffic Management with Istio"
%}
Decoupling traffic flow from infrastructure scaling like this allows Istio
diff --git a/_docs/concepts/traffic-management/pilot.md b/_docs/concepts/traffic-management/pilot.md
index a2e2366908043..13c65c745f712 100644
--- a/_docs/concepts/traffic-management/pilot.md
+++ b/_docs/concepts/traffic-management/pilot.md
@@ -1,23 +1,21 @@
---
title: Pilot
-overview: Introduces Pilot, the component responsible for managing a distributed deployment of Envoy proxies in the service mesh.
-
-order: 10
+description: Introduces Pilot, the component responsible for managing a distributed deployment of Envoy proxies in the service mesh.
+
+weight: 10
-layout: docs
-type: markdown
toc: false
+redirect_from: /docs/concepts/traffic-management/manager.html
---
{% include home.html %}
Pilot is responsible for the lifecycle of Envoy instances deployed
across the Istio service mesh.
-{% include figure.html width='60%' ratio='72.17%'
- img='./img/pilot/PilotAdapters.svg'
+{% include image.html width="60%" ratio="72.17%"
+ link="./img/pilot/PilotAdapters.svg"
alt="Pilot's overall architecture."
- title='Pilot Architecture'
- caption='Pilot Architecture'
+ caption="Pilot Architecture"
%}
As illustrated in the figure above, Pilot maintains a canonical
@@ -36,6 +34,6 @@ and [routing tables](https://www.envoyproxy.io/docs/envoy/latest/configuration/h
These APIs decouple Envoy from platform-specific nuances, simplifying the
design and increasing portability across platforms.
-Operators can specify high-level traffic management rules through
+Operators can specify high-level traffic management rules through
[Pilot's Rules API]({{home}}/docs/reference/config/istio.routing.v1alpha1.html). These rules are translated into low-level
configurations and distributed to Envoy instances via the discovery API.
diff --git a/_docs/concepts/traffic-management/request-routing.md b/_docs/concepts/traffic-management/request-routing.md
index 7c318c44a0c96..01f12851dbcfd 100644
--- a/_docs/concepts/traffic-management/request-routing.md
+++ b/_docs/concepts/traffic-management/request-routing.md
@@ -1,12 +1,11 @@
---
title: Request Routing
-overview: Describes how requests are routed between services in an Istio service mesh.
-
-order: 20
+description: Describes how requests are routed between services in an Istio service mesh.
+
+weight: 20
-layout: docs
-type: markdown
---
+{% include home.html %}
This page describes how requests are routed between services in an Istio service mesh.
@@ -20,7 +19,6 @@ etc.). Platform-specific adapters are responsible for populating the
internal model representation with various fields from the metadata found
in the platform.
-
Istio introduces the concept of a service version, which is a finer-grained
way to subdivide service instances by versions (`v1`, `v2`) or environment
(`staging`, `prod`). These variants are not necessarily different API
@@ -32,11 +30,10 @@ additional control over traffic between services.
## Communication between services
-{% include figure.html width='60%' ratio='100.42%'
- img='./img/pilot/ServiceModel_Versions.svg'
- alt='Showing how service versions are handled.'
- title='Service Versions'
- caption='Service Versions'
+{% include image.html width="60%" ratio="100.42%"
+ link="./img/pilot/ServiceModel_Versions.svg"
+ alt="Showing how service versions are handled."
+ caption="Service Versions"
%}
As illustrated in the figure above, clients of a service have no knowledge
@@ -73,10 +70,9 @@ via the sidecar Envoy, operators can add failure recovery features such as
timeouts, retries, circuit breakers, etc., and obtain detailed metrics on
the connections to these services.
-{% include figure.html width='60%' ratio='28.88%'
- img='./img/pilot/ServiceModel_RequestFlow.svg'
- alt='Ingress and Egress through Envoy.'
- title='Request Flow'
- caption='Request Flow'
+{% include image.html width="60%" ratio="28.88%"
+ link="./img/pilot/ServiceModel_RequestFlow.svg"
+ alt="Ingress and Egress through Envoy."
+ caption="Request Flow"
%}
diff --git a/_docs/concepts/traffic-management/rules-configuration.md b/_docs/concepts/traffic-management/rules-configuration.md
index 86892827eeb34..4dc57624fd621 100644
--- a/_docs/concepts/traffic-management/rules-configuration.md
+++ b/_docs/concepts/traffic-management/rules-configuration.md
@@ -1,11 +1,9 @@
---
title: Rules Configuration
-overview: Provides a high-level overview of the domain-specific language used by Istio to configure traffic management rules in the service mesh.
+description: Provides a high-level overview of the domain-specific language used by Istio to configure traffic management rules in the service mesh.
-order: 50
+weight: 50
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -37,7 +35,7 @@ spec:
The destination is the name of the service to which the traffic is being
routed. The route *labels* identify the specific service instances that will
-recieve traffic. For example, in a Kubernetes deployment of Istio, the route
+receive traffic. For example, in a Kubernetes deployment of Istio, the route
*label* "version: v1" indicates that only pods containing the label "version: v1"
will receive traffic.
@@ -76,7 +74,7 @@ domain name (FQDN). It is used by Istio Pilot for matching rules to services.
Normally, the FQDN of the service is composed from three components: *name*,
*namespace*, and *domain*:
-```
+```plain
FQDN = name + "." + namespace + "." + domain
```
@@ -409,7 +407,8 @@ spec:
match:
request:
headers:
- Foo: bar
+ Foo:
+ exact: bar
route:
- labels:
version: v2
diff --git a/_docs/concepts/what-is-istio/goals.md b/_docs/concepts/what-is-istio/goals.md
index 8172c5c32e0c5..8d055ce6f2633 100644
--- a/_docs/concepts/what-is-istio/goals.md
+++ b/_docs/concepts/what-is-istio/goals.md
@@ -1,38 +1,36 @@
---
title: Design Goals
-overview: Describes the core principles that Istio's design adheres to.
-
-order: 20
+description: Describes the core principles that Istio's design adheres to.
+
+weight: 20
-layout: docs
-type: markdown
---
This page outlines the core principles that guide Istio's design.
-Istio’s architecture is informed by a few key design goals that are essential to making the system capable of dealing with services at scale and with high
+Istio’s architecture is informed by a few key design goals that are essential to making the system capable of dealing with services at scale and with high
performance.
- **Maximize Transparency**.
-To adopt Istio, an operator or developer should be required to do the minimum amount of work possible to get real value from the system. To this end, Istio
-can automatically inject itself into all the network paths between services. Istio uses sidecar proxies to capture traffic, and where possible, automatically
-program the networking layer to route traffic through those proxies without any changes to the deployed application code. In Kubernetes, the proxies are
-injected into pods and traffic is captured by programming iptables rules. Once the sidecar proxies are injected and traffic routing is programmed, Istio is
-able to mediate all traffic. This principle also applies to performance. When applying Istio to a deployment, operators should see a minimal increase in
-resource costs for the
+To adopt Istio, an operator or developer should be required to do the minimum amount of work possible to get real value from the system. To this end, Istio
+can automatically inject itself into all the network paths between services. Istio uses sidecar proxies to capture traffic, and where possible, automatically
+program the networking layer to route traffic through those proxies without any changes to the deployed application code. In Kubernetes, the proxies are
+injected into pods and traffic is captured by programming iptables rules. Once the sidecar proxies are injected and traffic routing is programmed, Istio is
+able to mediate all traffic. This principle also applies to performance. When applying Istio to a deployment, operators should see a minimal increase in
+resource costs for the
functionality being provided. Components and APIs must all be designed with performance and scale in mind.
- **Incrementality**.
-As operators and developers become more dependent on the functionality that Istio provides, the system must grow with their needs. While we expect to
-continue adding new features ourselves, we expect the greatest need will be the ability to extend the policy system, to integrate with other sources of policy and control and to propagate signals about mesh behavior to other systems for analysis. The policy runtime supports a standard extension mechanism for plugging in other services. In addition, it allows for the extension of its vocabulary to allow policies to be enforced based on new signals that the mesh produces.
+As operators and developers become more dependent on the functionality that Istio provides, the system must grow with their needs. While we expect to
+continue adding new features ourselves, we expect the greatest need will be the ability to extend the policy system, to integrate with other sources of policy and control and to propagate signals about mesh behavior to other systems for analysis. The policy runtime supports a standard extension mechanism for plugging in other services. In addition, it allows for the extension of its vocabulary to allow policies to be enforced based on new signals that the mesh produces.
- **Portability**.
-The ecosystem in which Istio will be used varies along many dimensions. Istio must run on any cloud or on-prem environment with minimal effort. The task of
-porting Istio-based services to new environments should be trivial, and it should be possible to operate a single service deployed into multiple
+The ecosystem in which Istio will be used varies along many dimensions. Istio must run on any cloud or on-prem environment with minimal effort. The task of
+porting Istio-based services to new environments should be trivial, and it should be possible to operate a single service deployed into multiple
environments (on multiple clouds for redundancy for example) using Istio.
- **Policy Uniformity**.
-The application of policy to API calls between services provides a great deal of control over mesh behavior, but it can be equally important to apply
-policies to resources which are not necessarily expressed at the API level. For example, applying quota to the amount of CPU consumed by an ML training task
-is more useful than applying quota to the call which initiated the work. To this end, the policy system is maintained as a distinct service with its own API
+The application of policy to API calls between services provides a great deal of control over mesh behavior, but it can be equally important to apply
+policies to resources which are not necessarily expressed at the API level. For example, applying quota to the amount of CPU consumed by an ML training task
+is more useful than applying quota to the call which initiated the work. To this end, the policy system is maintained as a distinct service with its own API
rather than being baked into the proxy/sidecar, allowing services to directly integrate with it as needed.
diff --git a/_docs/concepts/what-is-istio/img/overview/arch.svg b/_docs/concepts/what-is-istio/img/overview/arch.svg
index e885cf8898531..87b0f52e20f4b 100644
--- a/_docs/concepts/what-is-istio/img/overview/arch.svg
+++ b/_docs/concepts/what-is-istio/img/overview/arch.svg
@@ -1,4 +1 @@
-
-
-
-
+
\ No newline at end of file
diff --git a/_docs/concepts/what-is-istio/index.md b/_docs/concepts/what-is-istio/index.md
index f8b57d6864351..300b912a04730 100644
--- a/_docs/concepts/what-is-istio/index.md
+++ b/_docs/concepts/what-is-istio/index.md
@@ -1,11 +1,9 @@
---
title: What is Istio?
-overview: A broad overview of the Istio system.
+description: A broad overview of the Istio system.
-order: 10
+weight: 10
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/concepts/what-is-istio/overview.md b/_docs/concepts/what-is-istio/overview.md
index b43a0becb2a37..a3963965cc3a5 100644
--- a/_docs/concepts/what-is-istio/overview.md
+++ b/_docs/concepts/what-is-istio/overview.md
@@ -1,16 +1,15 @@
---
title: Overview
-overview: Provides a conceptual introduction to Istio, including the problems it solves and its high-level architecture.
-
-order: 15
+description: Provides a conceptual introduction to Istio, including the problems it solves and its high-level architecture.
+
+weight: 15
-layout: docs
-type: markdown
---
+{% include home.html %}
This document introduces Istio: an open platform to connect, manage, and secure microservices. Istio provides an easy way to create a network of deployed services with load balancing, service-to-service authentication, monitoring, and more, without requiring any changes in service code. You add Istio support to services by deploying a special sidecar proxy throughout your environment that intercepts all network communication between microservices, configured and managed using Istio's control plane functionality.
-Istio currently only supports service deployment on Kubernetes, though other environments will be supported in future versions.
+Istio currently supports service deployment on Kubernetes, as well as services registered with Consul or Eureka and services running on individual VMs.
For detailed conceptual information about Istio components see our other [Concepts]({{home}}/docs/concepts/) guides.
@@ -18,7 +17,7 @@ For detailed conceptual information about Istio components see our other [Concep
Istio addresses many of the challenges faced by developers and operators as monolithic applications transition towards a distributed microservice architecture. The term **service mesh** is often used to describe the network of
microservices that make up such applications and the interactions between them. As a service mesh grows in size and complexity, it can become harder to understand
-and manage. Its requirements can include discovery, load balancing, failure recovery, metrics, and monitoring, and often more complex operational requirements
+and manage. Its requirements can include discovery, load balancing, failure recovery, metrics, and monitoring, and often more complex operational requirements
such as A/B testing, canary releases, rate limiting, access control, and end-to-end authentication.
Istio provides a complete solution to satisfy the diverse requirements of microservice applications by providing
@@ -27,10 +26,10 @@ network of services:
- **Traffic Management**. Control the flow of traffic and API calls between services, make calls more reliable, and make the network more robust in the face
of adverse conditions.
-
+
- **Observability**. Gain understanding of the dependencies between services and the nature and flow of traffic between them, providing the ability to quickly identify issues.
-- **Policy Enforcement**. Apply organizational policy to the interaction between services, ensure access policies are enforced and resources are fairly
+- **Policy Enforcement**. Apply organizational policy to the interaction between services, ensure access policies are enforced and resources are fairly
distributed among consumers. Policy changes are made by configuring the mesh, not by changing application code.
- **Service Identity and Security**. Provide services in the mesh with a verifiable identity and provide the ability to protect service traffic
@@ -38,14 +37,14 @@ as it flows over networks of varying degrees of trustability.
In addition to these behaviors, Istio is designed for extensibility to meet diverse deployment needs:
-- **Platform Support**. Istio is designed to run in a variety of environments including ones that span Cloud, on-premise, Kubernetes, Mesos etc. We’re
+- **Platform Support**. Istio is designed to run in a variety of environments including ones that span Cloud, on-premise, Kubernetes, Mesos etc. We’re
initially focused on Kubernetes but are working to support other environments soon.
-- **Integration and Customization**. The policy enforcement component can be extended and customized to integrate with existing solutions for
+- **Integration and Customization**. The policy enforcement component can be extended and customized to integrate with existing solutions for
ACLs, logging, monitoring, quotas, auditing and more.
-These capabilities greatly decrease the coupling between application code, the underlying platform, and policy. This decreased coupling not only makes
-services easier to implement, but also makes it simpler for operators to move application deployments between environments or to new policy schemes.
+These capabilities greatly decrease the coupling between application code, the underlying platform, and policy. This decreased coupling not only makes
+services easier to implement, but also makes it simpler for operators to move application deployments between environments or to new policy schemes.
Applications become inherently more portable as a result.
## Architecture
@@ -55,21 +54,20 @@ An Istio service mesh is logically split into a **data plane** and a **control p
- The **data plane** is composed of a set of intelligent
proxies (Envoy) deployed as sidecars that mediate and control all network communication between microservices.
-- The **control plane** is responsible for managing and
+- The **control plane** is responsible for managing and
configuring proxies to route traffic, as well as enforcing policies at runtime.
The following diagram shows the different components that make up each plane:
-{% include figure.html width='80%' ratio='56.25%'
- img='./img/overview/arch.svg'
- alt='The overall architecture of an Istio-based application.'
- title='Istio Architecture'
- caption='Istio Architecture'
+{% include image.html width="80%" ratio="56.25%"
+ link="./img/overview/arch.svg"
+ alt="The overall architecture of an Istio-based application."
+ caption="Istio Architecture"
%}
### Envoy
-Istio uses an extended version of the [Envoy](https://envoyproxy.github.io/envoy/) proxy, a high-performance proxy developed in C++, to mediate all inbound and outbound traffic for all services in the service mesh.
+Istio uses an extended version of the [Envoy](https://envoyproxy.github.io/envoy/) proxy, a high-performance proxy developed in C++, to mediate all inbound and outbound traffic for all services in the service mesh.
Istio leverages Envoy’s many built-in features such as dynamic service discovery, load balancing, TLS termination, HTTP/2 & gRPC proxying, circuit breakers,
health checks, staged rollouts with %-based traffic split, fault injection, and rich metrics.
@@ -77,8 +75,8 @@ Envoy is deployed as a **sidecar** to the relevant service in the same Kubernete
### Mixer
-[Mixer]({{home}}/docs/concepts/policy-and-control/mixer.html) is a platform-independent component responsible for enforcing access control and usage policies across the service mesh and collecting telemetry data from the Envoy proxy and other
-services. The proxy extracts request level [attributes]({{home}}/docs/concepts/policy-and-control/attributes.html), which are sent to Mixer for evaluation. More information on this attribute extraction and policy
+[Mixer]({{home}}/docs/concepts/policy-and-control/mixer.html) is a platform-independent component responsible for enforcing access control and usage policies across the service mesh and collecting telemetry data from the Envoy proxy and other
+services. The proxy extracts request level [attributes]({{home}}/docs/concepts/policy-and-control/attributes.html), which are sent to Mixer for evaluation. More information on this attribute extraction and policy
evaluation can be found in [Mixer Configuration]({{home}}/docs/concepts/policy-and-control/mixer-config.html). Mixer includes a flexible plugin model enabling it to interface with a variety of host environments and infrastructure backends, abstracting the Envoy proxy and Istio-managed services from these details.
### Pilot
@@ -86,30 +84,30 @@ evaluation can be found in [Mixer Configuration]({{home}}/docs/concepts/policy-a
[Pilot]({{home}}/docs/concepts/traffic-management/pilot.html) provides
service discovery for the Envoy sidecars, traffic management capabilities
for intelligent routing (e.g., A/B tests, canary deployments, etc.),
-and resiliency (timeouts, retries, circuit breakers, etc.). It converts a
+and resiliency (timeouts, retries, circuit breakers, etc.). It converts
high level routing rules that control traffic behavior into Envoy-specific
configurations, and propagates them to the sidecars at runtime. Pilot
-abstracts platform-specifc service discovery mechanisms and synthesizes
+abstracts platform-specific service discovery mechanisms and synthesizes
them into a standard format consumable by any sidecar that conforms to the
[Envoy data plane APIs](https://github.com/envoyproxy/data-plane-api).
-This loose coupling allows Istio to run on multiple environments
-(e.g., Kubernetes, Consul/Nomad) while maintaining the same operator
+This loose coupling allows Istio to run on multiple environments
+(e.g., Kubernetes, Consul/Nomad) while maintaining the same operator
interface for traffic management.
-### Istio-Auth
+### Security
-[Istio-Auth]({{home}}/docs/concepts/security/mutual-tls.html) provides strong service-to-service and end-user authentication using mutual TLS, with built-in identity and credential management.
-It can be used to upgrade unencrypted traffic in the service mesh, and provides operators the ability to enforce policy based
-on service identity rather than network controls. Future releases of Istio will add fine-grained access control and auditing to control
-and monitor who accesses your service, API, or resource, using a variety of access control mechanisms, including attribute and
-role-based access control as well as authorization hooks.
+[Security]({{home}}/docs/concepts/security/) provides strong service-to-service and end-user authentication, with built-in identity and
+credential management. It can be used to upgrade unencrypted traffic in the service mesh, and provides operators the ability to enforce
+policy based on service identity rather than network controls. Starting from release 0.5, Istio supports
+[role-based access control]({{home}}/docs/concepts/security/rbac.html) to control who can access your services. Future
+releases of Istio will add a service auditing feature.
## What's next
-* Learn about Istio's [design goals]({{home}}/docs/concepts/what-is-istio/goals.html).
+- Learn about Istio's [design goals]({{home}}/docs/concepts/what-is-istio/goals.html).
-* Explore our [Guides]({{home}}/docs/guides/).
+- Explore our [Guides]({{home}}/docs/guides/).
-* Read about Istio components in detail in our other [Concepts]({{home}}/docs/concepts/) guides.
+- Read about Istio components in detail in our other [Concepts]({{home}}/docs/concepts/) guides.
-* Learn how to deploy Istio with your own services using our [Tasks]({{home}}/docs/tasks/) guides.
+- Learn how to deploy Istio with your own services using our [Tasks]({{home}}/docs/tasks/) guides.
diff --git a/_docs/guides/bookinfo.md b/_docs/guides/bookinfo.md
index 77c34654f8a8e..4761c96236b51 100644
--- a/_docs/guides/bookinfo.md
+++ b/_docs/guides/bookinfo.md
@@ -1,11 +1,9 @@
---
title: Bookinfo Sample Application
-overview: This guide deploys a sample application composed of four separate microservices which will be used to demonstrate various features of the Istio service mesh.
+description: This guide deploys a sample application composed of four separate microservices which will be used to demonstrate various features of the Istio service mesh.
-order: 10
+weight: 10
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -34,16 +32,14 @@ There are 3 versions of the reviews microservice:
The end-to-end architecture of the application is shown below.
-{% include figure.html width='80%' ratio='68.52%'
- img='./img/bookinfo/noistio.svg'
- alt='Bookinfo Application without Istio'
- title='Bookinfo Application without Istio'
- caption='Bookinfo Application without Istio'
+{% include image.html width="80%" ratio="68.52%"
+ link="./img/bookinfo/noistio.svg"
+ caption="Bookinfo Application without Istio"
%}
This application is polyglot, i.e., the microservices are written in different languages.
It’s worth noting that these services have no dependencies on Istio, but make an interesting
-sevice mesh example, particularly because of the multitude of services, languages and versions
+service mesh example, particularly because of the multitude of services, languages and versions
for the reviews service.
## Before you begin
@@ -59,11 +55,9 @@ Istio-enabled environment, with Envoy sidecars injected along side each service.
The needed commands and configuration vary depending on the runtime environment
although in all cases the resulting deployment will look like this:
-{% include figure.html width='80%' ratio='59.08%'
- img='./img/bookinfo/withistio.svg'
- alt='Bookinfo Application'
- title='Bookinfo Application'
- caption='Bookinfo Application'
+{% include image.html width="80%" ratio="59.08%"
+ link="./img/bookinfo/withistio.svg"
+ caption="Bookinfo Application"
%}
All of the microservices will be packaged with an Envoy sidecar that intercepts incoming
@@ -75,46 +69,46 @@ To start the application, follow the instructions below corresponding to your Is
### Running on Kubernetes
-> Note: If you use GKE, please ensure your cluster has at least 4 standard GKE nodes. If you use Minikube, please ensure you have at least 4GB RAM.
+> If you use GKE, please ensure your cluster has at least 4 standard GKE nodes. If you use Minikube, please ensure you have at least 4GB RAM.
1. Change directory to the root of the Istio installation directory.
1. Bring up the application containers:
- If you are using [manual sidecar injection]({{home}}/docs/setup/kubernetes/sidecar-injection.html#manual-sidecar-injection),
- use the following command instead:
+ * If you are using [manual sidecar injection]({{home}}/docs/setup/kubernetes/sidecar-injection.html#manual-sidecar-injection),
+ use the following command
- ```bash
- kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo.yaml)
- ```
+ ```command
+ $ kubectl apply -f <(istioctl kube-inject --debug -f samples/bookinfo/kube/bookinfo.yaml)
+ ```
- If you are using a cluster with
- [automatic sidecar injection]({{home}}/docs/setup/kubernetes/sidecar-injection.html#automatic-sidecar-injection)
- enabled, simply deploy the services using `kubectl`:
+ The `istioctl kube-inject` command is used to manually modify the `bookinfo.yaml`
+   file before creating the deployments as documented [here]({{home}}/docs/reference/commands/istioctl.html#istioctl-kube-inject).
- ```bash
- kubectl apply -f samples/bookinfo/kube/bookinfo.yaml
- ```
+ * If you are using a cluster with
+ [automatic sidecar injection]({{home}}/docs/setup/kubernetes/sidecar-injection.html#automatic-sidecar-injection)
+ enabled, simply deploy the services using `kubectl`
- The `istioctl kube-inject` command is used to manually modify the `bookinfo.yaml`
- file before creating the deployments as documented [here]({{home}}/docs/reference/commands/istioctl.html#istioctl kube-inject).
+ ```command
+ $ kubectl apply -f samples/bookinfo/kube/bookinfo.yaml
+ ```
- Either of the above commands launches all four microservices and creates the gateway
- ingress resource as illustrated in the above diagram.
+ Either of the above commands launches all four microservices as illustrated in the above diagram.
All 3 versions of the reviews service, v1, v2, and v3, are started.
- > Note that in a realistic deployment, new versions of a microservice are deployed
+ > In a realistic deployment, new versions of a microservice are deployed
over time instead of deploying all versions simultaneously.
-1. Confirm all services and pods are correctly defined and running:
+1. Define the ingress gateway for the application:
- ```bash
- kubectl get services
+ ```command
+ $ istioctl create -f samples/bookinfo/routing/bookinfo-gateway.yaml
```
- which produces the following output:
-
- ```bash
+1. Confirm all services and pods are correctly defined and running:
+
+ ```command
+ $ kubectl get services
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
details 10.0.0.31 9080/TCP 6m
kubernetes 10.0.0.1 443/TCP 7d
@@ -125,13 +119,8 @@ To start the application, follow the instructions below corresponding to your Is
and
- ```bash
- kubectl get pods
- ```
-
- which produces
-
- ```bash
+ ```command
+ $ kubectl get pods
NAME READY STATUS RESTARTS AGE
details-v1-1520924117-48z17 2/2 Running 0 6m
productpage-v1-560495357-jk1lz 2/2 Running 0 6m
@@ -143,49 +132,41 @@ To start the application, follow the instructions below corresponding to your Is
#### Determining the ingress IP and Port
-1. If your Kubernetes cluster is running in an environment that supports external load balancers, the IP address of ingress can be obtained by the following command:
+Execute the following command to determine if your Kubernetes cluster is running in an environment that supports external load balancers
- ```bash
- kubectl get ingress -o wide
- ```
+```command
+$ kubectl get svc istio-ingressgateway -n istio-system
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+istio-ingressgateway LoadBalancer 172.21.109.129 130.211.10.121 80:31380/TCP,443:31390/TCP,31400:31400/TCP 17h
+```
- whose output should be similar to
+If the `EXTERNAL-IP` value is set, your environment has an external load balancer that you can use for the ingress gateway
- ```bash
- NAME HOSTS ADDRESS PORTS AGE
- gateway * 130.211.10.121 80 1d
- ```
+```command
+$ export GATEWAY_URL=130.211.10.121:80
+```
- The address of the ingress service would then be
-
- ```bash
- export GATEWAY_URL=130.211.10.121:80
- ```
+If the `EXTERNAL-IP` value is `<none>` (or perpetually `<pending>`), your environment does not support external load balancers.
+In this case, you can access the gateway using the service `nodePort`.
-1. _GKE:_ Sometimes when the service is unable to obtain an external IP, `kubectl get ingress -o wide` may display a list of worker node addresses. In this case, you can use any of the addresses, along with the NodePort, to access the ingress. If the cluster has a firewall, you will also need to create a firewall rule to allow TCP traffic to the NodePort.
+1. _GKE:_
- ```bash
- export GATEWAY_URL=:$(kubectl get svc istio-ingress -n istio-system -o jsonpath='{.spec.ports[0].nodePort}')
- gcloud compute firewall-rules create allow-book --allow tcp:$(kubectl get svc istio-ingress -n istio-system -o jsonpath='{.spec.ports[0].nodePort}')
+ ```command
+ $ export GATEWAY_URL=:$(kubectl get svc istio-ingressgateway -n istio-system -o jsonpath='{.spec.ports[0].nodePort}')
+ $ gcloud compute firewall-rules create allow-book --allow tcp:$(kubectl get svc istio-ingressgateway -n istio-system -o jsonpath='{.spec.ports[0].nodePort}')
```
-1. _IBM Cloud Container Service Free Tier:_ External load balancer is not available for kubernetes clusters in the free tier. You can use the public IP of the worker node, along with the NodePort, to access the ingress. The public IP of the worker node can be obtained from the output of the following command:
+1. _IBM Cloud Container Service Free Tier:_
- ```bash
- bx cs workers
- export GATEWAY_URL=:$(kubectl get svc istio-ingress -n istio-system -o jsonpath='{.spec.ports[0].nodePort}')
+ ```command
+ $ bx cs workers
+ $ export GATEWAY_URL=:$(kubectl get svc istio-ingressgateway -n istio-system -o jsonpath='{.spec.ports[0].nodePort}')
```
-1. _IBM Cloud Private:_ External load balancers are not supported in IBM Cloud Private. You can use the host IP of the ingress service, along with the NodePort, to access the ingress.
+1. _Other environments (e.g., minikube):_
- ```bash
- export GATEWAY_URL=$(kubectl get po -l istio=ingress -n istio-system -o 'jsonpath={.items[0].status.hostIP}'):$(kubectl get svc istio-ingress -n istio-system -o 'jsonpath={.spec.ports[0].nodePort}')
- ```
-
-1. _Minikube:_ External load balancers are not supported in Minikube. You can use the host IP of the ingress service, along with the NodePort, to access the ingress.
-
- ```bash
- export GATEWAY_URL=$(kubectl get po -l istio=ingress -n istio-system -o 'jsonpath={.items[0].status.hostIP}'):$(kubectl get svc istio-ingress -n istio-system -o 'jsonpath={.spec.ports[0].nodePort}')
+ ```command
+ $ export GATEWAY_URL=$(kubectl get po -l istio=ingressgateway -n istio-system -o 'jsonpath={.items[0].status.hostIP}'):$(kubectl get svc istio-ingressgateway -n istio-system -o 'jsonpath={.spec.ports[0].nodePort}')
```
### Running on Docker with Consul or Eureka
@@ -194,38 +175,40 @@ To start the application, follow the instructions below corresponding to your Is
1. Bring up the application containers.
- * To test with Consul, run the following commands:
- ```bash
- docker-compose -f samples/bookinfo/consul/bookinfo.yaml up -d
- docker-compose -f samples/bookinfo/consul/bookinfo.sidecars.yaml up -d
- ```
- * To test with Eureka, run the following commands:
- ```bash
- docker-compose -f samples/bookinfo/eureka/bookinfo.yaml up -d
- docker-compose -f samples/bookinfo/eureka/bookinfo.sidecars.yaml up -d
- ```
+ To test with Consul, run the following commands:
+
+ ```command
+ $ docker-compose -f samples/bookinfo/consul/bookinfo.yaml up -d
+ $ docker-compose -f samples/bookinfo/consul/bookinfo.sidecars.yaml up -d
+ ```
+
+ To test with Eureka, run the following commands:
+
+ ```command
+ $ docker-compose -f samples/bookinfo/eureka/bookinfo.yaml up -d
+ $ docker-compose -f samples/bookinfo/eureka/bookinfo.sidecars.yaml up -d
+ ```
+
1. Confirm that all docker containers are running:
- ```bash
- docker ps -a
+ ```command
+ $ docker ps -a
```
> If the Istio Pilot container terminates, re-run the command from the previous step.
1. Set the GATEWAY_URL:
- ```bash
- export GATEWAY_URL=localhost:9081
+ ```command
+ $ export GATEWAY_URL=localhost:9081
```
## What's next
To confirm that the Bookinfo application is running, run the following `curl` command:
-```bash
-curl -o /dev/null -s -w "%{http_code}\n" http://${GATEWAY_URL}/productpage
-```
-```
+```command
+$ curl -o /dev/null -s -w "%{http_code}\n" http://${GATEWAY_URL}/productpage
200
```
@@ -236,7 +219,7 @@ stars, black stars, no stars), since we haven't yet used Istio to control the
version routing.
You can now use this sample to experiment with Istio's features for
-traffic routing, fault injection, rate limitting, etc..
+traffic routing, fault injection, rate limiting, etc.
To proceed, refer to one or more of the [Istio Guides]({{home}}/docs/guides),
depending on your interest. [Intelligent Routing]({{home}}/docs/guides/intelligent-routing.html)
is a good place to start for beginners.
@@ -250,36 +233,36 @@ uninstall and clean it up using the following instructions.
1. Delete the routing rules and terminate the application pods
- ```bash
- samples/bookinfo/kube/cleanup.sh
+ ```command
+ $ samples/bookinfo/kube/cleanup.sh
```
1. Confirm shutdown
- ```bash
- istioctl get routerules #-- there should be no more routing rules
- kubectl get pods #-- the Bookinfo pods should be deleted
+ ```command
+ $ istioctl get virtualservices #-- there should be no more routing rules
+ $ kubectl get pods #-- the Bookinfo pods should be deleted
```
### Uninstall from Docker environment
1. Delete the routing rules and application containers
- 1. In a Consul setup, run the following command:
+ In a Consul setup, run the following command:
- ```bash
- samples/bookinfo/consul/cleanup.sh
+ ```command
+ $ samples/bookinfo/consul/cleanup.sh
```
-
- 1. In a Eureka setup, run the following command:
-
- ```bash
- samples/bookinfo/eureka/cleanup.sh
+
+ In a Eureka setup, run the following command:
+
+ ```command
+ $ samples/bookinfo/eureka/cleanup.sh
```
-2. Confirm cleanup
+1. Confirm cleanup
- ```bash
- istioctl get routerules #-- there should be no more routing rules
- docker ps -a #-- the Bookinfo containers should be deleted
+ ```command
+ $ istioctl get virtualservices #-- there should be no more routing rules
+ $ docker ps -a #-- the Bookinfo containers should be deleted
```
diff --git a/_docs/guides/endpoints.md b/_docs/guides/endpoints.md
new file mode 100644
index 0000000000000..4b5d95dad2438
--- /dev/null
+++ b/_docs/guides/endpoints.md
@@ -0,0 +1,116 @@
+---
+title: Install Istio for Google Cloud Endpoints Services
+description: Explains how to manually integrate Google Cloud Endpoints services with Istio.
+
+weight: 42
+---
+{% include home.html %}
+
+This document shows how to manually integrate Istio with existing
+Google Cloud Endpoints services.
+
+## Before you begin
+
+If you don't have an Endpoints service and want to try it out, you can follow
+the [instructions](https://cloud.google.com/endpoints/docs/openapi/get-started-kubernetes-engine)
+to setup an Endpoints service on GKE.
+After setup, you should be able to get an API key and store it in `ENDPOINTS_KEY` environment variable and the external IP address `EXTERNAL_IP`.
+You may test the service using the following command:
+
+```command
+$ curl --request POST --header "content-type:application/json" --data '{"message":"hello world"}' "http://${EXTERNAL_IP}:80/echo?key=${ENDPOINTS_KEY}"
+```
+
+You need to install Istio with [instructions]({{home}}/docs/setup/kubernetes/quick-start.html#google-kubernetes-engine).
+
+## HTTP Endpoints service
+
+1. Inject the service into the mesh using `--includeIPRanges` by following the
+[instructions]({{home}}/docs/tasks/traffic-management/egress.html#calling-external-services-directly)
+so that Egress is allowed to call external services directly.
+Otherwise, ESP won't be able to access Google cloud service control.
+
+1. After injection, issue the same test command as above to ensure that calling ESP continues to work.
+
+1. If you want to access the service through Ingress, create the following Ingress definition:
+
+ ```bash
+ cat < Note: this guide is still under development and only tested on Google Cloud Platform.
- On IBM Cloud or other platforms where overlay network of Pods is isolated from VM network,
- VMs cannot initiate any direct communication to Kubernetes Pods even when using Istio.
+> This guide is still under development and only tested on Google Cloud Platform.
+On IBM Cloud or other platforms where overlay network of Pods is isolated from VM network,
+VMs cannot initiate any direct communication to Kubernetes Pods even when using Istio.
## Overview
-{% include figure.html width='80%' ratio='56.78%'
- img='./img/mesh-expansion.svg'
- alt='Bookinfo Application with Istio Mesh Expansion'
- title='Bookinfo Application with Istio Mesh Expansion'
- caption='Bookinfo Application with Istio Mesh Expansion'
+{% include image.html width="80%" ratio="56.78%"
+ link="./img/mesh-expansion.svg"
+ caption="Bookinfo Application with Istio Mesh Expansion"
%}
-
## Before you begin
* Setup Istio by following the instructions in the
@@ -36,38 +31,48 @@ this infrastructure as a single mesh.
* Deploy the [Bookinfo]({{home}}/docs/guides/bookinfo.html) sample application (in the `bookinfo` namespace).
* Create a VM named 'vm-1' in the same project as Istio cluster, and [Join the Mesh]({{home}}/docs/setup/kubernetes/mesh-expansion.html).
-## Running mysql on the VM
+## Running MySQL on the VM
-We will first install mysql on the VM, and configure it as a backend for the ratings service.
+We will first install MySQL on the VM, and configure it as a backend for the ratings service.
On the VM:
-```bash
-sudo apt-get update && sudo apt-get install -y mariadb-server
-sudo mysql
+
+```command
+$ sudo apt-get update && sudo apt-get install -y mariadb-server
+$ sudo mysql
# Grant access to root
GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY 'password' WITH GRANT OPTION;
quit;
-sudo systemctl restart mysql
```
-You can find details of configuring mysql at [Mysql](https://mariadb.com/kb/en/library/download/).
+
+```command
+$ sudo systemctl restart mysql
+```
+You can find details of configuring MySQL at [MySQL](https://mariadb.com/kb/en/library/download/).
On the VM add ratings database to mysql.
-```bash
-# Add ratings db to the mysql db
-curl -q https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/src/mysql/mysqldb-init.sql | mysql -u root -ppassword
+
+```command
+$ curl -q https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/src/mysql/mysqldb-init.sql | mysql -u root -ppassword
```
-To make it easy to visually inspect the difference in the output of the bookinfo application, you can change the ratings that are generated by using the following commands
-```bash
-# To inspect the ratings
-mysql -u root -ppassword test -e "select * from ratings;"
+
+To make it easy to visually inspect the difference in the output of the Bookinfo application, you can change the ratings that are generated by using the
+following commands to inspect the ratings:
+
+```command
+$ mysql -u root -ppassword test -e "select * from ratings;"
+----------+--------+
| ReviewID | Rating |
+----------+--------+
| 1 | 5 |
| 2 | 4 |
+----------+--------+
-# To change the ratings
-mysql -u root -ppassword test -e "update ratings set rating=1 where reviewid=1;select * from ratings;"
+```
+
+and to change the ratings
+
+```command
+$ mysql -u root -ppassword test -e "update ratings set rating=1 where reviewid=1;select * from ratings;"
+----------+--------+
| ReviewID | Rating |
+----------+--------+
@@ -79,18 +84,17 @@ mysql -u root -ppassword test -e "update ratings set rating=1 where reviewid=1;
## Find out the IP address of the VM that will be used to add it to the mesh
On the VM:
-```bash
-hostname -I
+
+```command
+$ hostname -I
```
## Registering the mysql service with the mesh
+
On a host with access to `istioctl` commands, register the VM and mysql db service
-```bash
-istioctl register -n vm mysqldb 3306
-```
-Sample output:
-```
-$ istioctl register -n vm mysqldb 10.150.0.5 3306
+
+```command
+$ istioctl register -n vm mysqldb 3306
I1108 20:17:54.256699 40419 register.go:43] Registering for service 'mysqldb' ip '10.150.0.5', ports list [{3306 mysql}]
I1108 20:17:54.256815 40419 register.go:48] 0 labels ([]) and 1 annotations ([alpha.istio.io/kubernetes-serviceaccounts=default])
W1108 20:17:54.573068 40419 register.go:123] Got 'services "mysqldb" not found' looking up svc 'mysqldb' in namespace 'vm', attempting to create it
@@ -104,14 +108,17 @@ Note that the 'mysqldb' virtual machine does not need and should not have specia
## Using the mysql service
The ratings service in bookinfo will use the DB on the machine. To verify that it works, create version 2 of the ratings service that uses the mysql db on the VM. Then specify route rules that force the review service to use the ratings version 2.
-```bash
-# Create the version of ratings service that will use mysql back end
-istioctl kube-inject -n bookinfo -f samples/bookinfo/kube/bookinfo-ratings-v2-mysql-vm.yaml | kubectl apply -n bookinfo -f -
-# Create route rules that will force bookinfo to use the ratings back end
-istioctl create -n bookinfo -f samples/bookinfo/kube/route-rule-ratings-mysql-vm.yaml
+```command
+$ istioctl kube-inject -n bookinfo -f samples/bookinfo/kube/bookinfo-ratings-v2-mysql-vm.yaml | kubectl apply -n bookinfo -f -
+```
+Create route rules that will force bookinfo to use the ratings back end:
+
+```command
+$ istioctl create -n bookinfo -f samples/bookinfo/kube/route-rule-ratings-mysql-vm.yaml
```
-You can verify the output of bookinfo application is showing 1 star from Reviewer1 and 4 stars from Reviewer2 or change the ratings on your VM and see the results.
+You can verify the output of the Bookinfo application is showing 1 star from Reviewer1 and 4 stars from Reviewer2 or change the ratings on your VM and see the
+results.
You can also find some troubleshooting and other information in the [RawVM MySQL](https://github.com/istio/istio/blob/master/samples/rawvm/README.md) document in the meantime.
diff --git a/_docs/guides/intelligent-routing.md b/_docs/guides/intelligent-routing.md
index c917577cb1304..5c6f391bc7884 100644
--- a/_docs/guides/intelligent-routing.md
+++ b/_docs/guides/intelligent-routing.md
@@ -1,10 +1,8 @@
---
title: Intelligent Routing
-overview: This guide demonstrates how to use various traffic management capabilities of an Istio service mesh.
+description: This guide demonstrates how to use various traffic management capabilities of an Istio service mesh.
-order: 20
-layout: docs
-type: markdown
+weight: 20
---
{% include home.html %}
diff --git a/_docs/guides/policy-enforcement.md b/_docs/guides/policy-enforcement.md
index 5c92621d8e130..68d30b2e66e37 100644
--- a/_docs/guides/policy-enforcement.md
+++ b/_docs/guides/policy-enforcement.md
@@ -1,11 +1,9 @@
---
title: Policy Enforcement
-overview: This sample uses the Bookinfo application to demonstrate policy enforcement using Istio Mixer.
+description: This sample uses the Bookinfo application to demonstrate policy enforcement using Istio Mixer.
-order: 40
+weight: 40
draft: true
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -18,6 +16,7 @@ features are important, and so on. This is not a task, but a feature of
Istio.
## Before you begin
+
* Describe installation options.
* Install Istio control plane in a Kubernetes cluster by following the quick start instructions in the
diff --git a/_docs/guides/security.md b/_docs/guides/security.md
index 9da4ba1162f0a..2af8822707de8 100644
--- a/_docs/guides/security.md
+++ b/_docs/guides/security.md
@@ -1,11 +1,9 @@
---
title: Security
-overview: This sample demonstrates how to obtain uniform metrics, logs, traces across different services using Istio Mixer and Istio sidecar.
+description: This sample demonstrates how to obtain uniform metrics, logs, traces across different services using Istio Mixer and Istio sidecar.
-order: 30
+weight: 30
draft: true
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -16,6 +14,7 @@ This sample demonstrates how to obtain uniform metrics, logs, traces across diff
Placeholder.
## Before you begin
+
* Describe installation options.
* Install Istio control plane in a Kubernetes cluster by following the quick start instructions in the
diff --git a/_docs/guides/telemetry.md b/_docs/guides/telemetry.md
index afe0b3b8ee73c..7076c39a32c1f 100644
--- a/_docs/guides/telemetry.md
+++ b/_docs/guides/telemetry.md
@@ -1,10 +1,8 @@
---
title: In-Depth Telemetry
-overview: This sample demonstrates how to obtain uniform metrics, logs, traces across different services using Istio Mixer and Istio sidecar.
+description: This sample demonstrates how to obtain uniform metrics, logs, traces across different services using Istio Mixer and Istio sidecar.
-order: 30
-layout: docs
-type: markdown
+weight: 30
---
{% include home.html %}
@@ -49,7 +47,7 @@ developers to manually instrument their applications.
applications.
1. [Using the Istio Dashboard]({{home}}/docs/tasks/telemetry/using-istio-dashboard.html)
- This task installs the Grafana add-on with a pre-configured dashboard
+ This task installs the Grafana add-on with a preconfigured dashboard
for monitoring mesh traffic.
## Cleanup
diff --git a/_docs/guides/upgrading-istio.md b/_docs/guides/upgrading-istio.md
deleted file mode 100644
index 691100bb26d21..0000000000000
--- a/_docs/guides/upgrading-istio.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: Upgrading Istio
-overview: This guide demonstrates how to upgrade the Istio control plane and data plane independently.
-
-order: 70
-draft: true
-layout: docs
-type: markdown
----
-{% include home.html %}
-
-This guide demonstrates how to upgrade the Istio control plane and data plane independently.
-
-## Overview
-
-Placeholder.
-
-## Application Setup
-
-1. Steps
-
-## Tasks
-
-1. some tasks that will complete the goal of this sample.
diff --git a/_docs/guides/using-external-services.md b/_docs/guides/using-external-services.md
index 579afa9a2083d..cd3f9692d7a62 100644
--- a/_docs/guides/using-external-services.md
+++ b/_docs/guides/using-external-services.md
@@ -1,11 +1,9 @@
---
title: Integrating with External Services
-overview: This sample integrates third party services with Bookinfo and demonstrates how to use Istio service mesh to provide metrics, and routing functions for these services.
+description: This sample integrates third party services with Bookinfo and demonstrates how to use Istio service mesh to provide metrics, and routing functions for these services.
-order: 50
+weight: 50
draft: true
-layout: docs
-type: markdown
---
{% include home.html %}
diff --git a/_docs/index.md b/_docs/index.md
index ad2cc2950de33..f6f469b611557 100644
--- a/_docs/index.md
+++ b/_docs/index.md
@@ -1,11 +1,9 @@
---
title: Welcome
-overview: Istio documentation home page.
+description: Istio documentation home page.
-order: 0
+weight: 1
-layout: docs
-type: markdown
toc: false
---
{% include home.html %}
@@ -19,8 +17,8 @@ is where you can learn about what Istio does and how it does it.
the Istio control plane in various environments, as well as instructions
for installing the sidecar in the application deployment. Quick start
instructions are available for
- [Kubernetes]({{docs}}/docs/setup/kubernetes/quick-start.html) and
- [Docker Compose w/ Consul]({{docs}}/docs/setup/consul/quick-start.html).
+ [Kubernetes]({{home}}/docs/setup/kubernetes/quick-start.html) and
+ [Docker Compose w/ Consul]({{home}}/docs/setup/consul/quick-start.html).
- [Tasks]({{home}}/docs/tasks/). Tasks show you how to do a single directed activity with Istio.
@@ -30,9 +28,14 @@ intended to highlight a particular set of Istio's features.
- [Reference]({{home}}/docs/reference/). Detailed exhaustive lists of
command-line options, configuration options, API definitions, and procedures.
-We're always looking for help improving our documentation, so please don't hesitate to
+In addition, you might find these links interesting:
+
+- The latest Istio monthly release is {{site.data.istio.version}}: [download {{site.data.istio.version}}](https://github.com/istio/istio/releases),
+[release notes]({{home}}/about/notes/{{site.data.istio.version}}.html).
+
+- Nostalgic for days gone by? We keep an [archive of the earlier releases' documentation](https://archive.istio.io/).
+
+- We're always looking for help improving our documentation, so please don't hesitate to
[file an issue](https://github.com/istio/istio.github.io/issues/new) if you see some problem.
Or better yet, submit your own [contributions]({{home}}/about/contribute/editing.html) to help
make our docs better.
-
-Follow this link for the archive of the [earlier releases' documentation](https://archive.istio.io/).
diff --git a/_docs/reference/api/index.md b/_docs/reference/api/index.md
index f20458e4c0b72..b9e5b9b9cce16 100644
--- a/_docs/reference/api/index.md
+++ b/_docs/reference/api/index.md
@@ -1,11 +1,9 @@
---
title: API
-overview: Detailed information on API parameters.
+description: Detailed information on API parameters.
-order: 10
+weight: 10
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/reference/api/istio.mixer.v1.html b/_docs/reference/api/istio.mixer.v1.html
index 1636374e948fc..a1815b79b30be 100644
--- a/_docs/reference/api/istio.mixer.v1.html
+++ b/_docs/reference/api/istio.mixer.v1.html
@@ -1,6 +1,6 @@
---
title: Mixer
-overview: API definitions to interact with Mixer
+description: API definitions to interact with Mixer
location: https://istio.io/docs/reference/api/istio.mixer.v1.html
layout: protoc-gen-docs
redirect_from: /docs/reference/api/mixer/mixer.html
@@ -71,7 +71,7 @@
Attributes are strongly typed. The supported attribute types are defined by
-ValueType.
+ValueType.
Each type of value is encoded into one of the so-called transport types present
in this message.
diff --git a/_docs/reference/commands/index.md b/_docs/reference/commands/index.md
index 0fd20af35c09c..291b1d442c64f 100644
--- a/_docs/reference/commands/index.md
+++ b/_docs/reference/commands/index.md
@@ -1,11 +1,9 @@
---
title: Commands
-overview: Describes usage and options of the Istio commands and utilities.
+description: Describes usage and options of the Istio commands and utilities.
-order: 30
+weight: 30
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/reference/commands/istio_ca.html b/_docs/reference/commands/istio_ca.html
index 0c1277eff8c52..51b2f9d845960 100644
--- a/_docs/reference/commands/istio_ca.html
+++ b/_docs/reference/commands/istio_ca.html
@@ -1,6 +1,6 @@
---
title: istio_ca
-overview: Istio Certificate Authority (CA)
+description: Istio Certificate Authority (CA)
layout: pkg-collateral-docs
number_of_entries: 4
---
@@ -10,199 +10,160 @@
Flags
-
Shorthand
Description
-
--alsologtostderr
-
-
log to standard error as well as files
+
--append-dns-names
+
Append DNS names to the certificates for webhook services.
--cert-chain <string>
-
Path to the certificate chain file (default ``)
+
--citadel-storage-namespace <string>
+
Namespace where the Citadel pod is running. Will not be used if explicit file or other storage mechanism is specified. (default `istio-system`)
+
+
+
--custom-dns-names <string>
+
The list of account.namespace:customdns names, separated by comma. (default ``)
+
+
+
--enable-profiling
+
Enabling profiling when monitoring Citadel.
+
+
+
--grpc-host-identities <string>
+
The list of hostnames for istio ca server, separated by comma. (default `istio-ca,istio-citadel`)
+
+
--grpc-hostname <string>
-
-
The hostname for GRPC server. (default `localhost`)
+
DEPRECATED, use --grpc-host-identites. (default `istio-ca`)
--grpc-port <int>
-
-
The port number for GRPC server. If unspecified, Istio CA will not server GRPC request. (default `0`)
+
The port number for Citadel GRPC server. If unspecified, Citadel will not serve GRPC requests. (default `8060`)
-
--istio-ca-storage-namespace <string>
-
-
Namespace where the Istio CA pods is running. Will not be used if explicit file or other storage mechanism is specified. (default `istio-system`)
+
--key-size <int>
+
Size of generated private key (default `2048`)
--kube-config <string>
-
Specifies path to kubeconfig file. This must be specified when not running inside a Kubernetes pod. (default ``)
--listened-namespace <string>
-
-
Select a namespace for the CA to listen to. If unspecified, Istio CA tries to use the ${NAMESPACE} environment variable. If neither is set, Istio CA listens to all namespaces. (default ``)
+
Select a namespace for the CA to listen to. If unspecified, Citadel tries to use the ${NAMESPACE} environment variable. If neither is set, Citadel listens to all namespaces. (default ``)
--liveness-probe-interval <duration>
-
Interval of updating file for the liveness probe. (default `0s`)
--liveness-probe-path <string>
-
Path to the file for the liveness probe. (default ``)
--log_as_json
-
Whether to format output as JSON or in plain console-friendly format
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_callers
-
-
Include caller information, useful for debugging
-
-
-
--log_dir <string>
-
-
If non-empty, write log files in this directory (default ``)
+
--log_caller <string>
+
Comma-separated list of scopes for which to include called information, scopes can be any of [default] (default ``)
--log_output_level <string>
-
-
The minimum logging level of messages to output, can be one of "debug", "info", "warn", "error", or "none" (default `info`)
+
The minimum logging level of messages to output, can be one of [debug, info, warn, error, none] (default `default:info`)
--log_rotate <string>
-
The path for the optional rotating log file (default ``)
--log_rotate_max_age <int>
-
The maximum age in days of a log file beyond which the file is rotated (0 indicates no limit) (default `30`)
--log_rotate_max_backups <int>
-
The maximum number of log file backups to keep before older files are deleted (0 indicates no limit) (default `1000`)
--log_rotate_max_size <int>
-
The maximum size in megabytes of a log file beyond which the file is rotated (default `104857600`)
--log_stacktrace_level <string>
-
-
The minimum logging level at which stack traces are captured, can be one of "debug", "info", "warn", "error", or "none" (default `none`)
+
The minimum logging level at which stack traces are captured, can be one of [debug, info, warn, error, none] (default `default:none`)
--log_target <stringArray>
-
The set of paths where to output the log. This can be any path as well as the special values stdout and stderr (default `[stdout]`)
-
--logtostderr
-
-
log to standard error instead of files
+
--max-workload-cert-ttl <duration>
+
The max TTL of issued workload certificates (default `2160h0m0s`)
-
--max-workload-cert-ttl <duration>
-
-
The max TTL of issued workload certificates (default `168h0m0s`)
+
--monitoring-port <int>
+
The port number for monitoring Citadel. If unspecified, Citadel will disable monitoring. (default `9093`)
+
+
+
--org <string>
+
Organization for the cert (default ``)
--probe-check-interval <duration>
-
Interval of checking the liveness of the CA. (default `30s`)
+
--requested-ca-cert-ttl <duration>
+
The requested TTL for the workload (default `8760h0m0s`)
+
+
--root-cert <string>
-
Path to the root certificate file (default ``)
--self-signed-ca
-
Indicates whether to use auto-generated self-signed CA certificate. When set to true, the '--signing-cert' and '--signing-key' options are ignored.
--self-signed-ca-cert-ttl <duration>
-
The TTL of self-signed CA root certificate (default `8760h0m0s`)
--self-signed-ca-org <string>
-
The issuer organization used in self-signed CA certificate (default to k8s.cluster.local) (default `k8s.cluster.local`)
+
--sign-ca-certs
+
Whether Citadel signs certificates for other CAs
+
+
--signing-cert <string>
-
Path to the CA signing certificate file (default ``)
--signing-key <string>
-
Path to the CA signing key file (default ``)
-
--stderrthreshold <severity>
-
-
logs at or above this threshold go to stderr (default `2`)
-
-
-
--upstream-auth <string>
-
-
Specifies how the Istio CA is authenticated to the upstream CA. (default `mtls`)
-
-
--upstream-ca-address <string>
-
-
The IP:port address of the upstream CA. When set, the CA will rely on the upstream Istio CA to provision its own certificate. (default ``)
-
-
-
--upstream-ca-cert-file <string>
-
-
Path to the certificate for authenticating upstream CA. (default ``)
-
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
+
The IP:port address of the upstream CA. When set, the CA will rely on the upstream Citadel to provision its own certificate. (default ``)
--workload-cert-grace-period-ratio <float32>
-
The workload certificate rotation grace period, as a ratio of the workload certificate TTL. (default `0.5`)
--workload-cert-min-grace-period <duration>
-
The minimum workload certificate rotation grace period. (default `10m0s`)
--workload-cert-ttl <duration>
-
-
The TTL of issued workload certificates (default `19h0m0s`)
+
The TTL of issued workload certificates (default `2160h0m0s`)
@@ -213,100 +174,53 @@
istio_ca probe
Flags
-
Shorthand
Description
-
--alsologtostderr
-
-
log to standard error as well as files
-
-
--interval <duration>
-
Duration used for checking the target file's last modified time. (default `0s`)
--log_as_json
-
Whether to format output as JSON or in plain console-friendly format
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_callers
-
-
Include caller information, useful for debugging
-
-
-
--log_dir <string>
-
-
If non-empty, write log files in this directory (default ``)
+
--log_caller <string>
+
Comma-separated list of scopes for which to include called information, scopes can be any of [default] (default ``)
--log_output_level <string>
-
-
The minimum logging level of messages to output, can be one of "debug", "info", "warn", "error", or "none" (default `info`)
+
The minimum logging level of messages to output, can be one of [debug, info, warn, error, none] (default `default:info`)
--log_rotate <string>
-
The path for the optional rotating log file (default ``)
--log_rotate_max_age <int>
-
The maximum age in days of a log file beyond which the file is rotated (0 indicates no limit) (default `30`)
--log_rotate_max_backups <int>
-
The maximum number of log file backups to keep before older files are deleted (0 indicates no limit) (default `1000`)
--log_rotate_max_size <int>
-
The maximum size in megabytes of a log file beyond which the file is rotated (default `104857600`)
--log_stacktrace_level <string>
-
-
The minimum logging level at which stack traces are captured, can be one of "debug", "info", "warn", "error", or "none" (default `none`)
+
The minimum logging level at which stack traces are captured, can be one of [debug, info, warn, error, none] (default `default:none`)
--log_target <stringArray>
-
The set of paths where to output the log. This can be any path as well as the special values stdout and stderr (default `[stdout]`)
-
--logtostderr
-
-
log to standard error instead of files
-
-
--probe-path <string>
-
Path of the file for checking the availability. (default ``)
-
-
--stderrthreshold <severity>
-
-
logs at or above this threshold go to stderr (default `2`)
-
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
-
istio_ca version
@@ -321,34 +235,19 @@
istio_ca version
-
--alsologtostderr
-
-
log to standard error as well as files
-
-
--log_as_json
Whether to format output as JSON or in plain console-friendly format
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_callers
-
-
Include caller information, useful for debugging
-
-
-
--log_dir <string>
+
--log_caller <string>
-
If non-empty, write log files in this directory (default ``)
+
Comma-separated list of scopes for which to include called information, scopes can be any of [default] (default ``)
--log_output_level <string>
-
The minimum logging level of messages to output, can be one of "debug", "info", "warn", "error", or "none" (default `info`)
+
The minimum logging level of messages to output, can be one of [debug, info, warn, error, none] (default `default:info`)
--log_rotate <string>
@@ -373,7 +272,7 @@
istio_ca version
--log_stacktrace_level <string>
-
The minimum logging level at which stack traces are captured, can be one of "debug", "info", "warn", "error", or "none" (default `none`)
+
The minimum logging level at which stack traces are captured, can be one of [debug, info, warn, error, none] (default `default:none`)
--log_target <stringArray>
@@ -381,29 +280,9 @@
istio_ca version
The set of paths where to output the log. This can be any path as well as the special values stdout and stderr (default `[stdout]`)
-
--logtostderr
-
-
log to standard error instead of files
-
-
--short
-s
Displays a short form of the version information
-
-
--stderrthreshold <severity>
-
-
logs at or above this threshold go to stderr (default `2`)
-
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
Whether to format output as JSON or in plain console-friendly format
+
+
+
--log_caller <string>
+
+
Comma-separated list of scopes for which to include called information, scopes can be any of [default] (default ``)
+
+
+
--log_output_level <string>
+
+
The minimum logging level of messages to output, can be one of [debug, info, warn, error, none] (default `default:info`)
+
+
+
--log_rotate <string>
+
+
The path for the optional rotating log file (default ``)
+
+
+
--log_rotate_max_age <int>
+
+
The maximum age in days of a log file beyond which the file is rotated (0 indicates no limit) (default `30`)
+
+
+
--log_rotate_max_backups <int>
+
+
The maximum number of log file backups to keep before older files are deleted (0 indicates no limit) (default `1000`)
+
+
+
--log_rotate_max_size <int>
+
+
The maximum size in megabytes of a log file beyond which the file is rotated (default `104857600`)
+
+
+
--log_stacktrace_level <string>
+
+
The minimum logging level at which stack traces are captured, can be one of [debug, info, warn, error, none] (default `default:none`)
+
+
+
--log_target <stringArray>
+
+
The set of paths where to output the log. This can be any path as well as the special values stdout and stderr (default `[stdout]`)
+
+
+
--namespace <string>
+
-n
+
Config namespace (default ``)
+
+
+
--platform <string>
+
-p
+
Istio host platform (default `kube`)
+
+
+
+
istioctl experimental convert-networking-config
+
Converts sets of v1alpha1 configs to v1alpha3 equivalents on a best effort basis. The output should be considered a starting point for your v1alpha3 configs and probably require some minor modification. Warnings will (hopefully) be generated where configs cannot be converted perfectly, or in certain edge cases. The input must be the set of configs that would be in place in an environment at a given time. This allows the command to attempt to create and merge output configs intelligently.Output configs are given the namespace and domain of the first input config so it is recommended that input configs be part of the same namespace and domain.
+Prints the metrics for the specified service(s) when running in Kubernetes.
+
This command finds a Prometheus pod running in the specified istio system
+namespace. It then executes a series of queries per requested service to
+find the following top-level service metrics: total requests per second,
+error rate, and request latency at p50, p90, and p99 percentiles. The
+query results are printed to the console, organized by service name.
+
All metrics returned are from server-side reports. This means that latencies
+and error rates are from the perspective of the service itself and not of an
+individual client (or aggregate set of clients). Rates and latencies are
+calculated over a time interval of 1 minute.
+
Whether to format output as JSON or in plain console-friendly format
+
+
+
--log_caller <string>
+
+
Comma-separated list of scopes for which to include called information, scopes can be any of [default] (default ``)
+
+
+
--log_output_level <string>
+
+
The minimum logging level of messages to output, can be one of [debug, info, warn, error, none] (default `default:info`)
+
+
+
--log_rotate <string>
+
+
The path for the optional rotating log file (default ``)
+
+
+
--log_rotate_max_age <int>
+
+
The maximum age in days of a log file beyond which the file is rotated (0 indicates no limit) (default `30`)
+
+
+
--log_rotate_max_backups <int>
-
when logging hits line file:N, emit a stack trace (default `:0`)
+
The maximum number of log file backups to keep before older files are deleted (0 indicates no limit) (default `1000`)
-
--log_callers
+
--log_rotate_max_size <int>
+
+
The maximum size in megabytes of a log file beyond which the file is rotated (default `104857600`)
+
+
+
--log_stacktrace_level <string>
+
+
The minimum logging level at which stack traces are captured, can be one of [debug, info, warn, error, none] (default `default:none`)
+
+
+
--log_target <stringArray>
-
Include caller information, useful for debugging
+
The set of paths where to output the log. This can be any path as well as the special values stdout and stderr (default `[stdout]`)
+
+
+
--namespace <string>
+
-n
+
Config namespace (default ``)
+
+
+
--platform <string>
+
-p
+
Istio host platform (default `kube`)
+
+
+
+
Examples
+
+# Retrieve service metrics for productpage service
+istioctl experimental metrics productpage
+
+# Retrieve service metrics for various services in the different namespaces
+istioctl experimental metrics productpage.foo reviews.bar ratings.baz
+
+
+
istioctl experimental rbac
+
+A group of commands used to interact with Istio RBAC policies. For example, Query whether a specific
+request is allowed or denied under the current Istio RBAC policies.
Whether to format output as JSON or in plain console-friendly format
+
+
+
--log_caller <string>
+
+
Comma-separated list of scopes for which to include called information, scopes can be any of [default] (default ``)
--log_output_level <string>
-
The minimum logging level of messages to output, can be one of "debug", "info", "warn", "error", or "none" (default `info`)
+
The minimum logging level of messages to output, can be one of [debug, info, warn, error, none] (default `default:info`)
--log_rotate <string>
@@ -481,7 +759,7 @@
istioctl deregister
--log_stacktrace_level <string>
-
The minimum logging level at which stack traces are captured, can be one of "debug", "info", "warn", "error", or "none" (default `none`)
+
The minimum logging level at which stack traces are captured, can be one of [debug, info, warn, error, none] (default `default:none`)
--log_target <stringArray>
@@ -498,18 +776,123 @@
istioctl deregister
-p
Istio host platform (default `kube`)
+
+
+
Examples
+
# Query if user test is allowed to GET /v1/health of service rating.
+istioctl experimental rbac can -u test GET rating /v1/health
+
+
istioctl experimental rbac can
+
+This command lets you query whether a specific request will be allowed or denied under current Istio
+RBAC policies. It constructs a fake request with the custom subject and action specified in the command
+line to check if your Istio RBAC policies are working as expected. Note the fake request is only used
+locally to evaluate the effect of the Istio RBAC policies, no actual request will be issued.
+
METHOD is the HTTP method being taken, like GET, POST, etc. SERVICE is the short service name the action
+is being taken on. PATH is the HTTP path within the service.
+
istioctl experimental rbac can METHOD SERVICE PATH [flags]
+
+
+
+
Flags
+
Shorthand
+
Description
+
+
+
+
--action-properties <stringArray>
+
-a
+
[Action] Additional data about the action. Specified as name1=value1,name2=value2,... (default `[]`)
+
+
+
--groups <string>
+
-g
+
[Subject] Group name/ID that the subject represents. (default ``)
Whether to format output as JSON or in plain console-friendly format
+
+
+
--log_caller <string>
+
+
Comma-separated list of scopes for which to include called information, scopes can be any of [default] (default ``)
+
+
+
--log_output_level <string>
+
+
The minimum logging level of messages to output, can be one of [debug, info, warn, error, none] (default `default:info`)
+
+
+
--log_rotate <string>
+
+
The path for the optional rotating log file (default ``)
+
+
+
--log_rotate_max_age <int>
+
+
The maximum age in days of a log file beyond which the file is rotated (0 indicates no limit) (default `30`)
+
+
+
--log_rotate_max_backups <int>
+
+
The maximum number of log file backups to keep before older files are deleted (0 indicates no limit) (default `1000`)
+
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
+
--log_rotate_max_size <int>
+
+
The maximum size in megabytes of a log file beyond which the file is rotated (default `104857600`)
+
+
+
--log_stacktrace_level <string>
+
+
The minimum logging level at which stack traces are captured, can be one of [debug, info, warn, error, none] (default `default:none`)
-
--vmodule <moduleSpec>
+
--log_target <stringArray>
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
+
The set of paths where to output the log. This can be any path as well as the special values stdout and stderr (default `[stdout]`)
+
+
+
--namespace <string>
+
-n
+
Config namespace (default ``)
+
+
+
--platform <string>
+
-p
+
Istio host platform (default `kube`)
+
+
+
--subject-properties <stringArray>
+
-s
+
[Subject] Additional data about the subject. Specified as name1=value1,name2=value2,... (default `[]`)
+
+
+
--user <string>
+
-u
+
[Subject] User name/ID that the subject represents. (default ``)
+
Examples
+
# Query if user test is allowed to GET /v1/health of service rating.
+istioctl experimental rbac can -u test GET rating /v1/health
+
+# Query if service product-page is allowed to POST to /data of service rating with label version=dev.
+istioctl experimental rbac can -s service=product-page POST rating /data -a version=dev
+
istioctl gen-deploy
istioctl gen-deploy produces deployment files to run the minimum Istio control for the set of features requested by the --feature flag. If no features are provided, we create deployments for the default control plane: Pilot, Mixer, CA, and Ingress Proxies, with mTLS enabled.
istioctl gen-deploy [flags]
@@ -544,7 +927,7 @@
istioctl gen-deploy
--hyperkube-tag <Hyperkube>
-
The tag to use to pull the Hyperkube container (default `0.4.0`)
+
The tag to use to pull the Hyperkube container (default `v1.7.6_coreos.0`)
--ingress-node-port <uint16>
@@ -567,19 +950,14 @@
istioctl gen-deploy
Whether to format output as JSON or in plain console-friendly format
-
--log_backtrace_at <traceLocation>
+
--log_caller <string>
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_callers
-
-
Include caller information, useful for debugging
+
Comma-separated list of scopes for which to include called information, scopes can be any of [default] (default ``)
--log_output_level <string>
-
The minimum logging level of messages to output, can be one of "debug", "info", "warn", "error", or "none" (default `info`)
+
The minimum logging level of messages to output, can be one of [debug, info, warn, error, none] (default `default:info`)
--log_rotate <string>
@@ -604,7 +982,7 @@
istioctl gen-deploy
--log_stacktrace_level <string>
-
The minimum logging level at which stack traces are captured, can be one of "debug", "info", "warn", "error", or "none" (default `none`)
+
The minimum logging level at which stack traces are captured, can be one of [debug, info, warn, error, none] (default `default:none`)
--log_target <stringArray>
@@ -627,20 +1005,10 @@
istioctl gen-deploy
Istio host platform (default `kube`)
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
--values <string>
Path to the Helm values.yaml file used to render YAML deployments locally when --out=yaml. Flag values are ignored in favor of using the file directly. (default ``)
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
-
Examples
@@ -673,19 +1041,14 @@
istioctl get
Whether to format output as JSON or in plain console-friendly format
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_callers
+
--log_caller <string>
-
Include caller information, useful for debugging
+
Comma-separated list of scopes for which to include called information, scopes can be any of [default] (default ``)
--log_output_level <string>
-
The minimum logging level of messages to output, can be one of "debug", "info", "warn", "error", or "none" (default `info`)
+
The minimum logging level of messages to output, can be one of [debug, info, warn, error, none] (default `default:info`)
--log_rotate <string>
@@ -710,7 +1073,7 @@
istioctl get
--log_stacktrace_level <string>
-
The minimum logging level at which stack traces are captured, can be one of "debug", "info", "warn", "error", or "none" (default `none`)
+
The minimum logging level at which stack traces are captured, can be one of [debug, info, warn, error, none] (default `default:none`)
--log_target <stringArray>
@@ -732,16 +1095,6 @@
istioctl get
-p
Istio host platform (default `kube`)
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
-
Examples
@@ -800,6 +1153,16 @@
istioctl kube-inject
Emit sidecar template based on parameterized flags
+
--excludeIPRanges <string>
+
+
Comma separated list of IP ranges in CIDR form. If set, outbound traffic will not be redirected for these IP ranges. Exclusions are only applied if configured to redirect all outbound traffic. By default, no IP ranges are excluded. (default ``)
+
+
+
--excludeInboundPorts <string>
+
+
Comma separated list of inbound ports. If set, inbound traffic will not be redirected for those ports. Exclusions are only applied if configured to redirect all inbound traffic. By default, no ports are excluded. (default ``)
+
+
--filename <string>
-f
Input Kubernetes resource filename (default ``)
@@ -817,7 +1180,12 @@
istioctl kube-inject
--includeIPRanges <string>
-
Comma separated list of IP ranges in CIDR form. If set, only redirect outbound traffic to Envoy for IP ranges. Otherwise all outbound traffic is redirected (default ``)
+
Comma separated list of IP ranges in CIDR form. If set, only redirect outbound traffic to Envoy for these IP ranges. All outbound traffic can be redirected with the wildcard character '*'. Defaults to '*'. (default `*`)
+
+
+
--includeInboundPorts <string>
+
+
Comma separated list of inbound ports for which traffic is to be redirected to Envoy. All ports can be redirected with the wildcard character '*'. Defaults to '*'. (default `*`)
--injectConfigFile <string>
@@ -845,19 +1213,14 @@
istioctl kube-inject
Whether to format output as JSON or in plain console-friendly format
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_callers
+
--log_caller <string>
-
Include caller information, useful for debugging
+
Comma-separated list of scopes for which to include called information, scopes can be any of [default] (default ``)
--log_output_level <string>
-
The minimum logging level of messages to output, can be one of "debug", "info", "warn", "error", or "none" (default `info`)
+
The minimum logging level of messages to output, can be one of [debug, info, warn, error, none] (default `default:info`)
--log_rotate <string>
@@ -882,7 +1245,7 @@
istioctl kube-inject
--log_stacktrace_level <string>
-
The minimum logging level at which stack traces are captured, can be one of "debug", "info", "warn", "error", or "none" (default `none`)
+
The minimum logging level at which stack traces are captured, can be one of [debug, info, warn, error, none] (default `default:none`)
--log_target <stringArray>
@@ -930,20 +1293,10 @@
istioctl kube-inject
Docker tag (default `unknown`)
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
--verbosity <int>
Runtime verbosity (default `2`)
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
-
Examples
@@ -965,10 +1318,15 @@
Examples
istioctl proxy-config
-Retrieves the static/bootstrap proxy configuration for the specified pod when running in Kubernetes.
-Support for other environments to follow.
-
-
istioctl proxy-config <pod-name> [flags]
+Retrieves proxy configuration for the specified pod from the endpoint proxy or Pilot when running in Kubernetes.
+It is also able to retrieve the state of the entire mesh by using mesh instead of <pod-name>. This is only available when querying Pilot.
+
Whether to format output as JSON or in plain console-friendly format
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_callers
+
--log_caller <string>
-
Include caller information, useful for debugging
+
Comma-separated list of scopes for which to include called information, scopes can be any of [default] (default ``)
--log_output_level <string>
-
The minimum logging level of messages to output, can be one of "debug", "info", "warn", "error", or "none" (default `info`)
+
The minimum logging level of messages to output, can be one of [debug, info, warn, error, none] (default `default:info`)
--log_rotate <string>
@@ -1030,7 +1383,7 @@
istioctl proxy-config
--log_stacktrace_level <string>
-
The minimum logging level at which stack traces are captured, can be one of "debug", "info", "warn", "error", or "none" (default `none`)
+
The minimum logging level at which stack traces are captured, can be one of [debug, info, warn, error, none] (default `default:none`)
--log_target <stringArray>
@@ -1047,21 +1400,20 @@
istioctl proxy-config
-p
Istio host platform (default `kube`)
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
-
Examples
-
# Retrieve config for productpage-v1-bb8d5cbc7-k7qbm pod
-istioctl proxy-config productpage-v1-bb8d5cbc7-k7qbm
+
# Retrieve all config for productpage-v1-bb8d5cbc7-k7qbm pod from the endpoint proxy
+istioctl proxy-config endpoint productpage-v1-bb8d5cbc7-k7qbm
+
+# Retrieve eds config for productpage-v1-bb8d5cbc7-k7qbm pod from Pilot
+istioctl proxy-config pilot productpage-v1-bb8d5cbc7-k7qbm eds
+
+# Retrieve ads config for the mesh from Pilot
+istioctl proxy-config pilot mesh ads
+
+# Retrieve static config for productpage-v1-bb8d5cbc7-k7qbm pod in the application namespace from the endpoint proxy
+istioctl proxy-config endpoint -n application productpage-v1-bb8d5cbc7-k7qbm static
istioctl register
Registers a service instance (e.g. VM) joining the mesh
@@ -1100,19 +1452,14 @@
istioctl register
Whether to format output as JSON or in plain console-friendly format
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_callers
+
--log_caller <string>
-
Include caller information, useful for debugging
+
Comma-separated list of scopes for which to include called information, scopes can be any of [default] (default ``)
--log_output_level <string>
-
The minimum logging level of messages to output, can be one of "debug", "info", "warn", "error", or "none" (default `info`)
+
The minimum logging level of messages to output, can be one of [debug, info, warn, error, none] (default `default:info`)
--log_rotate <string>
@@ -1137,7 +1484,7 @@
istioctl register
--log_stacktrace_level <string>
-
The minimum logging level at which stack traces are captured, can be one of "debug", "info", "warn", "error", or "none" (default `none`)
+
The minimum logging level at which stack traces are captured, can be one of [debug, info, warn, error, none] (default `default:none`)
--log_target <stringArray>
@@ -1159,16 +1506,6 @@
istioctl register
-s
Service account to link to the service (default `default`)
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
-
istioctl replace
@@ -1203,19 +1540,14 @@
istioctl replace
Whether to format output as JSON or in plain console-friendly format
-
--log_backtrace_at <traceLocation>
+
--log_caller <string>
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_callers
-
-
Include caller information, useful for debugging
+
Comma-separated list of scopes for which to include called information, scopes can be any of [default] (default ``)
--log_output_level <string>
-
The minimum logging level of messages to output, can be one of "debug", "info", "warn", "error", or "none" (default `info`)
+
The minimum logging level of messages to output, can be one of [debug, info, warn, error, none] (default `default:info`)
--log_rotate <string>
@@ -1240,7 +1572,7 @@
istioctl replace
--log_stacktrace_level <string>
-
The minimum logging level at which stack traces are captured, can be one of "debug", "info", "warn", "error", or "none" (default `none`)
+
The minimum logging level at which stack traces are captured, can be one of [debug, info, warn, error, none] (default `default:none`)
--log_target <stringArray>
@@ -1257,16 +1589,6 @@
istioctl replace
-p
Istio host platform (default `kube`)
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
-
Examples
@@ -1299,19 +1621,14 @@
istioctl version
Whether to format output as JSON or in plain console-friendly format
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_callers
+
--log_caller <string>
-
Include caller information, useful for debugging
+
Comma-separated list of scopes for which to include called information, scopes can be any of [default] (default ``)
--log_output_level <string>
-
The minimum logging level of messages to output, can be one of "debug", "info", "warn", "error", or "none" (default `info`)
+
The minimum logging level of messages to output, can be one of [debug, info, warn, error, none] (default `default:info`)
--log_rotate <string>
@@ -1336,7 +1653,7 @@
istioctl version
--log_stacktrace_level <string>
-
The minimum logging level at which stack traces are captured, can be one of "debug", "info", "warn", "error", or "none" (default `none`)
+
The minimum logging level at which stack traces are captured, can be one of [debug, info, warn, error, none] (default `default:none`)
--log_target <stringArray>
@@ -1358,15 +1675,5 @@
istioctl version
-s
Displays a short form of the version information
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
-
diff --git a/_docs/reference/commands/mixc.html b/_docs/reference/commands/mixc.html
index 118e1b7c3a187..428e0c0ecf9de 100644
--- a/_docs/reference/commands/mixc.html
+++ b/_docs/reference/commands/mixc.html
@@ -1,36 +1,12 @@
---
title: mixc
-overview: Utility to trigger direct calls to Mixer's API.
+description: Utility to trigger direct calls to Mixer's API.
layout: pkg-collateral-docs
number_of_entries: 5
---
This command lets you interact with a running instance of
Mixer. Note that you need a pretty good understanding of Mixer's
API in order to use this command.
-
-
-
Flags
-
Shorthand
-
Description
-
-
-
-
--trace_jaeger_url <string>
-
-
URL of Jaeger HTTP collector (example: 'http://jaeger:14268/api/traces?format=jaeger.thrift'). (default ``)
-
-
-
--trace_log_spans
-
-
Whether or not to log trace spans.
-
-
-
--trace_zipkin_url <string>
-
-
URL of Zipkin collector (example: 'http://zipkin:9411/api/v1/spans'). (default ``)
-
-
-
mixc check
The Check method is used to perform precondition checks and quota allocations. Mixer
expects a set of attributes as input, which it uses, along with
@@ -224,20 +200,5 @@
mixc version
-s
Displays a short form of the version information
-
-
--trace_jaeger_url <string>
-
-
URL of Jaeger HTTP collector (example: 'http://jaeger:14268/api/traces?format=jaeger.thrift'). (default ``)
-
-
-
--trace_log_spans
-
-
Whether or not to log trace spans.
-
-
-
--trace_zipkin_url <string>
-
-
URL of Zipkin collector (example: 'http://zipkin:9411/api/v1/spans'). (default ``)
-
diff --git a/_docs/reference/commands/mixs.html b/_docs/reference/commands/mixs.html
index 41e7d6f2e80ea..ccbda2dfe3587 100644
--- a/_docs/reference/commands/mixs.html
+++ b/_docs/reference/commands/mixs.html
@@ -1,245 +1,25 @@
---
title: mixs
-overview: Mixer is Istio's abstraction on top of infrastructure backends.
+description: Mixer is Istio's abstraction on top of infrastructure backends.
layout: pkg-collateral-docs
-number_of_entries: 10
+number_of_entries: 9
---
Mixer is Istio's point of integration with infrastructure backends and is the
nexus for policy evaluation and telemetry reporting.
-
-
-
Flags
-
Shorthand
-
Description
-
-
-
-
--alsologtostderr
-
-
log to standard error as well as files
-
-
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_dir <string>
-
-
If non-empty, write log files in this directory (default ``)
-
-
-
--logtostderr
-
-
log to standard error instead of files
-
-
-
--stderrthreshold <severity>
-
-
logs at or above this threshold go to stderr (default `2`)
-
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
-
-
-
mixs crd
CRDs (CustomResourceDefinition) available in Mixer
-
-
-
Flags
-
Shorthand
-
Description
-
-
-
-
--alsologtostderr
-
-
log to standard error as well as files
-
-
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_dir <string>
-
-
If non-empty, write log files in this directory (default ``)
-
-
-
--logtostderr
-
-
log to standard error instead of files
-
-
-
--stderrthreshold <severity>
-
-
logs at or above this threshold go to stderr (default `2`)
-
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
-
-
-
mixs crd adapter
List CRDs for available adapters
mixs crd adapter [flags]
-
-
-
Flags
-
Shorthand
-
Description
-
-
-
-
--alsologtostderr
-
-
log to standard error as well as files
-
-
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_dir <string>
-
-
If non-empty, write log files in this directory (default ``)
-
-
-
--logtostderr
-
-
log to standard error instead of files
-
-
-
--stderrthreshold <severity>
-
-
logs at or above this threshold go to stderr (default `2`)
-
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
-
-
-
mixs crd all
List all CRDs
mixs crd all [flags]
-
-
-
Flags
-
Shorthand
-
Description
-
-
-
-
--alsologtostderr
-
-
log to standard error as well as files
-
-
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_dir <string>
-
-
If non-empty, write log files in this directory (default ``)
-
-
-
--logtostderr
-
-
log to standard error instead of files
-
-
-
--stderrthreshold <severity>
-
-
logs at or above this threshold go to stderr (default `2`)
-
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
-
-
-
mixs crd instance
List CRDs for available instance kinds (mesh functions)
mixs crd instance [flags]
-
-
-
Flags
-
Shorthand
-
Description
-
-
-
-
--alsologtostderr
-
-
log to standard error as well as files
-
-
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_dir <string>
-
-
If non-empty, write log files in this directory (default ``)
-
-
-
--logtostderr
-
-
log to standard error instead of files
-
-
-
--stderrthreshold <severity>
-
-
logs at or above this threshold go to stderr (default `2`)
-
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
-
-
-
mixs probe
Check the liveness or readiness of a locally-running server
mixs probe [flags]
@@ -247,100 +27,53 @@
mixs probe
Flags
-
Shorthand
Description
-
--alsologtostderr
-
-
log to standard error as well as files
-
-
--interval <duration>
-
Duration used for checking the target file's last modified time. (default `0s`)
--log_as_json
-
Whether to format output as JSON or in plain console-friendly format
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_callers
-
-
Include caller information, useful for debugging
-
-
-
--log_dir <string>
-
-
If non-empty, write log files in this directory (default ``)
+
--log_caller <string>
+
Comma-separated list of scopes for which to include caller information, scopes can be any of [adapters, default, attributes] (default ``)
--log_output_level <string>
-
-
The minimum logging level of messages to output, can be one of "debug", "info", "warn", "error", or "none" (default `info`)
+
Comma-separated minimum per-scope logging level of messages to output, in the form of <scope>:<level>,<scope>:<level>,... where scope can be one of [adapters, default, attributes] and level can be one of [debug, info, warn, error, none] (default `default:info`)
--log_rotate <string>
-
The path for the optional rotating log file (default ``)
--log_rotate_max_age <int>
-
The maximum age in days of a log file beyond which the file is rotated (0 indicates no limit) (default `30`)
--log_rotate_max_backups <int>
-
The maximum number of log file backups to keep before older files are deleted (0 indicates no limit) (default `1000`)
--log_rotate_max_size <int>
-
The maximum size in megabytes of a log file beyond which the file is rotated (default `104857600`)
--log_stacktrace_level <string>
-
-
The minimum logging level at which stack traces are captured, can be one of "debug", "info", "warn", "error", or "none" (default `none`)
+
Comma-separated minimum per-scope logging level at which stack traces are captured, in the form of <scope>:<level>,<scope>:<level>,... where scope can be one of [adapters, default, attributes] and level can be one of [debug, info, warn, error, none] (default `default:none`)
--log_target <stringArray>
-
The set of paths where to output the log. This can be any path as well as the special values stdout and stderr (default `[stdout]`)
-
--logtostderr
-
-
log to standard error instead of files
-
-
--probe-path <string>
-
Path of the file for checking the availability. (default ``)
-
-
--stderrthreshold <severity>
-
-
logs at or above this threshold go to stderr (default `2`)
-
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
-
mixs server
@@ -360,11 +93,6 @@
mixs server
Max number of goroutines in the adapter worker pool (default `1024`)
-
--alsologtostderr
-
-
log to standard error as well as files
-
-
--apiWorkerPoolSize <int>
Max number of goroutines in the API worker pool (default `1024`)
@@ -380,9 +108,9 @@
mixs server
URL of the config store. Use k8s://path_to_kubeconfig or fs:// for file system. If path_to_kubeconfig is empty, in-cluster kubeconfig is used. (default ``)
-
--expressionEvalCacheSize <int>
+
--ctrlz_port <uint16>
-
Number of entries in the expression cache (default `1024`)
+
The IP port to use for the ControlZ introspection facility (default `9876`)
--livenessProbeInterval <duration>
@@ -400,24 +128,14 @@
mixs server
Whether to format output as JSON or in plain console-friendly format
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_callers
+
--log_caller <string>
-
Include caller information, useful for debugging
-
-
-
--log_dir <string>
-
-
If non-empty, write log files in this directory (default ``)
+
Comma-separated list of scopes for which to include caller information, scopes can be any of [adapters, default, attributes] (default ``)
--log_output_level <string>
-
The minimum logging level of messages to output, can be one of "debug", "info", "warn", "error", or "none" (default `info`)
+
Comma-separated minimum per-scope logging level of messages to output, in the form of <scope>:<level>,<scope>:<level>,... where scope can be one of [adapters, default, attributes] and level can be one of [debug, info, warn, error, none] (default `default:info`)
--log_rotate <string>
@@ -442,7 +160,7 @@
mixs server
--log_stacktrace_level <string>
-
The minimum logging level at which stack traces are captured, can be one of "debug", "info", "warn", "error", or "none" (default `none`)
+
Comma-separated minimum per-scope logging level at which stack traces are captured, in the form of <scope>:<level>,<scope>:<level>,... where scope can be one of [adapters, default, attributes] and level can be one of [debug, info, warn, error, none] (default `default:none`)
--log_target <stringArray>
@@ -450,11 +168,6 @@
mixs server
The set of paths where to output the log. This can be any path as well as the special values stdout and stderr (default `[stdout]`)
-
--logtostderr
-
-
log to standard error instead of files
-
-
--maxConcurrentStreams <uint>
Maximum number of outstanding RPCs per connection (default `1024`)
@@ -475,6 +188,11 @@
mixs server
TCP port to use for Mixer's gRPC API (default `9091`)
+
--profile
+
+
Enable profiling via web interface host:port/debug/pprof
+
+
--readinessProbeInterval <duration>
Interval of updating file for the readiness probe. (default `0s`)
@@ -490,11 +208,6 @@
mixs server
If true, each request to Mixer will be executed in a single go routine (useful for debugging)
-
--stderrthreshold <severity>
-
-
logs at or above this threshold go to stderr (default `2`)
-
-
--trace_jaeger_url <string>
URL of Jaeger HTTP collector (example: 'http://jaeger:14268/api/traces?format=jaeger.thrift'). (default ``)
@@ -509,109 +222,6 @@
mixs server
URL of Zipkin collector (example: 'http://zipkin:9411/api/v1/spans'). (default ``)
-
-
--useNewRuntime
-
-
Use the new runtime code for processing requests.
-
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
-
-
-
-
mixs validator
-
Runs an https server for validations. Works as an external admission webhook for k8s
-
mixs validator [flags]
-
-
-
-
Flags
-
Shorthand
-
Description
-
-
-
-
--alsologtostderr
-
-
log to standard error as well as files
-
-
-
--external-admission-webook-name <string>
-
-
the name of the external admission webhook registration. Needs to be a domain with at least three segments separated by dots. (default `mixer-webhook.istio.io`)
-
-
-
--kubeconfig <string>
-
-
Use a Kubernetes configuration file instead of in-cluster configuration (default ``)
-
-
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_dir <string>
-
-
If non-empty, write log files in this directory (default ``)
-
-
-
--logtostderr
-
-
log to standard error instead of files
-
-
-
--namespace <string>
-
-
the namespace where this webhook is deployed (default `istio-system`)
-
-
-
--port <int>
-
-p
-
the port number of the webhook (default `9099`)
-
-
-
--registration-delay <duration>
-
-
Time to delay webhook registration after starting webhook server (default `5s`)
-
-
-
--secret-name <string>
-
-
The name of k8s secret where the certificates are stored (default ``)
-
-
-
--stderrthreshold <severity>
-
-
logs at or above this threshold go to stderr (default `2`)
-
-
-
--target-namespaces <stringArray>
-
-
the list of namespaces where changes should be validated. Empty means to validate everything. Used for test only. (default `[]`)
-
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
-
-
-
--webhook-name <string>
-
-
the name of the webhook (default `istio-mixer-webhook`)
-
mixs version
@@ -626,44 +236,9 @@
mixs version
-
--alsologtostderr
-
-
log to standard error as well as files
-
-
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_dir <string>
-
-
If non-empty, write log files in this directory (default ``)
-
-
-
--logtostderr
-
-
log to standard error instead of files
-
-
--short
-s
Displays a short form of the version information
-
-
--stderrthreshold <severity>
-
-
logs at or above this threshold go to stderr (default `2`)
-
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
Time to delay webhook registration after starting webhook server (default `0s`)
+
--log_as_json
+
Whether to format output as JSON or in plain console-friendly format
-
--admission-secret <string>
-
-
Name of k8s secret for pilot webhook certs (default `pilot-webhook`)
+
--log_caller <string>
+
Comma-separated list of scopes for which to include caller information, scopes can be any of [default] (default ``)
-
--admission-service <string>
-
-
Service name the admission controller uses during registration (default `istio-pilot`)
+
--log_output_level <string>
+
The minimum logging level of messages to output, can be one of [debug, info, warn, error, none] (default `default:info`)
-
--admission-service-port <int>
-
-
HTTPS port of the admission service. Must be 443 if service has more than one port (default `443`)
+
--log_rotate <string>
+
The path for the optional rotating log file (default ``)
-
--admission-webhook-name <string>
-
-
Webhook name for Pilot admission controller (default `pilot-webhook.istio.io`)
+
--log_rotate_max_age <int>
+
The maximum age in days of a log file beyond which the file is rotated (0 indicates no limit) (default `30`)
+
+
+
--log_rotate_max_backups <int>
+
The maximum number of log file backups to keep before older files are deleted (0 indicates no limit) (default `1000`)
+
+
+
--log_rotate_max_size <int>
+
The maximum size in megabytes of a log file beyond which the file is rotated (default `104857600`)
+
+
+
--log_stacktrace_level <string>
+
The minimum logging level at which stack traces are captured, can be one of [debug, info, warn, error, none] (default `default:none`)
+
+
+
--log_target <stringArray>
+
The set of paths where to output the log. This can be any path as well as the special values stdout and stderr (default `[stdout]`)
+
+
+
pilot-discovery discovery
+
Start Istio proxy discovery service
+
pilot-discovery discovery [flags]
+
+
+
+
Flags
+
Shorthand
+
Description
+
+
--appNamespace <string>
-a
@@ -121,29 +119,19 @@
pilot-discovery discovery
Cloud Foundry config file (default ``)
-
--clusterRegistriesDir <string>
-
-
Directory for a file-based cluster config store (default ``)
-
-
-
--configDir <string>
-
-
Directory to watch for updates to config yaml files. If specified, the files will be used as the source of config, rather than a CRD client. (default ``)
-
-
-
--consulconfig <string>
+
--clusterRegistriesConfigMap <string>
-
Consul Config file for discovery (default ``)
+
ConfigMap map for clusters config store (default ``)
-
--consulserverInterval <duration>
+
--clusterRegistriesNamespace <string>
-
Interval (in seconds) for polling the Consul service registry (default `2s`)
+
Namespace for ConfigMap which stores clusters configs (default `istio-system`)
-
--consulserverURL <string>
+
--configDir <string>
-
URL for the Consul server (default ``)
+
Directory to watch for updates to config yaml files. If specified, the files will be used as the source of config, rather than a CRD client. (default ``)
--discovery_cache
@@ -156,14 +144,9 @@
pilot-discovery discovery
DNS domain suffix (default `cluster.local`)
-
--eurekaserverInterval <duration>
+
--grpcAddr <string>
-
Interval (in seconds) for polling the Eureka service registry (default `2s`)
-
-
-
--eurekaserverURL <string>
-
-
URL for the Eureka server (default ``)
+
Discovery service grpc address (default `:15010`)
--kubeconfig <string>
@@ -176,19 +159,14 @@
pilot-discovery discovery
Whether to format output as JSON or in plain console-friendly format
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_callers
+
--log_caller <string>
-
Include caller information, useful for debugging
+
Comma-separated list of scopes for which to include caller information, scopes can be any of [default] (default ``)
--log_output_level <string>
-
The minimum logging level of messages to output, can be one of "debug", "info", "warn", "error", or "none" (default `info`)
+
The minimum logging level of messages to output, can be one of [debug, info, warn, error, none] (default `default:info`)
--log_rotate <string>
@@ -213,7 +191,7 @@
pilot-discovery discovery
--log_stacktrace_level <string>
-
The minimum logging level at which stack traces are captured, can be one of "debug", "info", "warn", "error", or "none" (default `none`)
+
The minimum logging level at which stack traces are captured, can be one of [debug, info, warn, error, none] (default `default:none`)
--log_target <stringArray>
@@ -256,19 +234,14 @@
pilot-discovery discovery
Controller resync interval (default `1m0s`)
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
+
--secureGrpcAddr <string>
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
+
Discovery service grpc address, with https (default `:15012`)
--webhookEndpoint <string>
-
Webhook API endpoint (supports DNS, IP, and unix domain socket. (default ``)
+
Webhook API endpoint (supports http://sockethost, and unix:///absolute/path/to/socket (default ``)
@@ -289,19 +262,14 @@
pilot-discovery version
Whether to format output as JSON or in plain console-friendly format
-
--log_backtrace_at <traceLocation>
-
-
when logging hits line file:N, emit a stack trace (default `:0`)
-
-
-
--log_callers
+
--log_caller <string>
-
Include caller information, useful for debugging
+
Comma-separated list of scopes for which to include caller information, scopes can be any of [default] (default ``)
--log_output_level <string>
-
The minimum logging level of messages to output, can be one of "debug", "info", "warn", "error", or "none" (default `info`)
+
The minimum logging level of messages to output, can be one of [debug, info, warn, error, none] (default `default:info`)
--log_rotate <string>
@@ -326,7 +294,7 @@
pilot-discovery version
--log_stacktrace_level <string>
-
The minimum logging level at which stack traces are captured, can be one of "debug", "info", "warn", "error", or "none" (default `none`)
+
The minimum logging level at which stack traces are captured, can be one of [debug, info, warn, error, none] (default `default:none`)
--log_target <stringArray>
@@ -338,15 +306,5 @@
pilot-discovery version
-s
Displays a short form of the version information
-
-
--v <Level>
-
-v
-
log level for V logs (default `0`)
-
-
-
--vmodule <moduleSpec>
-
-
comma-separated list of pattern=N settings for file-filtered logging (default ``)
The CloudWatch adapter enables Istio to deliver metrics to
+Amazon CloudWatch.
+
+
To push metrics to CloudWatch using this adapter you need create an IAM user
+that has permissions to call cloudwatch APIs. The credentials for the user
+need to be available on the instance the adapter is running on
+(see AWS docs).
+
+
To activate the CloudWatch adapter, operators need to provide configuration for the
+cloudwatch adapter.
+
+
The handler configuration must contain the same metrics as the instance configuration.
+The metrics specified in both instance and handler configurations will be sent to CloudWatch.
The unit of the metric. Must be valid cloudwatch unit value.
+CloudWatch docs
+
+
+
+
+
+
+
Params.MetricDatum.Unit
+
+
+
+
+
Name
+
Description
+
+
+
+
+
None
+
+
+
+
+
Seconds
+
+
+
+
+
Microseconds
+
+
+
+
+
Milliseconds
+
+
+
+
+
Count
+
+
+
+
+
Bytes
+
+
+
+
+
Kilobytes
+
+
+
+
+
Megabytes
+
+
+
+
+
Gigabytes
+
+
+
+
+
Terabytes
+
+
+
+
+
Bits
+
+
+
+
+
Kilobits
+
+
+
+
+
Megabits
+
+
+
+
+
Gigabits
+
+
+
+
+
Terabits
+
+
+
+
+
Percent
+
+
+
+
+
Bytes_Second
+
+
+
+
+
Kilobytes_Second
+
+
+
+
+
Megabytes_Second
+
+
+
+
+
Gigabytes_Second
+
+
+
+
+
Terabytes_Second
+
+
+
+
+
Bits_Second
+
+
+
+
+
Kilobits_Second
+
+
+
+
+
Megabits_Second
+
+
+
+
+
Gigabits_Second
+
+
+
+
+
Terabits_Second
+
+
+
+
+
Count_Second
+
+
+
+
+
+
diff --git a/_docs/reference/config/adapters/datadog.html b/_docs/reference/config/adapters/datadog.html
new file mode 100644
index 0000000000000..ba773ec25600c
--- /dev/null
+++ b/_docs/reference/config/adapters/datadog.html
@@ -0,0 +1,167 @@
+---
+title: Datadog
+description: Adapter to deliver metrics to a dogstatsd agent for delivery to DataDog
+location: https://istio.io/docs/reference/config/adapters/datadog.html
+layout: protoc-gen-docs
+number_of_entries: 3
+---
+
The dogstatsd adapter is designed to deliver Istio metric instances to a
+listening DataDog agent.
+
+
Params
+
+
Configuration parameter for the DataDog adapter. These params control how Mixer telemetry is transformed and sent to a dogstatsd agent.
+
+
The adapter assumes that a dogstatsd agent is running as a sidecar or at some other endpoint that the Mixer can reach.
+Any dimension that is a part of the metric is converted to a tag automatically. The configuration of the DataDog agent/daemon is outside the scope of the adapter.
+
+
+
+
+
Field
+
Type
+
Description
+
+
+
+
+
address
+
string
+
+
Address of the dogstatsd server.
+Default: localhost:8125
+
+
+
+
+
prefix
+
string
+
+
Prefix to prepend to all metrics handled by the adapter. Metric “bar” with prefix “foo.” becomes “foo.bar” in DataDog. In order to make sure the metrics get populated into Datadog properly and avoid any billing issues, it’s important to leave the metric prefix to its default value of ‘istio.’
+Default: “istio.”
+
+
+
+
+
bufferLength
+
int32
+
+
Number of individual metrics to buffer before flushing metrics to the network. When buffered, metrics are flushed every 100ms or when the buffer is filled.
+When buffer is 0, metrics are not buffered.
+Default: 0
+
+
+
+
+
globalTags
+
map<string, string>
+
+
Tags to add to every metric. “global”: “tag” becomes “global:tag” in DataDog
+Default: []
+
+
+
+
+
sampleRate
+
double
+
+
Chance that any particular metric is sampled when emitted; can take the range [0, 1].
+Default: 1
Tags to add to the metric in addition to the dimensions. “tag”: “val” becomes “tag:val” in DataDog
+Default: []
+
+
+
+
+
+
+
Params.MetricInfo.Type
+
+
Describes the type of metric
+
+
+
+
+
Name
+
Description
+
+
+
+
+
UNKNOWN_TYPE
+
+
Default Unknown Type
+
+
+
+
+
COUNTER
+
+
Increments a DataDog counter
+
+
+
+
+
GAUGE
+
+
Sets the new value of a DataDog gauge
+
+
+
+
+
DISTRIBUTION
+
+
DISTRIBUTION is converted to a Timing Histogram for metrics with a time unit and a Histogram for all other units
+
+
+
+
+
+
diff --git a/_docs/reference/config/adapters/denier.html b/_docs/reference/config/adapters/denier.html
index f2fed94d27983..0ce5cb1d9371b 100644
--- a/_docs/reference/config/adapters/denier.html
+++ b/_docs/reference/config/adapters/denier.html
@@ -1,6 +1,6 @@
---
title: Denier
-overview: Adapter that always returns a precondition denial.
+description: Adapter that always returns a precondition denial.
location: https://istio.io/docs/reference/config/adapters/denier.html
layout: protoc-gen-docs
number_of_entries: 2
@@ -10,7 +10,7 @@
Params
-
Cnofiguration format for the Denier adapter.
+
Configuration format for the Denier adapter.
diff --git a/_docs/reference/config/adapters/fluentd.html b/_docs/reference/config/adapters/fluentd.html
index a43929170ef21..cbefeeef51042 100644
--- a/_docs/reference/config/adapters/fluentd.html
+++ b/_docs/reference/config/adapters/fluentd.html
@@ -1,6 +1,6 @@
---
title: Fluentd
-overview: Adapter that delivers logs to a fluentd daemon.
+description: Adapter that delivers logs to a fluentd daemon.
location: https://istio.io/docs/reference/config/adapters/fluentd.html
layout: protoc-gen-docs
number_of_entries: 1
diff --git a/_docs/reference/config/adapters/index.md b/_docs/reference/config/adapters/index.md
index a1f045029f0da..685b15ecee529 100644
--- a/_docs/reference/config/adapters/index.md
+++ b/_docs/reference/config/adapters/index.md
@@ -1,15 +1,13 @@
---
title: Adapters
-overview: Generated documentation for Mixer's adapters.
+description: Documentation for Mixer's adapters.
-order: 40
+weight: 40
-layout: docs
-type: markdown
toc: false
---
{% include section-index.html docs=site.docs %}
-To implement a new adapter for Mixer, please refer to the [Adapter Developer's Guide](https://github.com/istio/istio/blob/master/mixer/doc/adapters.md).
-
+To implement a new adapter for Mixer, please refer to the
+[Adapter Developer's Guide](https://github.com/istio/istio/wiki/Mixer-Adapter-Dev-Guide).
diff --git a/_docs/reference/config/adapters/kubernetesenv.html b/_docs/reference/config/adapters/kubernetesenv.html
index 9103b413e78ee..ebbbfc4088b45 100644
--- a/_docs/reference/config/adapters/kubernetesenv.html
+++ b/_docs/reference/config/adapters/kubernetesenv.html
@@ -1,6 +1,6 @@
---
title: Kubernetes Env
-overview: Adapter that extracts information from a Kubernetes environment.
+description: Adapter that extracts information from a Kubernetes environment.
location: https://istio.io/docs/reference/config/adapters/kubernetesenv.html
layout: protoc-gen-docs
number_of_entries: 1
diff --git a/_docs/reference/config/adapters/list.html b/_docs/reference/config/adapters/list.html
index 0ed11f451e5bf..74983a295a9a6 100644
--- a/_docs/reference/config/adapters/list.html
+++ b/_docs/reference/config/adapters/list.html
@@ -1,6 +1,6 @@
---
title: List
-overview: Adapter that performs whitelist or blacklist checks
+description: Adapter that performs whitelist or blacklist checks
location: https://istio.io/docs/reference/config/adapters/list.html
layout: protoc-gen-docs
number_of_entries: 2
@@ -132,7 +132,7 @@
Params.ListEntryType
REGEX
-
List entries are treated as re2 regexp. See https://github.com/google/re2/wiki/Syntax for syntax.
+
List entries are treated as re2 regexp. See here for the supported syntax.
diff --git a/_docs/reference/config/adapters/memquota.html b/_docs/reference/config/adapters/memquota.html
index e3e4435a2d866..18e9686af3110 100644
--- a/_docs/reference/config/adapters/memquota.html
+++ b/_docs/reference/config/adapters/memquota.html
@@ -1,6 +1,6 @@
---
title: Memory quota
-overview: Adapter for a simple in-memory quota management system.
+description: Adapter for a simple in-memory quota management system.
location: https://istio.io/docs/reference/config/adapters/memquota.html
layout: protoc-gen-docs
number_of_entries: 3
diff --git a/_docs/reference/config/adapters/opa.html b/_docs/reference/config/adapters/opa.html
index 851addeb2bbad..c7d5c15f81962 100644
--- a/_docs/reference/config/adapters/opa.html
+++ b/_docs/reference/config/adapters/opa.html
@@ -1,6 +1,6 @@
---
title: OPA
-overview: Adapter that implements an Open Policy Agent engine
+description: Adapter that implements an Open Policy Agent engine
location: https://istio.io/docs/reference/config/adapters/opa.html
layout: protoc-gen-docs
number_of_entries: 1
diff --git a/_docs/reference/config/adapters/prometheus.html b/_docs/reference/config/adapters/prometheus.html
index 52aabbb7dfb91..b597d4acb10bc 100644
--- a/_docs/reference/config/adapters/prometheus.html
+++ b/_docs/reference/config/adapters/prometheus.html
@@ -1,6 +1,6 @@
---
title: Prometheus
-overview: Adapter that exposes Istio metrics for ingestion by a Prometheus harvester.
+description: Adapter that exposes Istio metrics for ingestion by a Prometheus harvester.
location: https://istio.io/docs/reference/config/adapters/prometheus.html
layout: protoc-gen-docs
number_of_entries: 7
@@ -46,6 +46,21 @@
Params.MetricInfo
+
+
namespace
+
string
+
+
Optional. The namespace is used as a prefix on the metric names.
+An example: for a metric named “requestcount” with a namespace of “istio”,
+the full metric name becomes “istio_requestcount”.
+NOTE: The specified namespace should be a prometheus-safe name. If not, the adapter
+will attempt to convert the namespace to a prometheus-safe name.
+NOTE: Changing the value for this will potentially impact downstream integrations
+and should be used with caution.
+Default value: “istio”.
+
+
+
name
string
diff --git a/_docs/reference/config/adapters/rbac.html b/_docs/reference/config/adapters/rbac.html
index 0aeffc3d960d5..9f5897903f620 100644
--- a/_docs/reference/config/adapters/rbac.html
+++ b/_docs/reference/config/adapters/rbac.html
@@ -1,6 +1,6 @@
---
title: RBAC
-overview: Adapter that exposes Istio's Role-Based Access Control model.
+description: Adapter that exposes Istio's Role-Based Access Control model.
location: https://istio.io/docs/reference/config/adapters/rbac.html
layout: protoc-gen-docs
number_of_entries: 1
diff --git a/_docs/reference/config/adapters/redisquota.html b/_docs/reference/config/adapters/redisquota.html
index e8b274e935fe8..d45ea817863ea 100644
--- a/_docs/reference/config/adapters/redisquota.html
+++ b/_docs/reference/config/adapters/redisquota.html
@@ -1,6 +1,6 @@
---
title: Redis Quota
-overview: Adapter for a Redis-based quota management system.
+description: Adapter for a Redis-based quota management system.
location: https://istio.io/docs/reference/config/adapters/redisquota.html
layout: protoc-gen-docs
number_of_entries: 4
diff --git a/_docs/reference/config/adapters/servicecontrol.html b/_docs/reference/config/adapters/servicecontrol.html
index fe904191eeeb0..37d979423c9a7 100644
--- a/_docs/reference/config/adapters/servicecontrol.html
+++ b/_docs/reference/config/adapters/servicecontrol.html
@@ -1,6 +1,6 @@
---
title: Service Control
-overview: Adapter that delivers logs and metrics to Google Service Control
+description: Adapter that delivers logs and metrics to Google Service Control
location: https://istio.io/docs/reference/config/adapters/servicecontrol.html
layout: protoc-gen-docs
number_of_entries: 4
diff --git a/_docs/reference/config/adapters/solarwinds.html b/_docs/reference/config/adapters/solarwinds.html
index d68af8492a95e..9c5d2d6f575ef 100644
--- a/_docs/reference/config/adapters/solarwinds.html
+++ b/_docs/reference/config/adapters/solarwinds.html
@@ -1,12 +1,12 @@
---
title: SolarWinds
-overview: Adapter to deliver logs and metrics to Papertrail and AppOptics backends
+description: Adapter to deliver logs and metrics to Papertrail and AppOptics backends
location: https://istio.io/docs/reference/config/adapters/solarwinds.html
layout: protoc-gen-docs
number_of_entries: 3
---
The solarwinds adapter enables Istio to deliver log and metric data to the
-Papertrail logging backendu and the
+Papertrail logging backend and the
AppOptics monitoring backend.
Params
diff --git a/_docs/reference/config/adapters/stackdriver.html b/_docs/reference/config/adapters/stackdriver.html
index 427a1dbd5a333..e3abd4b6e0e41 100644
--- a/_docs/reference/config/adapters/stackdriver.html
+++ b/_docs/reference/config/adapters/stackdriver.html
@@ -1,9 +1,9 @@
---
title: Stackdriver
-overview: Adapter to deliver logs and metrics to Stackdriver
+description: Adapter to deliver logs and metrics to Stackdriver
location: https://istio.io/docs/reference/config/adapters/stackdriver.html
layout: protoc-gen-docs
-number_of_entries: 10
+number_of_entries: 11
---
The stackdriver adapter enables Istio to deliver log and metric data to the
Stackdriver logging and monitoring backend.
@@ -133,6 +133,14 @@
Params.LogInfo
If an HttpRequestMapping is provided, a HttpRequest object will be filled out for this log entry using the
variables named in the mapping to populate the fields of the request struct from the instance’s variables.
If SinkInfo is provided, Stackdriver logs would be exported to that sink.
+
@@ -198,6 +206,53 @@
Params.LogInfo.HttpRequestMapping
template variable name to map into HTTPRequest.RemoteIP
+
+
+
+
+
+
Params.LogInfo.SinkInfo
+
+
Contains information about sink to export Stackdriver logs to.
+See https://godoc.org/cloud.google.com/go/logging/logadmin#Sink.
+Ex: If you want to export it to a GCS bucket, id would be a unique identifier you want for the sink,
+destination would be the name of the GCS Storage bucket and filter would be a user defined condition for
+filtering logs. See below for a sample config:
+ id: ‘info-errors-to-gcs’
+ destination: ‘storage.googleapis.com/’
+ filter: ‘severity >= Default’
+
+
+
+
+
Field
+
Type
+
Description
+
+
+
+
+
id
+
string
+
+
Client assigned sink identifier.
+
+
+
+
+
destination
+
string
+
+
Export Destination.
+
+
+
+
+
filter
+
string
+
+
Filter that specifies any filtering to be done on logs.
+
@@ -238,6 +293,17 @@
Params.MetricInfo
This field must be provided for metrics declared to be of type DISTRIBUTION.
This field will be ignored for non-distribution metric kinds.
+
+
+
+
metricType
+
string
+
+
Stackdriver metric type name, e.g.
+istio.io/service/server/request_count. If this is not provided, a
+concatenation of custom metric prefix (custom.googleapis.com/) and
+Istio metric name will be used.
+
diff --git a/_docs/reference/config/adapters/statsd.html b/_docs/reference/config/adapters/statsd.html
index fc1754acc971e..530f9807ffb08 100644
--- a/_docs/reference/config/adapters/statsd.html
+++ b/_docs/reference/config/adapters/statsd.html
@@ -1,6 +1,6 @@
---
title: StatsD
-overview: Adapter to deliver metrics to a StatsD backend
+description: Adapter to deliver metrics to a StatsD backend
location: https://istio.io/docs/reference/config/adapters/statsd.html
layout: protoc-gen-docs
number_of_entries: 3
diff --git a/_docs/reference/config/adapters/stdio.html b/_docs/reference/config/adapters/stdio.html
index 9fff59a1040bc..100abeb3ef99c 100644
--- a/_docs/reference/config/adapters/stdio.html
+++ b/_docs/reference/config/adapters/stdio.html
@@ -1,6 +1,6 @@
---
title: Stdio
-overview: Adapter for outputting logs and metrics locally.
+description: Adapter for outputting logs and metrics locally.
location: https://istio.io/docs/reference/config/adapters/stdio.html
layout: protoc-gen-docs
number_of_entries: 3
diff --git a/_docs/reference/config/index.md b/_docs/reference/config/index.md
index a31ddc4d902cb..41e416737fc36 100644
--- a/_docs/reference/config/index.md
+++ b/_docs/reference/config/index.md
@@ -1,11 +1,9 @@
---
title: Configuration
-overview: Detailed information on configuration options.
+description: Detailed information on configuration options.
-order: 20
+weight: 20
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/reference/config/istio.mesh.v1alpha1.html b/_docs/reference/config/istio.mesh.v1alpha1.html
index 62d75e831368d..f9310906e09fd 100644
--- a/_docs/reference/config/istio.mesh.v1alpha1.html
+++ b/_docs/reference/config/istio.mesh.v1alpha1.html
@@ -1,10 +1,10 @@
---
title: Service Mesh
-overview: Configuration affecting the service mesh as a whole
+description: Configuration affecting the service mesh as a whole
location: https://istio.io/docs/reference/config/istio.mesh.v1alpha1.html
layout: protoc-gen-docs
redirect_from: /docs/reference/config/service-mesh.html
-number_of_entries: 6
+number_of_entries: 8
---
AuthenticationPolicy
@@ -51,6 +51,13 @@
MeshConfig
MeshConfig defines mesh-wide variables shared by all Envoy instances in the
Istio service mesh.
+
NOTE: This configuration type should be used for the low-level global
+configuration, such as component addresses and port numbers. It should not
+be used for the features of the mesh that can be scoped by service or by
+namespace. Some of the fields in the mesh config are going to be deprecated
+and replaced with several individual configuration types (for example,
+tracing configuration).
+
@@ -64,7 +71,14 @@
MeshConfig
mixerCheckServer
string
-
Deprecated, use mixer_check instead.
+
Address of the server that will be used by the proxies for policy
+check calls. By using different names for mixerCheckServer and
+mixerReportServer, it is possible to have one set of mixer servers handle
+policy check calls while another set of mixer servers handle telemetry
+calls.
+
+
NOTE: Omitting mixerCheckServer while specifying mixerReportServer is
+equivalent to setting disablePolicyChecks to true.
@@ -72,7 +86,8 @@
MeshConfig
mixerReportServer
string
-
Deprecated, use mixer_report instead.
+
Address of the server that will be used by the proxies for policy report
+calls.
Address of the server that will be used by the proxies for policy
-check calls. By using different names for mixerCheck and mixerReport, it
-is possible to have one set of mixer servers handle policy check calls,
-while another set of mixer servers handle telemetry calls.
-
-
NOTE: Omitting mixerCheck while specifying mixerReport is
-equivalent to setting disablePolicyChecks to true.
Address of the server that will be used by the proxies for policy report
-calls.
+
Set the default behavior of the sidecar for handling outbound traffic from the application.
+While the default mode should work out of the box, if your application uses one or more external services that
+are not known a priori, setting the policy to ALLOW_ANY will cause the sidecars to route traffic to any
+requested destination.
+Users are strongly encouraged to use ServiceEntries to explicitly declare any external dependencies,
+instead of using allow_any.
@@ -298,6 +303,61 @@
MeshConfig.IngressControllerMode
a secondary ingress controller (e.g., in addition to a
cloud-provided ingress controller).
+
+
+
+
The mode used to redirect inbound traffic to Envoy.
-
ServerAddress
+
ProxyConfig.InboundInterceptionMode
-
ServerAddress specifies the address of Istio components like mixer, pilot, etc.
-At least one of the field needs to be specified.
+
The mode used to redirect inbound traffic to Envoy.
+This setting has no effect on outbound traffic: iptables REDIRECT is always used for
+outbound connections.
-
+
-
Field
-
Type
+
Name
Description
-
-
mutualTls
-
string
+
+
REDIRECT
-
The address for mTLS server, e.g., (istio-pilot:15003)
+
The REDIRECT mode uses iptables REDIRECT to NAT and redirect to Envoy. This mode loses
+source IP addresses during redirection.
-
-
plainText
-
string
+
+
TPROXY
-
The address for plain text server, e.g., (istio-pilot:15005)
+
The TPROXY mode uses iptables TPROXY to redirect to Envoy. This mode preserves both the
+source and destination IP addresses and ports, so that they can be used for advanced
+filtering and manipulation. This mode also configures the sidecar to run with the
+CAP_NET_ADMIN capability, which is required to use TPROXY.
diff --git a/_docs/reference/config/istio.mixer.v1.config.client.html b/_docs/reference/config/istio.mixer.v1.config.client.html
deleted file mode 100644
index 5cc3f20e37fc4..0000000000000
--- a/_docs/reference/config/istio.mixer.v1.config.client.html
+++ /dev/null
@@ -1,1281 +0,0 @@
----
-title: Mixer Client
-overview: Configuration state for the Mixer client library
-location: https://istio.io/docs/reference/config/istio.mixer.v1.config.client.html
-layout: protoc-gen-docs
-number_of_entries: 24
----
-
APIKey
-
-
APIKey defines the explicit configuration for generating the
-request.api_key attribute from HTTP requests.
-
-
See https://swagger.io/docs/specification/authentication/api-keys
-for a general overview of API keys as defined by OpenAPI.
-
-
-
-
-
Field
-
Type
-
Description
-
-
-
-
-
query
-
string (oneof)
-
-
API Key is sent as a query parameter. query represents the
-query string parameter name.
-
-
For example, query=api_key should be used with the
-following request:
-
-
GET /something?api_key=abcdef12345
-
-
-
-
-
-
header
-
string (oneof)
-
-
API key is sent in a request header. header represents the
-header name.
-
-
For example, header=X-API-KEY should be used with the
-following request:
If the request includes a JWT it must match one of the JWT listed
-here matched by the issuer. If validation is successfull the
-follow attributes are included in requests to the mixer:
-
-
request.auth.principal - The string of the issuer (`iss`) and
-subject (`sub`) claims within a JWT concatenated with “/”
-with a percent-encoded subject value
-
-request.auth.audiences - This should reflect the audience
-(`aud`) claim within matched JWT.
-
-request.auth.presenter - The authorized presenter of the
-credential. This value should reflect the optional Authorized
-Presenter (`azp`) claim within a JWT
-
-
-
If no match is found the request is rejected with HTTP status
-code 401.
-
-
JWT validation is skipped if the user’s traffic request does not
-include a JWT.
-
-
-
-
-
-
-
EndUserAuthenticationPolicySpecBinding
-
-
EndUserAuthenticationPolicySpecBinding defines the binding between
-EndUserAuthenticationPolicySpecs and one or more IstioService.
REQUIRED. One or more EndUserAuthenticationPolicySpecReference
-that should be mapped to the specified service(s).
-
-
-
-
-
-
-
EndUserAuthenticationPolicySpecReference
-
-
EndUserAuthenticationPolicySpecReference identifies a
-EndUserAuthenticationPolicySpec that is bound to a set of services.
-
-
-
-
-
Field
-
Type
-
Description
-
-
-
-
-
name
-
string
-
-
REQUIRED. The short name of the
-EndUserAuthenticationPolicySpec. This is the resource name
-defined by the metadata name field.
-
-
-
-
-
namespace
-
string
-
-
Optional namespace of the
-EndUserAuthenticationPolicySpec. Defaults to the value of the
-metadata namespace field.
-
-
-
-
-
-
-
HTTPAPISpec
-
-
HTTPAPISpec defines the canonical configuration for generating
-API-related attributes from HTTP requests based on the method and
-uri templated path matches. It is sufficient for defining the API
-surface of a service for the purposes of API attribute
-generation. It is not intended to represent auth, quota,
-documentation, or other information commonly found in other API
-specifications, e.g. OpenAPI.
-
-
Existing standards that define operations (or methods) in terms of
-HTTP methods and paths can be normalized to this format for use in
-Istio. For example, a simple petstore API described by OpenAPIv2
-here
-can be represented with the following HTTPAPISpec.
List of attributes that are generated when any of the HTTP
-patterns match. This list typically includes the “api.service”
-and “api.version” attributes.
List of APIKey that describes how to extract an API-KEY from an
-HTTP request. The first API-Key match found in the list is used,
-i.e. ‘OR’ semantics.
-
-
The following default policies are used to generate the
-request.api_key attribute if no explicit APIKey is defined.
-
-
`query: key, `query: api_key`, and then `header: x-api-key`
-
-
-
-
-
-
-
-
HTTPAPISpecBinding
-
-
HTTPAPISpecBinding defines the binding between HTTPAPISpecs and one or more
-IstioService. For example, the following establishes a binding
-between the HTTPAPISpec petstore and service foo in namespace bar.
REQUIRED. One or more HTTPAPISpec references that should be mapped to
-the specified service(s). The aggregate collection of match
-conditions defined in the HTTPAPISpecs should not overlap.
-
-
-
-
-
-
-
HTTPAPISpecPattern
-
-
HTTPAPISpecPattern defines a single pattern to match against
-incoming HTTP requests. The per-pattern list of attributes is
-generated if both the httpmethod and uritemplate match. In
-addition, the top-level list of attributes in the HTTPAPISpec is also
-generated.
List of attributes that are generated if the HTTP request matches
-the specified httpmethod and uritemplate. This typically
-includes the “api.operation” attribute.
-
-
-
-
-
httpMethod
-
string
-
-
HTTP request method to match against as defined by
-rfc7231. For
-example: GET, HEAD, POST, PUT, DELETE.
-
-
-
-
-
uriTemplate
-
string (oneof)
-
-
URI template to match against as defined by
-rfc6570. For example, the
-following are valid URI templates:
ecmascript style regex-based match as defined by
-EDCA-262. For
-example,
-
-
"^/pets/(.*?)?"
-
-
-
-
-
-
-
-
HTTPAPISpecReference
-
-
HTTPAPISpecReference defines a reference to an HTTPAPISpec. This is
-typically used for establishing bindings between an HTTPAPISpec and an
-IstioService. For example, the following defines an
-HTTPAPISpecReference for service foo in namespace bar.
-
-
- name: foo
- namespace: bar
-
-
-
-
-
-
Field
-
Type
-
Description
-
-
-
-
-
name
-
string
-
-
REQUIRED. The short name of the HTTPAPISpec. This is the resource
-name defined by the metadata name field.
-
-
-
-
-
namespace
-
string
-
-
Optional namespace of the HTTPAPISpec. Defaults to the encompassing
-HTTPAPISpecBinding’s metadata namespace field.
Map of control configuration indexed by destination.service. This
-is used to support per-service configuration for cases where a
-mixerclient serves multiple services.
-
-
-
-
-
defaultDestinationService
-
string
-
-
Default destination service name if none was specified in the
-client request.
Default attributes to forward to upstream. This typically
-includes the “source.ip” and “source.uid” attributes.
-
-
-
-
-
-
-
IstioService
-
-
IstioService identifies a service and optionally service version.
-The FQDN of the service is composed from the name, namespace, and implementation-specific domain suffix
-(e.g. on Kubernetes, “reviews” + “default” + “svc.cluster.local” -> “reviews.default.svc.cluster.local”).
-
-
-
-
-
Field
-
Type
-
Description
-
-
-
-
-
name
-
string
-
-
The short name of the service such as “foo”.
-
-
-
-
-
namespace
-
string
-
-
Optional namespace of the service. Defaults to value of metadata namespace field.
-
-
-
-
-
domain
-
string
-
-
Domain suffix used to construct the service FQDN in implementations that support such specification.
-
-
-
-
-
service
-
string
-
-
The service FQDN.
-
-
-
-
-
labels
-
map<string,string>
-
-
Optional one or more labels that uniquely identify the service version.
-
-
Note: When used for a RouteRule destination, labels MUST be empty.
-
-
-
-
-
-
-
JWT
-
-
JSON Web Token (JWT) token format for authentication as defined by
-https://tools.ietf.org/html/rfc7519. See OAuth
-2.0 and OIDC
-1.0 for how this is used in the whole
-authentication flow.
URL of the provider’s public key set to validate signature of the
-JWT. See OpenID Discovery.
-
-
Optional if the key set document can either (a) be retrieved from
-OpenID Discovery
-of the issuer or (b) inferred from the email domain of the issuer
-(e.g. a Google service account).
If true, forward the entire base64 encoded JWT in the HTTP request.
-If false, remove the JWT from the HTTP request and do not forward to the application.
Zero or more locations to search for JWT in an HTTP request.
-
-
-
-
-
jwksUriEnvoyCluster
-
string
-
-
This field is specific for Envoy proxy implementation.
-It is the cluster name in the Envoy config for the jwks_uri.
-
-
-
-
-
-
-
JWT.Location
-
-
Defines where to extract the JWT from an HTTP request.
-
-
If no explicit location is specified the following default
-locations are tried in order:
-
-
1) The Authorization header using the Bearer schema,
- e.g. Authorization: Bearer <token>. (see
- https://tools.ietf.org/html/rfc6750#section-2.1)
-
-2) `access_token` query parameter (see
-https://tools.ietf.org/html/rfc6750#section-2.3)
-
-
-
-
-
-
Field
-
Type
-
Description
-
-
-
-
-
header
-
string (oneof)
-
-
JWT is sent in a request header. header represents the
-header name.
-
-
For example, if header=x-goog-iap-jwt-assertion, the header
-format will be x-goog-iap-jwt-assertion: .
-
-
-
-
-
query
-
string (oneof)
-
-
JWT is sent in a query parameter. query represents the
-query parameter name.
-
-
For example, query=jwt_token.
-
-
-
-
-
-
-
Quota
-
-
Specifies a quota to use with quota name and amount.
-
-
-
-
-
Field
-
Type
-
Description
-
-
-
-
-
quota
-
string
-
-
The quota name to charge
-
-
-
-
-
charge
-
int64
-
-
The quota amount to charge
-
-
-
-
-
-
-
QuotaRule
-
-
Specifies a rule with list of matches and list of quotas.
-If any clause matched, the list of quotas will be used.
REQUIRED. One or more QuotaSpec references that should be mapped to
-the specified service(s). The aggregate collection of match
-conditions defined in the QuotaSpecs should not overlap.
-
-
-
-
-
-
-
QuotaSpecBinding.QuotaSpecReference
-
-
QuotaSpecReference uniquely identifies the QuotaSpec used in the
-Binding.
-
-
-
-
-
Field
-
Type
-
Description
-
-
-
-
-
name
-
string
-
-
REQUIRED. The short name of the QuotaSpec. This is the resource
-name defined by the metadata name field.
-
-
-
-
-
namespace
-
string
-
-
Optional namespace of the QuotaSpec. Defaults to the value of the
-metadata namespace field.
Specify report interval to send periodical reports for long TCP
-connections. If not specified, the interval is 10 seconds. This interval
-should not be less than 1 second, otherwise it will be reset to 1 second.
-
-
-
-
-
-
-
TransportConfig
-
-
Defines the transport config on how to call Mixer.
Specify refresh interval to write mixer client statistics to Envoy share
-memory. If not specified, the interval is 10 seconds.
-
-
-
-
-
checkCluster
-
string
-
-
Name of the cluster that will forward check calls to a pool of mixer
-servers. Defaults to “mixer_server”. By using different names for
-checkCluster and reportCluster, it is possible to have one set of
-mixer servers handle check calls, while another set of mixer servers
-handle report calls.
-
-
NOTE: Any value other than the default “mixer_server” will require the
-Istio Grafana dashboards to be reconfigured to use the new name.
-
-
-
-
-
reportCluster
-
string
-
-
Name of the cluster that will forward report calls to a pool of mixer
-servers. Defaults to “mixer_server”. By using different names for
-checkCluster and reportCluster, it is possible to have one set of
-mixer servers handle check calls, while another set of mixer servers
-handle report calls.
-
-
NOTE: Any value other than the default “mixer_server” will require the
-Istio Grafana dashboards to be reconfigured to use the new name.
-
-
-
-
-
-
-
TransportConfig.NetworkFailPolicy
-
-
NetworkFailPolicy defines behavior when network connection
-failure occurs.
-
-
-
-
-
Name
-
Description
-
-
-
-
-
FAIL_OPEN
-
-
If network fails, request is passed to the backend.
-
-
-
-
-
FAIL_CLOSE
-
-
If network fails, request is rejected.
-
-
-
-
-
-
-
istio.mixer.v1.Attributes
-
-
Attributes represents a set of typed name/value pairs. Many of Mixer’s
-API either consume and/or return attributes.
-
-
Istio uses attributes to control the runtime behavior of services running in the service mesh.
-Attributes are named and typed pieces of metadata describing ingress and egress traffic and the
-environment this traffic occurs in. An Istio attribute carries a specific piece
-of information such as the error code of an API request, the latency of an API request, or the
-original IP address of a TCP connection. For example:
A given Istio deployment has a fixed vocabulary of attributes that it understands.
-The specific vocabulary is determined by the set of attribute producers being used
-in the deployment. The primary attribute producer in Istio is Envoy, although
-specialized Mixer adapters and services can also generate attributes.
-
-
The common baseline set of attributes available in most Istio deployments is defined
-here.
-
-
Attributes are strongly typed. The supported attribute types are defined by
-ValueType.
-Each type of value is encoded into one of the so-called transport types present
-in this message.
-
-
Defines a map of attributes in uncompressed format.
-Following places may use this message:
-1) Configure Istio/Proxy with static per-proxy attributes, such as source.uid.
-2) Service IDL definition to extract api attributes for active requests.
-3) Forward attributes from client proxy to server proxy for HTTP requests.
Configuration affecting traffic routing. Here are a few terms useful to define
+in the context of traffic routing.
+
+
Service a unit of application behavior bound to a unique name in a
+service registry. Services consist of multiple network endpoints
+implemented by workload instances running on pods, containers, VMs etc.
+
+
Service versions (a.k.a. subsets) - In a continuous deployment
+scenario, for a given service, there can be distinct subsets of
+instances running different variants of the application binary. These
+variants are not necessarily different API versions. They could be
+iterative changes to the same service, deployed in different
+environments (prod, staging, dev, etc.). Common scenarios where this
+occurs include A/B testing, canary rollouts, etc. The choice of a
+particular version can be decided based on various criteria (headers,
+url, etc.) and/or by weights assigned to each version. Each service has
+a default version consisting of all its instances.
+
+
Source - A downstream client calling a service.
+
+
Host - The address used by a client when attempting to connect to a
+service.
+
+
Access model - Applications address only the destination service
+(Host) without knowledge of individual service versions (subsets). The
+actual choice of the version is determined by the proxy/sidecar, enabling the
+application code to decouple itself from the evolution of dependent
+services.
+
+
ConnectionPoolSettings
+
+
Connection pool settings for an upstream host. The settings apply to
+each individual host in the upstream service. See Envoy’s circuit
+breaker
+for more details. Connection pool settings can be applied at the TCP
+level as well as at HTTP level.
+
+
For example, the following rule sets a limit of 100 connections to redis
+service called myredissrv with a connect timeout of 30ms
Describes the Cross-Origin Resource Sharing (CORS) policy, for a given
+service. Refer to
+https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS
+for further details about cross origin resource sharing. For example,
+the following rule restricts cross origin requests to those originating
+from example.com domain using HTTP POST/GET, and sets the
+Access-Control-Allow-Credentials header to false. In addition, it only
+exposes X-Foo-bar header and sets an expiry period of 1 day.
The list of origins that are allowed to perform CORS requests. The
+content will be serialized into the Access-Control-Allow-Origin
+header. Wildcard * will allow all origins.
+
+
+
+
+
allowMethods
+
string[]
+
+
List of HTTP methods allowed to access the resource. The content will
+be serialized into the Access-Control-Allow-Methods header.
+
+
+
+
+
allowHeaders
+
string[]
+
+
List of HTTP headers that can be used when requesting the
+resource. Serialized to Access-Control-Allow-Headers header.
+
+
+
+
+
exposeHeaders
+
string[]
+
+
A white list of HTTP headers that the browsers are allowed to
+access. Serialized into Access-Control-Expose-Headers header.
Indicates whether the caller is allowed to send the actual request
+(not the preflight) using credentials. Translates to
+Access-Control-Allow-Credentials header.
+
+
+
+
+
+
+
Destination
+
+
Destination indicates the network addressable service to which the
+request/connection will be sent after processing a routing rule. The
+destination.host should unambiguously refer to a service in the service
+registry. Istio’s service registry is composed of all the services found
+in the platform’s service registry (e.g., Kubernetes services, Consul
+services), as well as services declared through the
+ServiceEntry resource.
+
+
Note for Kubernetes users: When short names are used (e.g. “reviews”
+instead of “reviews.default.svc.cluster.local”), Istio will interpret
+the short name based on the namespace of the rule, not the service. A
+rule in the “default” namespace containing a host “reviews” will be
+interpreted as “reviews.default.svc.cluster.local”, irrespective of the
+actual namespace associated with the reviews service. To avoid potential
+misconfigurations, it is recommended to always use fully qualified
+domain names over short names.
+
+
The following Kubernetes example routes all traffic by default to pods
+of the reviews service with label “version: v1” (i.e., subset v1), and
+some to subset v2, in a kubernetes environment.
The following VirtualService sets a timeout of 5s for all calls to
+productpage.prod.svc.cluster.local service in Kubernetes. Notice that
+there are no subsets defined in this rule. Istio will fetch all
+instances of productpage.prod.svc.cluster.local service from the service
+registry and populate the sidecar’s load balancing pool. Also, notice
+that this rule is set in the istio-system namespace but uses the fully
+qualified domain name of the productpage service,
+productpage.prod.svc.cluster.local. Therefore the rule’s namespace does
+not have an impact in resolving the name of the productpage service.
To control routing for traffic bound to services outside the mesh, external
+services must first be added to Istio’s internal service registry using the
+ServiceEntry resource. VirtualServices can then be defined to control traffic
+bound to these external services. For example, the following rules define a
+Service for wikipedia.org and set a timeout of 5s for http requests.
REQUIRED. The name of a service from the service registry. Service
+names are looked up from the platform’s service registry (e.g.,
+Kubernetes services, Consul services, etc.) and from the hosts
+declared by ServiceEntry. Traffic forwarded to
+destinations that are not found in either of the two, will be dropped.
+
+
Note for Kubernetes users: When short names are used (e.g. “reviews”
+instead of “reviews.default.svc.cluster.local”), Istio will interpret
+the short name based on the namespace of the rule, not the service. A
+rule in the “default” namespace containing a host “reviews” will be
+interpreted as “reviews.default.svc.cluster.local”, irrespective of
+the actual namespace associated with the reviews service. To avoid
+potential misconfigurations, it is recommended to always use fully
+qualified domain names over short names.
+
+
+
+
+
subset
+
string
+
+
The name of a subset within the service. Applicable only to services
+within the mesh. The subset must be defined in a corresponding
+DestinationRule.
Specifies the port on the host that is being addressed. Many services
+only expose a single port or label ports with the protocols they support,
+in these cases it is not required to explicitly select the port. Note that
+selection priority is to first match by name and then match by number.
+
+
Names must comply with DNS label syntax (rfc1035) and therefore cannot
+collide with numbers. If there are multiple ports on a service with
+the same protocol the names should be of the form -.
+
+
+
+
+
+
+
DestinationRule
+
+
DestinationRule defines policies that apply to traffic intended for a
+service after routing has occurred. These rules specify configuration
+for load balancing, connection pool size from the sidecar, and outlier
+detection settings to detect and evict unhealthy hosts from the load
+balancing pool. For example, a simple load balancing policy for the
+ratings service would look as follows:
Version specific policies can be specified by defining a named
+subset and overriding the settings specified at the service level. The
+following rule uses a round robin load balancing policy for all traffic
+going to a subset named testversion that is composed of endpoints (e.g.,
+pods) with labels (version:v3).
Note: Policies specified for subsets will not take effect until
+a route rule explicitly sends traffic to this subset.
+
+
Traffic policies can be customized to specific ports as well. The
+following rule uses the least connection load balancing policy for all
+traffic to port 80, while uses a round robin load balancing setting for
+traffic to the port 9080.
REQUIRED. The name of a service from the service registry. Service
+names are looked up from the platform’s service registry (e.g.,
+Kubernetes services, Consul services, etc.) and from the hosts
+declared by ServiceEntries. Rules defined for
+services that do not exist in the service registry will be ignored.
+
+
Note for Kubernetes users: When short names are used (e.g. “reviews”
+instead of “reviews.default.svc.cluster.local”), Istio will interpret
+the short name based on the namespace of the rule, not the service. A
+rule in the “default” namespace containing a host “reviews” will be
+interpreted as “reviews.default.svc.cluster.local”, irrespective of
+the actual namespace associated with the reviews service. To avoid
+potential misconfigurations, it is recommended to always use fully
+qualified domain names over short names.
+
+
Note that the host field applies to both HTTP and TCP services.
One or more named sets that represent individual versions of a
+service. Traffic policies can be overridden at subset level.
+
+
+
+
+
+
+
DestinationWeight
+
+
Each routing rule is associated with one or more service versions (see
+glossary in beginning of document). Weights associated with the version
+determine the proportion of traffic it receives. For example, the
+following rule will route 25% of traffic for the “reviews” service to
+instances with the “v2” tag and the remaining traffic (i.e., 75%) to
+“v1”.
Traffic can also be split across two entirely different services without
+having to define new subsets. For example, the following rule forwards 25% of
+traffic for reviews.com to dev.reviews.com
REQUIRED. Destination uniquely identifies the instances of a service
+to which the request/connection should be forwarded to.
+
+
+
+
+
weight
+
int32
+
+
REQUIRED. The proportion of traffic to be forwarded to the service
+version. (0-100). Sum of weights across destinations SHOULD BE == 100.
+If there is only one destination in a rule, the weight value is assumed to
+be 100.
+
+
+
+
+
+
+
Gateway
+
+
Gateway describes a load balancer operating at the edge of the mesh
+receiving incoming or outgoing HTTP/TCP connections. The specification
+describes a set of ports that should be exposed, the type of protocol to
+use, SNI configuration for the load balancer, etc.
+
+
For example, the following Gateway configuration sets up a proxy to act
+as a load balancer exposing port 80 and 9080 (http), 443 (https), and
+port 2379 (TCP) for ingress. The gateway will be applied to the proxy
+running on a pod with labels app: my-gateway-controller. While Istio
+will configure the proxy to listen on these ports, it is the
+responsibility of the user to ensure that external traffic to these
+ports are allowed into the mesh.
The Gateway specification above describes the L4-L6 properties of a load
+balancer. A VirtualService can then be bound to a gateway to control
+the forwarding of traffic arriving at a particular host or gateway port.
+
+
For example, the following VirtualService splits traffic for
+“https://uk.bookinfo.com/reviews”, “https://eu.bookinfo.com/reviews”,
+“http://uk.bookinfo.com:9080/reviews”,
+“http://eu.bookinfo.com:9080/reviews” into two versions (prod and qa) of
+an internal reviews service on port 9080. In addition, requests
+containing the cookie “user: dev-123” will be sent to special port 7777
+in the qa version. The same rule is also applicable inside the mesh for
+requests to the “reviews.prod.svc.cluster.local” service. This rule is
+applicable across ports 443, 9080. Note that “http://uk.bookinfo.com”
+gets redirected to “https://uk.bookinfo.com” (i.e. 80 redirects to 443).
+
+
apiVersion: networking.istio.io/v1alpha3
+kind: VirtualService
+metadata:
+ name: bookinfo-rule
+spec:
+ hosts:
+ - reviews.prod.svc.cluster.local
+ - uk.bookinfo.com
+ - eu.bookinfo.com
+ gateways:
+ - my-gateway
+ - mesh # applies to all the sidecars in the mesh
+ http:
+ - match:
+ - headers:
+ cookie:
+ user: dev-123
+ route:
+ - destination:
+ port:
+ number: 7777
+ name: reviews.qa.svc.cluster.local
+ - match:
+ uri:
+ prefix: /reviews/
+ route:
+ - destination:
+ port:
+ number: 9080 # can be omitted if its the only port for reviews
+ name: reviews.prod.svc.cluster.local
+ weight: 80
+ - destination:
+ name: reviews.qa.svc.cluster.local
+ weight: 20
+
+
+
The following VirtualService forwards traffic arriving at (external)
+port 27017 from “172.17.16.0/24” subnet to internal Mongo server on port
+5555. This rule is not applicable internally in the mesh as the gateway
+list omits the reserved name mesh.
One or more labels that indicate a specific set of pods/VMs
+on which this gateway configuration should be applied.
+
+
+
+
+
+
+
HTTPFaultInjection
+
+
HTTPFaultInjection can be used to specify one or more faults to inject
+while forwarding http requests to the destination specified in a route.
+Fault specification is part of a VirtualService rule. Faults include
+aborting the Http request from downstream service, and/or delaying
+proxying of requests. A fault rule MUST HAVE delay or abort or both.
+
+
Note: Delay and abort faults are independent of one another, even if
+both are specified simultaneously.
Abort Http request attempts and return error codes back to downstream
+service, giving the impression that the upstream service is faulty.
+
+
+
+
+
+
+
HTTPFaultInjection.Abort
+
+
Abort specification is used to prematurely abort a request with a
+pre-specified error code. The following example will return an HTTP
+400 error code for 10% of the requests to the “ratings” service “v1”.
The httpStatus field is used to indicate the HTTP status code to
+return to the caller. The optional percent field, a value between 0
+and 100, is used to only abort a certain percentage of requests. If
+not specified, all requests are aborted.
+
+
+
+
+
Field
+
Type
+
Description
+
+
+
+
+
percent
+
int32
+
+
Percentage of requests to be aborted with the error code provided (0-100).
+
+
+
+
+
httpStatus
+
int32 (oneof)
+
+
REQUIRED. HTTP status code to use to abort the Http request.
+
+
+
+
+
grpcStatus
+
string (oneof)
+
+
(– NOT IMPLEMENTED –)
+
+
+
+
+
http2Error
+
string (oneof)
+
+
(– NOT IMPLEMENTED –)
+
+
+
+
+
+
+
HTTPFaultInjection.Delay
+
+
Delay specification is used to inject latency into the request
+forwarding path. The following example will introduce a 5 second delay
+in 10% of the requests to the “v1” version of the “reviews”
+service from all pods with label env: prod
The fixedDelay field is used to indicate the amount of delay in
+seconds. An optional percent field, a value between 0 and 100, can
+be used to only delay a certain percentage of requests. If left
+unspecified, all requests will be delayed.
+
+
+
+
+
Field
+
Type
+
Description
+
+
+
+
+
percent
+
int32
+
+
Percentage of requests on which the delay will be injected (0-100).
(– Add a delay (based on an exponential function) before forwarding
+the request. mean delay needed to derive the exponential delay
+values –)
+
+
+
+
+
+
+
HTTPMatchRequest
+
+
HttpMatchRequest specifies a set of criterion to be met in order for the
+rule to be applied to the HTTP request. For example, the following
+restricts the rule to match only requests where the URL path
+starts with /ratings/v2/ and the request contains a cookie with value
+user=jason.
The header keys must be lowercase and use hyphen as the separator,
+e.g. x-request-id.
+
+
Header values are case-sensitive and formatted as follows:
+
+
+
exact: "value" for exact string match
+
+
prefix: "value" for prefix-based match
+
+
regex: "value" for ECMAscript style regex-based match
+
+
+
Note: The keys uri, scheme, method, and authority will be ignored.
+
+
+
+
+
port
+
uint32
+
+
Specifies the ports on the host that is being addressed. Many services
+only expose a single port or label ports with the protocols they support,
+in these cases it is not required to explicitly select the port.
+
+
+
+
+
sourceLabels
+
map<string, string>
+
+
One or more labels that constrain the applicability of a rule to
+workloads with the given labels. If the VirtualService has a list of
+gateways specified at the top, it should include the reserved gateway
+mesh in order for this field to be applicable.
+
+
+
+
+
gateways
+
string[]
+
+
Names of gateways where the rule should be applied to. Gateway names
+at the top of the VirtualService (if any) are overridden. The gateway match is
+independent of sourceLabels.
+
+
+
+
+
+
+
HTTPRedirect
+
+
HTTPRedirect can be used to send a 302 redirect response to the caller,
+where the Authority/Host and the URI in the response can be swapped with
+the specified values. For example, the following rule redirects
+requests for /v1/getProductRatings API on the ratings service to
+/v1/bookRatings provided by the bookratings service.
On a redirect, overwrite the Path portion of the URL with this
+value. Note that the entire path will be replaced, irrespective of the
+request URI being matched as an exact path or prefix.
+
+
+
+
+
authority
+
string
+
+
On a redirect, overwrite the Authority/Host portion of the URL with
+this value.
+
+
+
+
+
+
+
HTTPRetry
+
+
Describes the retry policy to use when a HTTP request fails. For
+example, the following rule sets the maximum number of retries to 3 when
+calling ratings:v1 service, with a 2s timeout per retry attempt.
REQUIRED. Number of retries for a given request. The interval
+between retries will be determined automatically (25ms+). Actual
+number of retries attempted depends on the httpReqTimeout.
Timeout per retry attempt for a given request. format: 1h/1m/1s/1ms. MUST BE >=1ms.
+
+
+
+
+
+
+
HTTPRewrite
+
+
HTTPRewrite can be used to rewrite specific parts of a HTTP request
+before forwarding the request to the destination. Rewrite primitive can
+be used only with the DestinationWeights. The following example
+demonstrates how to rewrite the URL prefix for api call (/ratings) to
+ratings service before making the actual API call.
rewrite the path (or the prefix) portion of the URI with this
+value. If the original URI was matched based on prefix, the value
+provided in this field will replace the corresponding matched prefix.
+
+
+
+
+
authority
+
string
+
+
rewrite the Authority/Host header with this value.
+
+
+
+
+
+
+
HTTPRoute
+
+
Describes match conditions and actions for routing HTTP/1.1, HTTP2, and
+gRPC traffic. See VirtualService for usage examples.
Match conditions to be satisfied for the rule to be
+activated. All conditions inside a single match block have AND
+semantics, while the list of match blocks have OR semantics. The rule
+is matched if any one of the match blocks succeed.
A http rule can either redirect or forward (default) traffic. The
+forwarding target can be one of several versions of a service (see
+glossary in beginning of document). Weights associated with the
+service version determine the proportion of traffic it receives.
A http rule can either redirect or forward (default) traffic. If
+traffic passthrough option is specified in the rule,
+route/redirect will be ignored. The redirect primitive can be used to
+send a HTTP 302 redirect to a different URI or Authority.
Rewrite HTTP URIs and Authority headers. Rewrite cannot be used with
+Redirect primitive. Rewrite will be performed before forwarding.
+
+
+
+
+
websocketUpgrade
+
bool
+
+
Indicates that a HTTP/1.1 client connection to this particular route
+should be allowed (and expected) to upgrade to a WebSocket connection.
+The default is false. Istio’s reference sidecar implementation (Envoy)
+expects the first request to this route to contain the WebSocket
+upgrade headers. Otherwise, the request will be rejected. Note that
+Websocket allows secondary protocol negotiation which may then be
+subject to further routing rules based on the protocol selected.
Mirror HTTP traffic to another destination in addition to forwarding
+the requests to the intended destination. Mirrored traffic is on a
+best effort basis where the sidecar/gateway will not wait for the
+mirrored cluster to respond before returning the response from the
+original destination. Statistics will be generated for the mirrored
+destination.
Cross-Origin Resource Sharing policy (CORS). Refer to
+https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS
+for further details about cross origin resource sharing.
+
+
+
+
+
appendHeaders
+
map<string, string>
+
+
Additional HTTP headers to add before forwarding a request to the
+destination service.
+
+
+
+
+
+
+
L4MatchAttributes
+
+
L4 connection match attributes. Note that L4 connection matching support
+is incomplete.
+
+
+
+
+
Field
+
Type
+
Description
+
+
+
+
+
destinationSubnet
+
string
+
+
IPv4 or IPv6 ip address of destination with optional subnet. E.g.,
+a.b.c.d/xx form or just a.b.c.d. This is only valid when the
+destination service has several IPs and the application explicitly
+specifies a particular IP.
+
+
+
+
+
port
+
uint32
+
+
Specifies the port on the host that is being addressed. Many services
+only expose a single port or label ports with the protocols they support,
+in these cases it is not required to explicitly select the port.
+
+
+
+
+
sourceSubnet
+
string
+
+
IPv4 or IPv6 ip address of source with optional subnet. E.g., a.b.c.d/xx
+form or just a.b.c.d
+
+
+
+
+
sourceLabels
+
map<string, string>
+
+
One or more labels that constrain the applicability of a rule to
+workloads with the given labels. If the VirtualService has a list of
+gateways specified at the top, it should include the reserved gateway
+mesh in order for this field to be applicable.
+
+
+
+
+
gateways
+
string[]
+
+
Names of gateways where the rule should be applied to. Gateway names
+at the top of the VirtualService (if any) are overridden. The gateway match is
+independent of sourceLabels.
+
+
+
+
+
+
+
LoadBalancerSettings
+
+
Load balancing policies to apply for a specific destination. See Envoy’s
+load balancing
+documentation
+for more details.
+
+
For example, the following rule uses a round robin load balancing policy
+for all traffic going to the ratings service.
Consistent hashing (ketama hash) based load balancer for even load
+distribution/redistribution when the connection pool changes. This
+load balancing policy is applicable only for HTTP-based
+connections. A user specified HTTP header is used as the key with
+xxHash hashing.
+
+
+
+
+
Field
+
Type
+
Description
+
+
+
+
+
httpHeader
+
string
+
+
REQUIRED. The name of the HTTP request header that will be used to
+obtain the hash key. If the request header is not present, the load
+balancer will use a random number as the hash, effectively making
+the load balancing policy random.
+
+
+
+
+
minimumRingSize
+
uint32
+
+
The minimum number of virtual nodes to use for the hash
+ring. Defaults to 1024. Larger ring sizes result in more granular
+load distributions. If the number of hosts in the load balancing
+pool is larger than the ring size, each host will be assigned a
+single virtual node.
+
+
+
+
+
+
+
LoadBalancerSettings.SimpleLB
+
+
Standard load balancing algorithms that require no tuning.
+
+
+
+
+
Name
+
Description
+
+
+
+
+
ROUND_ROBIN
+
+
Round Robin policy. Default
+
+
+
+
+
LEAST_CONN
+
+
The least request load balancer uses an O(1) algorithm which selects
+two random healthy hosts and picks the host which has fewer active
+requests.
+
+
+
+
+
RANDOM
+
+
The random load balancer selects a random healthy host. The random
+load balancer generally performs better than round robin if no health
+checking policy is configured.
+
+
+
+
+
PASSTHROUGH
+
+
This option will forward the connection to the original IP address
+requested by the caller without doing any form of load
+balancing. This option must be used with care. It is meant for
+advanced use cases. Refer to Original Destination load balancer in
+Envoy for further details.
+
+
+
+
+
+
+
OutlierDetection
+
+
A Circuit breaker implementation that tracks the status of each
+individual host in the upstream service. While currently applicable to
+only HTTP services, future versions will support opaque TCP services as
+well. For HTTP services, hosts that continually return errors for API
+calls are ejected from the pool for a pre-defined period of time. See
+Envoy’s outlier
+detection
+for more details.
+
+
The following rule sets a connection pool size of 100 connections and
+1000 concurrent HTTP2 requests, with no more than 10 req/connection to
+“reviews” service. In addition, it configures upstream hosts to be
+scanned every 5 mins, such that any host that fails 7 consecutive times
+with 5XX error code will be ejected for 15 minutes.
Minimum ejection duration. A host will remain ejected for a period
+equal to the product of minimum ejection duration and the number of
+times the host has been ejected. This technique allows the system to
+automatically increase the ejection period for unhealthy upstream
+servers. format: 1h/1m/1s/1ms. MUST BE >=1ms. Default is 30s.
+
+
+
+
+
maxEjectionPercent
+
int32
+
+
Maximum % of hosts in the load balancing pool for the upstream
+service that can be ejected. Defaults to 10%.
+
+
+
+
+
+
+
Port
+
+
Port describes the properties of a specific port of a service.
+
+
+
+
+
Field
+
Type
+
Description
+
+
+
+
+
number
+
uint32
+
+
REQUIRED: A valid non-negative integer port number.
+
+
+
+
+
protocol
+
string
+
+
REQUIRED: The protocol exposed on the port.
+MUST BE one of HTTP|HTTPS|GRPC|HTTP2|MONGO|TCP|TCP-TLS.
+TCP-TLS is used to indicate secure connections to non-HTTP services.
+
+
+
+
+
name
+
string
+
+
Label assigned to the port.
+
+
+
+
+
+
+
PortSelector
+
+
PortSelector specifies the name or number of a port to be used for
+matching or selection for final routing.
+
+
+
+
+
Field
+
Type
+
Description
+
+
+
+
+
number
+
uint32 (oneof)
+
+
Valid port number
+
+
+
+
+
name
+
string (oneof)
+
+
Port name
+
+
+
+
+
+
+
Server
+
+
Server describes the properties of the proxy on a given load balancer
+port. For example,
REQUIRED: The Port on which the proxy should listen for incoming
+connections
+
+
+
+
+
hosts
+
string[]
+
+
A list of hosts exposed by this gateway. While typically applicable to
+HTTP services, it can also be used for TCP services using TLS with
+SNI. Standard DNS wildcard prefix syntax is permitted.
+
+
Note: A VirtualService that is bound to a gateway must have one
+or more hosts that match the hosts specified in a server. The match
+could be an exact match or a suffix match with the server’s hosts. For
+example, if the server’s hosts specifies “*.example.com”,
+VirtualServices with hosts dev.example.com, prod.example.com will
+match. However, VirtualServices with hosts example.com or
+newexample.com will not match.
Set of TLS related options that govern the server’s behavior. Use
+these options to control if all http requests should be redirected to
+https, and the TLS modes to use.
+
+
+
+
+
+
+
Server.TLSOptions
+
+
+
+
+
Field
+
Type
+
Description
+
+
+
+
+
httpsRedirect
+
bool
+
+
If set to true, the load balancer will send a 302 redirect for all
+http connections, asking the clients to use HTTPS.
Optional: Indicates whether connections to this port should be
+secured using TLS. The value of this field determines how TLS is
+enforced.
+
+
+
+
+
serverCertificate
+
string
+
+
REQUIRED if mode is SIMPLE or MUTUAL. The path to the file
+holding the server-side TLS certificate to use.
+
+
+
+
+
privateKey
+
string
+
+
REQUIRED if mode is SIMPLE or MUTUAL. The path to the file
+holding the server’s private key.
+
+
+
+
+
caCertificates
+
string
+
+
REQUIRED if mode is MUTUAL. The path to a file containing
+certificate authority certificates to use in verifying a presented
+client side certificate.
+
+
+
+
+
subjectAltNames
+
string[]
+
+
A list of alternate names to verify the subject identity in the
+certificate presented by the client.
+
+
+
+
+
+
+
Server.TLSOptions.TLSmode
+
+
TLS modes enforced by the proxy
+
+
+
+
+
Name
+
Description
+
+
+
+
+
PASSTHROUGH
+
+
Forward the connection to the upstream server selected based on
+the SNI string presented by the client.
+
+
+
+
+
SIMPLE
+
+
Secure connections with standard TLS semantics.
+
+
+
+
+
MUTUAL
+
+
Secure connections to the upstream using mutual TLS by presenting
+client certificates for authentication.
+
+
+
+
+
+
+
ServiceEntry
+
+
ServiceEntry enables adding additional entries into Istio’s internal
+service registry, so that auto-discovered services in the mesh can
+access/route to these manually specified services. A service entry
+describes the properties of a service (DNS name, VIPs, ports, protocols,
+endpoints). These services could be external to the mesh (e.g., web
+APIs) or mesh-internal services that are not part of the platform’s
+service registry (e.g., a set of VMs talking to services in Kubernetes).
+
+
The following configuration adds a set of MongoDB instances running on
+unmanaged VMs to Istio’s registry, so that these services can be treated
+as any other service in the mesh. The associated DestinationRule is used
+to initiate mTLS connections to the database instances.
The following example demonstrates the use of wildcards in the hosts for
+external services. If the connection has to be routed to the IP address
+requested by the application (i.e. application resolves DNS and attempts
+to connect to a specific IP), the discovery mode must be set to NONE.
For HTTP based services, it is possible to create a VirtualService
+backed by multiple DNS addressable endpoints. In such a scenario, the
+application can use the HTTP_PROXY environment variable to transparently
+reroute API calls for the VirtualService to a chosen backend. For
+example, the following configuration creates a non-existent external
+service called foo.bar.com backed by three domains: us.foo.bar.com:8443,
+uk.foo.bar.com:9443, and in.foo.bar.com:7443
With HTTP_PROXY=http://localhost:443, calls from the application to
+http://foo.bar.com will be upgraded to HTTPS and load balanced across
+the three domains specified above. In other words, a call to
+http://foo.bar.com/baz would be translated to
+https://uk.foo.bar.com/baz.
+
+
+
+
+
Field
+
Type
+
Description
+
+
+
+
+
hosts
+
string[]
+
+
REQUIRED. The hosts associated with the ServiceEntry. Could be a DNS
+name with wildcard prefix (external services only). DNS names in hosts
+will be ignored if the application accesses the service over non-HTTP
+protocols such as mongo/opaque TCP/even HTTPS. In such scenarios, the
+IP addresses specified in the Addresses field or the port will be used
+to uniquely identify the destination.
+
+
+
+
+
addresses
+
string[]
+
+
The virtual IP addresses associated with the service. Could be CIDR
+prefix. For HTTP services, the addresses field will be ignored and
+the destination will be identified based on the HTTP Host/Authority
+header. For non-HTTP protocols such as mongo/opaque TCP/even HTTPS,
+the hosts will be ignored. If one or more IP addresses are specified,
+the incoming traffic will be identified as belonging to this service
+if the destination IP matches the IP/CIDRs specified in the addresses
+field. If the Addresses field is empty, traffic will be identified
+solely based on the destination port. In such scenarios, the port on
+which the service is being accessed must not be shared by any other
+service in the mesh. In other words, the sidecar will behave as a
+simple TCP proxy, forwarding incoming traffic on a specified port to
+the specified destination endpoint IP/host.
One or more endpoints associated with the service.
+
+
+
+
+
+
+
ServiceEntry.Endpoint
+
+
Endpoint defines a network address (IP or hostname) associated with
+the mesh service.
+
+
+
+
+
Field
+
Type
+
Description
+
+
+
+
+
address
+
string
+
+
REQUIRED: Address associated with the network endpoint without the
+port (IP or fully qualified domain name without wildcards). Domain
+names can be used if and only if the resolution is set to DNS.
+
+
+
+
+
ports
+
map<string, uint32>
+
+
Set of ports associated with the endpoint. The ports must be
+associated with a port name that was declared as part of the
+service.
+
+
+
+
+
labels
+
map<string, string>
+
+
One or more labels associated with the endpoint.
+
+
+
+
+
+
+
ServiceEntry.Location
+
+
Location specifies whether the service is part of Istio mesh or
+outside the mesh. Location determines the behavior of several
+features, such as service-to-service mTLS authentication, policy
+enforcement, etc. When communicating with services outside the mesh,
+Istio’s mTLS authentication is disabled, and policy enforcement is
+performed on the client-side as opposed to server-side.
+
+
+
+
+
Name
+
Description
+
+
+
+
+
MESH_EXTERNAL
+
+
Signifies that the service is external to the mesh. Typically used
+to indicate external services consumed through APIs.
+
+
+
+
+
MESH_INTERNAL
+
+
Signifies that the service is part of the mesh. Typically used to
+indicate services added explicitly as part of expanding the service
+mesh to include unmanaged infrastructure (e.g., VMs added to a
+Kubernetes based service mesh).
+
+
+
+
+
+
+
ServiceEntry.Resolution
+
+
Resolution determines how the proxy will resolve the IP addresses of
+the network endpoints associated with the service, so that it can
+route to one of them. The resolution mode specified here has no impact
+on how the application resolves the IP address associated with the
+service. The application may still have to use DNS to resolve the
+service to an IP so that the outbound traffic can be captured by the
+Proxy. Alternatively, for HTTP services, the application could
+directly communicate with the proxy (e.g., by setting HTTP_PROXY) to
+talk to these services.
+
+
+
+
+
Name
+
Description
+
+
+
+
+
NONE
+
+
Assume that incoming connections have already been resolved (to a
+specific destination IP address). Such connections are typically
+routed via the proxy using mechanisms such as IP table REDIRECT/
+eBPF. After performing any routing related transformations, the
+proxy will forward the connection to the IP address to which the
+connection was bound.
+
+
+
+
+
STATIC
+
+
Use the static IP addresses specified in endpoints (see below) as the
+backing instances associated with the service.
+
+
+
+
+
DNS
+
+
Attempt to resolve the IP address by querying the ambient DNS,
+during request processing. If no endpoints are specified, the proxy
+will resolve the DNS address specified in the hosts field, if
+wildcards are not used. If endpoints are specified, the DNS
+addresses specified in the endpoints will be resolved to determine
+the destination IP address.
+
+
+
+
+
+
+
StringMatch
+
+
Describes how to match a given string in HTTP headers. Match is
+case-sensitive.
+
+
+
+
+
Field
+
Type
+
Description
+
+
+
+
+
exact
+
string (oneof)
+
+
exact string match
+
+
+
+
+
prefix
+
string (oneof)
+
+
prefix-based match
+
+
+
+
+
regex
+
string (oneof)
+
+
ECMAscript style regex-based match
+
+
+
+
+
+
+
Subset
+
+
A subset of endpoints of a service. Subsets can be used for scenarios
+like A/B testing, or routing to a specific version of a service. Refer
+to VirtualService documentation for examples of using
+subsets in these scenarios. In addition, traffic policies defined at the
+service-level can be overridden at a subset-level. The following rule
+uses a round robin load balancing policy for all traffic going to a
+subset named testversion that is composed of endpoints (e.g., pods) with
+labels (version:v3).
Traffic policies that apply to this subset. Subsets inherit the
+traffic policies specified at the DestinationRule level. Settings
+specified at the subset level will override the corresponding settings
+specified at the DestinationRule level.
+
+
+
+
+
+
+
TCPRoute
+
+
Describes match conditions and actions for routing TCP traffic. The
+following routing rule forwards traffic arriving at port 27017 for
+mongo.prod.svc.cluster.local from 172.17.16.* subnet to another Mongo
+server on port 5555.
Match conditions to be satisfied for the rule to be
+activated. All conditions inside a single match block have AND
+semantics, while the list of match blocks have OR semantics. The rule
+is matched if any one of the match blocks succeed.
The destination to which the connection should be forwarded to.
+Currently, only one destination is allowed for TCP services. When TCP
+weighted routing support is introduced in Envoy, multiple destinations
+with weights can be specified.
+
+
+
+
+
+
+
TLSSettings
+
+
SSL/TLS related settings for upstream connections. See Envoy’s TLS
+context
+for more details. These settings are common to both HTTP and TCP upstreams.
+
+
For example, the following rule configures a client to use mutual TLS
+for connections to upstream database cluster.
REQUIRED: Indicates whether connections to this port should be secured
+using TLS. The value of this field determines how TLS is enforced.
+
+
+
+
+
clientCertificate
+
string
+
+
REQUIRED if mode is MUTUAL. The path to the file holding the
+client-side TLS certificate to use.
+
+
+
+
+
privateKey
+
string
+
+
REQUIRED if mode is MUTUAL. The path to the file holding the
+client’s private key.
+
+
+
+
+
caCertificates
+
string
+
+
OPTIONAL: The path to the file containing certificate authority
+certificates to use in verifying a presented server certificate. If
+omitted, the proxy will not verify the server’s certificate.
+
+
+
+
+
subjectAltNames
+
string[]
+
+
A list of alternate names to verify the subject identity in the
+certificate. If specified, the proxy will verify that the server
+certificate’s subject alt name matches one of the specified values.
+
+
+
+
+
sni
+
string
+
+
SNI string to present to the server during TLS handshake.
+
+
+
+
+
+
+
TLSSettings.TLSmode
+
+
TLS connection mode
+
+
+
+
+
Name
+
Description
+
+
+
+
+
DISABLE
+
+
Do not setup a TLS connection to the upstream endpoint.
+
+
+
+
+
SIMPLE
+
+
Originate a TLS connection to the upstream endpoint.
+
+
+
+
+
MUTUAL
+
+
Secure connections to the upstream using mutual TLS by presenting
+client certificates for authentication.
+
+
+
+
+
+
+
TrafficPolicy
+
+
Traffic policies to apply for a specific destination, across all
+destination ports. See DestinationRule for examples.
Traffic policies specific to individual ports. Note that port level
+settings will override the destination-level settings. Traffic
+settings specified at the destination-level will not be inherited when
+overridden by port-level settings, i.e. default values will be applied
+to fields omitted in port-level traffic policies.
+
+
+
+
+
+
+
TrafficPolicy.PortTrafficPolicy
+
+
Traffic policies that apply to specific ports of the service
Specifies the port name or number of a port on the destination service
+on which this policy is being applied.
+
+
Names must comply with DNS label syntax (rfc1035) and therefore cannot
+collide with numbers. If there are multiple ports on a service with
+the same protocol the names should be of the form &lt;protocol&gt;-&lt;suffix&gt;.
TLS related settings for connections to the upstream service.
+
+
+
+
+
+
+
VirtualService
+
+
A VirtualService defines a set of traffic routing rules to apply when a host is
+addressed. Each routing rule defines matching criteria for traffic of a specific
+protocol. If the traffic is matched, then it is sent to a named destination service
+(or subset/version of it) defined in the registry.
+
+
The source of traffic can also be matched in a routing rule. This allows routing
+to be customized for specific client contexts.
+
+
The following example on Kubernetes, routes all HTTP traffic by default to
+pods of the reviews service with label “version: v1”. In addition,
+HTTP requests containing /wpcatalog/, /consumercatalog/ url prefixes will
+be rewritten to /newcatalog and sent to pods with label “version: v2”.
A subset/version of a route destination is identified with a reference
+to a named service subset which must be declared in a corresponding
+DestinationRule.
REQUIRED. The destination hosts to which traffic is being sent. Could
+be a DNS name with wildcard prefix or an IP address. Depending on the
+platform, short-names can also be used instead of a FQDN (i.e. has no
+dots in the name). In such a scenario, the FQDN of the host would be
+derived based on the underlying platform.
+
+
A host name can be defined by only one VirtualService. A single
+VirtualService can be used to describe traffic properties for multiple
+HTTP and TCP ports.
+
+
Note for Kubernetes users: When short names are used (e.g. “reviews”
+instead of “reviews.default.svc.cluster.local”), Istio will interpret
+the short name based on the namespace of the rule, not the service. A
+rule in the “default” namespace containing a host “reviews” will be
+interpreted as “reviews.default.svc.cluster.local”, irrespective of
+the actual namespace associated with the reviews service. To avoid
+potential misconfigurations, it is recommended to always use fully
+qualified domain names over short names.
+
+
The hosts field applies to both HTTP and TCP services. Services inside
+the mesh, i.e., those found in the service registry, must always be
+referred to using their alphanumeric names. IP addresses are allowed
+only for services defined via the Gateway.
+
+
+
+
+
gateways
+
string[]
+
+
The names of gateways and sidecars that should apply these routes. A
+single VirtualService is used for sidecars inside the mesh as well
+as for one or more gateways. The selection condition imposed by this field
+can be overridden using the source field in the match conditions of HTTP/TCP
+routes. The reserved word mesh is used to imply all the sidecars in
+the mesh. When this field is omitted, the default gateway (mesh)
+will be used, which would apply the rule to all sidecars in the
+mesh. If a list of gateway names is provided, the rules will apply
+only to the gateways. To apply the rules to both gateways and sidecars,
+specify mesh as one of the gateway names.
An ordered list of route rules for TCP traffic.
+The first rule matching an incoming request is used.
+
+
+
+
+
+
diff --git a/_docs/reference/config/istio.mixer.v1.config.html b/_docs/reference/config/istio.policy.v1beta1.html
similarity index 70%
rename from _docs/reference/config/istio.mixer.v1.config.html
rename to _docs/reference/config/istio.policy.v1beta1.html
index 3067dae281d11..b90047b7848eb 100644
--- a/_docs/reference/config/istio.mixer.v1.config.html
+++ b/_docs/reference/config/istio.policy.v1beta1.html
@@ -1,17 +1,18 @@
---
title: Policy and Telemetry Rules
-overview: Describes the rules used to configure Mixer's policy and telemetry features.
-location: https://istio.io/docs/reference/config/istio.mixer.v1.config.html
+description: Describes the rules used to configure Mixer's policy and telemetry features.
+location: https://istio.io/docs/reference/config/istio.policy.v1beta1.html
layout: protoc-gen-docs
-redirect_from: /docs/reference/config/mixer/policy-and-telemetry-rules.html
-number_of_entries: 7
+number_of_entries: 9
---
+
Describes the rules used to configure Mixer’s policy and telemetry features.
+
Action
Action describes which Handler to invoke and what data to pass to it for processing.
The following example instructs Mixer to invoke ‘prometheus-handler’ handler and pass it the object
-constructed using the instance ‘RequestCountByService’
+constructed using the instance ‘RequestCountByService’.
The set of attributes this Istio component will be responsible for producing at runtime.
We map from attribute name to the attribute’s specification. The name of an attribute,
@@ -99,7 +100,7 @@
AttributeManifest
Attribute names must be unique within a single Istio deployment. The set of canonical
attributes are described at https://istio.io/docs/reference/attribute-vocabulary.html.
Attributes not in that list should be named with a component-specific suffix such as
-request.count-my.component
Required. The type of data carried by this attribute.
+
+
+
+
+
+
Connection
+
+
Connection allows the operator to specify the endpoint for out-of-process infrastructure backend.
+Connection is part of the handler custom resource and is specified alongside adapter specific configuration.
+
+
+
+
+
Field
+
Type
+
Description
+
+
+
+
+
address
+
string
+
+
The address of the backend.
+
@@ -183,12 +209,52 @@
Handler
Handler allows the operator to configure a specific adapter implementation.
Each adapter implementation defines its own params proto.
-
In the following example we define a metrics handler using the Mixer’s prepackaged
-prometheus adapter. This handler doesn’t require any parameters.
In the following example we define a metrics handler for the prometheus adapter.
+The example is in the form of a kubernetes resource:
+* The metadata.name is the name of the handler
+* The kind refers to the adapter name
+* The spec block represents adapter-specific configuration as well as the connection information
+
+
### Sample-1: No connection specified (for compiled in adapters)
+### Note: if connection information is not specified, the adapter configuration is directly inside
+### `spec` block. This is going to be DEPRECATED in favor of Sample-2
+apiVersion: "config.istio.io/v1alpha2"
+kind: prometheus
+metadata:
+ name: handler
+ namespace: istio-system
+spec:
+ metrics:
+ - name: request_count
+ instance_name: requestcount.metric.istio-system
+ kind: COUNTER
+ label_names:
+ - source_service
+ - source_version
+ - destination_service
+ - destination_version
+---
+### Sample-2: With connection information (for out-of-process adapters)
+### Note: Unlike sample-1, the adapter configuration is parallel to `connection` and is nested inside `param` block.
+apiVersion: "config.istio.io/v1alpha2"
+kind: prometheus
+metadata:
+ name: handler
+ namespace: istio-system
+spec:
+ param:
+ metrics:
+ - name: request_count
+ instance_name: requestcount.metric.istio-system
+ kind: COUNTER
+ label_names:
+ - source_service
+ - source_version
+ - destination_service
+ - destination_version
+ connection:
+ address: localhost:8090
+---
@@ -225,6 +291,15 @@
Handler
Optional. Depends on adapter implementation. Struct representation of a
proto defined by the adapter implementation; this varies depending on the value of field adapter.
Optional. Information on how to connect to the out-of-process adapter.
+This is used if the adapter is not compiled into Mixer binary and is running as a separate process.
+
@@ -241,7 +316,7 @@
Instance
The following example instructs Mixer to construct an instance associated with template
‘istio.mixer.adapter.metric.Metric’. It provides a mapping from the template’s fields to expressions.
Instances produced with this instance can be referenced by Actions using name
-‘RequestCountByService’.
Optional. The actions that will be executed when match evaluates to true.
+
+
+
+
+
+
ValueType
+
+
ValueType describes the types that values in the Istio system can take. These
+are used to describe the type of Attributes at run time, describe the type of
+the result of evaluating an expression, and to describe the runtime type of
+fields of other descriptors.
+
+
+
+
+
Name
+
Description
+
+
+
+
+
VALUE_TYPE_UNSPECIFIED
+
+
Invalid, default value.
+
+
+
+
+
STRING
+
+
An undiscriminated variable-length string.
+
+
+
+
+
INT64
+
+
An undiscriminated 64-bit signed integer.
+
+
+
+
+
DOUBLE
+
+
An undiscriminated 64-bit floating-point value.
+
+
+
+
+
BOOL
+
+
An undiscriminated boolean value.
+
+
+
+
+
TIMESTAMP
+
+
A point in time.
+
+
+
+
+
IP_ADDRESS
+
+
An IP address.
+
+
+
+
+
EMAIL_ADDRESS
+
+
An email address.
+
+
+
+
+
URI
+
+
A URI.
+
+
+
+
+
DNS_NAME
+
+
A DNS name.
+
+
+
+
+
DURATION
+
+
A span between two points in time.
+
+
+
+
+
STRING_MAP
+
+
A map string -> string, typically used by headers.
diff --git a/_docs/reference/config/istio.rbac.v1alpha1.html b/_docs/reference/config/istio.rbac.v1alpha1.html
index 83cfbed8bbaed..c9e431518fd05 100644
--- a/_docs/reference/config/istio.rbac.v1alpha1.html
+++ b/_docs/reference/config/istio.rbac.v1alpha1.html
@@ -1,9 +1,9 @@
---
title: RBAC
-overview: Configuration affecting resource-based access control
+description: Configuration for Role Based Access Control
location: https://istio.io/docs/reference/config/istio.rbac.v1alpha1.html
layout: protoc-gen-docs
-number_of_entries: 6
+number_of_entries: 9
---
Istio RBAC (Role Based Access Control) defines ServiceRole and ServiceRoleBinding
objects.
@@ -11,9 +11,9 @@
A ServiceRole specification includes a list of rules (permissions). Each rule has
the following standard fields:
* services: a list of services.
-* methods: HTTP methods or gRPC methods. Note that gRPC methods should be
- presented in the form of “packageName.serviceName/methodName”.
-* paths: HTTP paths. It is ignored in gRPC case.
+* methods: HTTP methods. In the case of gRPC, this field is ignored because the value is always “POST”.
+* paths: HTTP paths or gRPC methods. Note that gRPC methods should be
+ presented in the form of “packageName.serviceName/methodName”.
In addition to the standard fields, operators can use custom fields in the “constraints”
section. The name of a custom field must match one of the “properties” in the “action” part
@@ -28,8 +28,8 @@
namespace: istio-system
spec:
subject:
- user: request.auth.principal | ""
- groups: request.auth.principal | ""
+ user: source.user | ""
+ groups: ""
properties:
service: source.service | ""
namespace: source.namespace | ""
@@ -119,7 +119,9 @@
AccessRule
paths
string[]
-
Optional. A list of HTTP paths.
+
Optional. A list of HTTP paths or gRPC methods.
+gRPC methods must be presented as fully-qualified name in the form of
+packageName.serviceName/methodName.
Exact match, prefix match, and suffix match are supported for paths.
For example, the path “/books/review” matches
“/books/review” (exact match), or “/books/” (prefix match),
@@ -132,10 +134,9 @@
AccessRule
methods
string[]
-
Required. A list of HTTP methods (e.g., “GET”, “POST”) or gRPC methods.
-gRPC methods must be presented as fully-qualified name in the form of
-packageName.serviceName/methodName.
-If set to [“*”], it applies to any method.
+
Optional. A list of HTTP methods (e.g., “GET”, “POST”).
+It is ignored in gRPC case because the value is always “POST”.
+If set to [“*”] or not specified, it applies to any method.
@@ -184,6 +185,142 @@
AccessRule.Constraint
“v1alpha2” (exact match), or “v1” (prefix match),
or “alpha2” (suffix match).
+
+
+
+
+
+
RbacConfig
+
+
RbacConfig defines the global config to control Istio RBAC behavior.
+This Custom Resource is a singleton where only one Custom Resource should be created globally in
+the mesh and the namespace should be the same as that of other Istio components, which is usually istio-system.
+Note: This is enforced both in istioctl and on the server side; a new Custom Resource will be rejected if an
+existing one is found. The user should either delete the existing one or modify it directly.
+
+
Below is an example of RbacConfig object “istio-rbac-config” which enables Istio RBAC for all
+services in the default namespace.
A list of services or namespaces that should be enforced by Istio RBAC policies. Note: This field has
+effect only when mode is ON_WITH_INCLUSION and will be ignored for any other modes.
A list of services or namespaces that should not be enforced by Istio RBAC policies. Note: This field has
+effect only when mode is ON_WITH_EXCLUSION and will be ignored for any other modes.
+
+
+
+
+
+
+
RbacConfig.Mode
+
+
+
+
+
Name
+
Description
+
+
+
+
+
OFF
+
+
Disable Istio RBAC completely, any other config in RbacConfig will be ignored and Istio RBAC policies
+will not be enforced.
+
+
+
+
+
ON
+
+
Enable Istio RBAC for all services and namespaces.
+
+
+
+
+
ON_WITH_INCLUSION
+
+
Enable Istio RBAC only for services and namespaces specified in the inclusion field. Any other
+services and namespaces not in the inclusion field will not be enforced by Istio RBAC policies.
+
+
+
+
+
ON_WITH_EXCLUSION
+
+
Enable Istio RBAC for all services and namespaces except those specified in the exclusion field. Any other
+services and namespaces not in the exclusion field will be enforced by Istio RBAC policies.
+
+
+
+
+
+
+
RbacConfig.Target
+
+
Target defines a list of services or namespaces.
+
+
+
+
+
Field
+
Type
+
Description
+
+
+
+
+
services
+
string[]
+
+
A list of services.
+
+
+
+
+
namespaces
+
string[]
+
+
A list of namespaces.
+
@@ -319,7 +456,7 @@
Subject
properties
-
map<string,string>
+
map<string, string>
Optional. The set of properties that identify the subject.
In the above ServiceRoleBinding example, the second subject has two properties:
diff --git a/_docs/reference/config/istio.routing.v1alpha1.html b/_docs/reference/config/istio.routing.v1alpha1.html
index e1d4f685cbcd7..cfb2b0bd10876 100644
--- a/_docs/reference/config/istio.routing.v1alpha1.html
+++ b/_docs/reference/config/istio.routing.v1alpha1.html
@@ -1,6 +1,6 @@
---
-title: Route Rules Alpha 1
-overview: Configuration affecting traffic routing
+title: Route Rules v1alpha1
+description: Configuration affecting traffic routing
location: https://istio.io/docs/reference/config/istio.routing.v1alpha1.html
layout: protoc-gen-docs
redirect_from: /docs/reference/config/traffic-rules/routing-rules.html
@@ -456,7 +456,7 @@
DestinationWeight
labels
-
map<string,string>
+
map<string, string>
Sometimes required. Service version identifier for the destination service.
(– N.B. The map is used instead of pstruct due to lack of serialization support
@@ -1213,7 +1213,7 @@
IstioService
labels
-
map<string,string>
+
map<string, string>
Optional one or more labels that uniquely identify the service version.
Set of HTTP match conditions based on HTTP/1.1, HTTP/2, GRPC request
metadata, such as uri, scheme, authority. The header keys must be
@@ -1780,7 +1780,7 @@
RouteRule
appendHeaders
-
map<string,string>
+
map<string, string>
Additional HTTP headers to add before forwarding a request to the
destination service.
diff --git a/_docs/reference/config/mixer/attribute-vocabulary.md b/_docs/reference/config/mixer/attribute-vocabulary.md
index b13d49c7e9270..6fe325890e424 100644
--- a/_docs/reference/config/mixer/attribute-vocabulary.md
+++ b/_docs/reference/config/mixer/attribute-vocabulary.md
@@ -1,11 +1,9 @@
---
title: Attribute Vocabulary
-overview: Describes the base attribute vocabulary used for policy and control.
+description: Describes the base attribute vocabulary used for policy and control.
-order: 10
+weight: 10
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -28,7 +26,8 @@ deployments will have agents (Envoy or Mixer adapters) that produce these attrib
| source.domain | string | The domain suffix part of the source service, excluding the name and the namespace. | svc.cluster.local |
| source.uid | string | Platform-specific unique identifier for the client instance of the source service. | kubernetes://redis-master-2353460263-1ecey.my-namespace |
| source.labels | map[string, string] | A map of key-value pairs attached to the client instance. | version => v1 |
-| source.user | string | The identity of the immediate sender of the request, authenticated by mTLS. | service-account-foo |
+| source.user | string | To be deprecated, please refer to the attribute `source.principal`. | service-account-foo |
+| source.principal | string | The identity of the immediate sender of the request, authenticated by mTLS. | service-account-foo |
| destination.ip | ip_address | Server IP address. | 10.0.0.104 |
| destination.port | int64 | The recipient port on the server IP address. | 8080 |
| destination.service | string | The fully qualified name of the service that the server belongs to. | my-svc.my-namespace.svc.cluster.local |
@@ -47,14 +46,17 @@ deployments will have agents (Envoy or Mixer adapters) that produce these attrib
| request.referer | string | The HTTP referer header. | |
| request.scheme | string | URI Scheme of the request | |
| request.size | int64 | Size of the request in bytes. For HTTP requests this is equivalent to the Content-Length header. | |
+| `request.total_size` | int64 | Total size of HTTP request in bytes, including request headers, body and trailers. | |
| request.time | timestamp | The timestamp when the destination receives the request. This should be equivalent to Firebase "now". | |
| request.useragent | string | The HTTP User-Agent header. | |
| response.headers | map[string, string] | HTTP response headers. | |
| response.size | int64 | Size of the response body in bytes | |
+| `response.total_size` | int64 | Total size of HTTP response in bytes, including response headers and body. | |
| response.time | timestamp | The timestamp when the destination produced the response. | |
| response.duration | duration | The amount of time the response took to generate. | |
| response.code | int64 | The response's HTTP status code. | |
| connection.id | string | An ID for a TCP connection with statistically low probability of collision. | |
+| `connection.event` | string | Status of a TCP connection, its value is one of "open", "continue" and "close". | |
| connection.received.bytes | int64 | Number of bytes received by a destination service on a connection since the last Report() for a connection. | |
| connection.received.bytes_total | int64 | Total number of bytes received by a destination service during the lifetime of a connection. | |
| connection.sent.bytes | int64 | Number of bytes sent by a destination service on a connection since the last Report() for a connection. | |
@@ -66,9 +68,12 @@ deployments will have agents (Envoy or Mixer adapters) that produce these attrib
| api.version | string | The API version. | v1alpha1 |
| api.operation | string | Unique string used to identify the operation. The id is unique among all operations described in a specific . | getPetsById |
| api.protocol | string | The protocol type of the API call. Mainly for monitoring/analytics. Note that this is the frontend protocol exposed to the client, not the protocol implemented by the backend service. | "http", “https”, or "grpc" |
-| request.auth.principal | string | The authenticated principal of the request. This is a string of the issuer (`iss`) and subject (`sub`) claims within a JWT concatenated with “/” with a percent-encoded subject value. | accounts.my-svc.com/104958560606 |
+| request.auth.principal | string | The authenticated principal of the request. This is a string of the issuer (`iss`) and subject (`sub`) claims within a JWT concatenated with “/” with a percent-encoded subject value. This attribute may come from the peer or the origin in the Istio authentication policy, depending on the binding rule defined in the Istio authentication policy. | accounts.my-svc.com/104958560606 |
| request.auth.audiences | string | The intended audience(s) for this authentication information. This should reflect the audience (`aud`) claim within a JWT. | ['my-svc.com', 'scopes/read'] |
| request.auth.presenter | string | The authorized presenter of the credential. This value should reflect the optional Authorized Presenter (`azp`) claim within a JWT or the OAuth2 client id. | 123456789012.my-svc.com |
+| request.auth.claims | map[string, string] | all raw string claims from the `origin` JWT | `iss`: `issuer@foo.com`, `sub`: `sub@foo.com`, `aud`: `aud1` |
| request.api_key | string | The API key used for the request. | abcde12345 |
| check.error_code | int64 | The error [code](https://github.com/google/protobuf/blob/master/src/google/protobuf/stubs/status.h#L44) for Mixer Check call. | 5 |
| check.error_message | string | The error message for Mixer Check call. | Could not find the resource |
+| `check.cache_hit` | boolean | Indicates whether Mixer check call hits local cache. | |
+| `quota.cache_hit` | boolean | Indicates whether Mixer quota call hits local cache. | |
diff --git a/_docs/reference/config/mixer/expression-language.md b/_docs/reference/config/mixer/expression-language.md
index 616b1760a4c1b..d265c642ac39f 100644
--- a/_docs/reference/config/mixer/expression-language.md
+++ b/_docs/reference/config/mixer/expression-language.md
@@ -1,21 +1,22 @@
---
title: Expression Language
-overview: Mixer config expression language reference.
+description: Mixer config expression language reference.
-order: 20
+weight: 20
-layout: docs
-type: markdown
---
+{% include home.html %}
+
{% capture mixerConfig %}{{home}}/docs/concepts/policy-and-control/mixer-config.html{% endcapture %}
This page describes how to use the Mixer config expression language (CEXL).
## Background
-Mixer configuration uses an expression language (CEXL) to specify match expressions and [mapping expressions]({{mixerConfig}}#attribute-expressions). CEXL expressions map a set of typed [attributes]({{home}}/docs/concepts/policy-and-control/attributes.html) and constants to a typed [value](https://github.com/istio/api/blob/master/mixer/v1/config/descriptor/value_type.proto#L23).
-
+Mixer configuration uses an expression language (CEXL) to specify match expressions and [mapping expressions]({{mixerConfig}}#attribute-expressions). CEXL expressions map a set of typed [attributes]({{home}}/docs/concepts/policy-and-control/attributes.html) and constants to a typed
+[value](https://github.com/istio/api/blob/master/policy/v1beta1/value_type.proto).
+
## Syntax
CEXL accepts a subset of **[Go expressions](https://golang.org/ref/spec#Expressions)**, which defines the syntax. CEXL implements a subset of the Go operators that constrains the set of accepted Go expressions. CEXL also supports arbitrary parenthesization.
@@ -26,32 +27,34 @@ CEXL supports the following functions.
|Operator/Function |Definition |Example | Description
|-----------------------------------------
-|`==` |Equals |`request.size == 200`
+|`==` |Equals |`request.size == 200`
|`!=` |Not Equals |`request.auth.principal != "admin"`
-|`||` |Logical OR | `(request.size == 200) || (request.auth.principal == "admin")`
-|`&&` |Logical AND | `(request.size == 200) && (request.auth.principal == "admin")`
+|`||` |Logical OR | `(request.size == 200) || (request.auth.principal == "admin")`
+|`&&` |Logical AND | `(request.size == 200) && (request.auth.principal == "admin")`
|`[ ]` |Map Access | `request.headers["x-id"]`
|`|` |First non empty | `source.labels["app"] | source.labels["svc"] | "unknown"`
|`match` | Glob match |`match(destination.service, "*.ns1.svc.cluster.local")` | Matches prefix or suffix based on the location of `*`
+|`email` | Convert a textual e-mail into the `EMAIL_ADDRESS` type | `email("awesome@istio.io")` | Use the `email` function to create an `EMAIL_ADDRESS` literal.
+|`dnsName` | Convert a textual DNS name into the `DNS_NAME` type | `dnsName("www.istio.io")` | Use the `dnsName` function to create a `DNS_NAME` literal.
|`ip` | Convert a textual IPv4 address into the `IP_ADDRESS` type | `source.ip == ip("10.11.12.13")` | Use the `ip` function to create an `IP_ADDRESS` literal.
-|`timestamp` | Convert a textual timestamp in RFC 3339 format into the `TIMESTAMP` type |`timestamp("2015-01-02T15:04:35Z")` | Use the `timestamp` function to create a `TIMESTAMP` literal.
-|`.matches` | Regular expression match | `"svc.*".matches(destination.service)` | Matches `destination.service` against regular expression pattern `"svc.*"`.
-|`.startsWith` | string prefix match | `destination.service.startsWith("acme")` | Checks whether `destination.service` starts with `"acme"`.
+|`timestamp` | Convert a textual timestamp in RFC 3339 format into the `TIMESTAMP` type | `timestamp("2015-01-02T15:04:35Z")` | Use the `timestamp` function to create a `TIMESTAMP` literal.
+|`uri` | Convert a textual URI into the `URI` type | `uri("http://istio.io")` | Use the `uri` function to create a `URI` literal.
+|`.matches` | Regular expression match | `"svc.*".matches(destination.service)` | Matches `destination.service` against regular expression pattern `"svc.*"`.
+|`.startsWith` | string prefix match | `destination.service.startsWith("acme")` | Checks whether `destination.service` starts with `"acme"`.
|`.endsWith` | string postfix match | `destination.service.endsWith("acme")` | Checks whether `destination.service` ends with `"acme"`.
-
## Type checking
CEXL variables are attributes from the typed [attribute vocabulary]({{home}}/docs/reference/config/mixer/attribute-vocabulary.html), constants are implicitly typed and, functions are explicitly typed.
Mixer validates a CEXL expression and resolves it to a type during config validation.
-Selectors must resolve to a boolean value and mapping expressions must resolve to the type they are mapping into. Config validation fails if a selector fails to resolve to a boolean or if a mapping expression resolves to an incorrect type.
+Selectors must resolve to a boolean value and mapping expressions must resolve to the type they are mapping into. Config validation fails if a selector fails to resolve to a boolean or if a mapping expression resolves to an incorrect type.
For example, if an operator specifies a *string* label as `request.size | 200`, validation fails because the expression resolves to an integer.
## Missing attributes
-If an expression uses an attribute that is not available during request processing, the expression evaluation fails. Use the `|` operator to provide a default value if an attribute may be missing.
+If an expression uses an attribute that is not available during request processing, the expression evaluation fails. Use the `|` operator to provide a default value if an attribute may be missing.
For example, the expression `request.auth.principal == "user1"` fails evaluation if the `request.auth.principal` attribute is missing. The `|` (OR) operator addresses the problem: `(request.auth.principal | "nobody" ) == "user1"`.
@@ -60,7 +63,7 @@ For example, the expression `request.auth.principal == "user1"` fails evaluation
|Expression |Return Type |Description
|------------------------------------
|`request.size| 200` | **int** | `request.size` if available, otherwise 200.
-|`request.headers["X-FORWARDED-HOST"] == "myhost"`| **boolean**
+|`request.headers["X-FORWARDED-HOST"] == "myhost"`| **boolean**
|`(request.headers["x-user-group"] == "admin") || (request.auth.principal == "admin")`| **boolean**| True if the user is admin or in the admin group.
|`(request.auth.principal | "nobody" ) == "user1"` | **boolean** | True if `request.auth.principal` is "user1", The expression will not error out if `request.auth.principal` is missing.
|`source.labels["app"]=="reviews" && source.labels["version"]=="v3"`| **boolean** | True if app label is reviews and version label is v3, false otherwise.
diff --git a/_docs/reference/config/mixer/index.md b/_docs/reference/config/mixer/index.md
index f51d6d1f3edac..53bb514d6f3ec 100644
--- a/_docs/reference/config/mixer/index.md
+++ b/_docs/reference/config/mixer/index.md
@@ -1,11 +1,9 @@
---
title: Mixer
-overview: Detailed information on configuration and API exposed by Mixer.
+description: Detailed information on configuration and API exposed by Mixer.
-order: 30
+weight: 30
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/reference/config/mixer/istio.mixer.adapter.model.v1beta1.html b/_docs/reference/config/mixer/istio.mixer.adapter.model.v1beta1.html
deleted file mode 100644
index ecd9c9eb80d47..0000000000000
--- a/_docs/reference/config/mixer/istio.mixer.adapter.model.v1beta1.html
+++ /dev/null
@@ -1,91 +0,0 @@
----
-title: Mixer Adapter Model
-overview: Definitions used when creating Mixer templates
-location: https://istio.io/docs/reference/config/mixer/istio.mixer.adapter.model.v1beta1.html
-layout: protoc-gen-docs
-number_of_entries: 8
----
-
This package defines the types that are used when creating Mixer templates. ValueType defined in this pacakge
-is also used by adapters to know the underlying datatype of the instance fields.
-
-
DNSName
-
-
DNSName is used inside templates for fields that are of ValueType “DNS_NAME”
-
-
-
Duration
-
-
Duration is used inside templates for fields that are of ValueType “DURATION”
-
-
-
EmailAddress
-
-
EmailAddress is used inside templates for fields that are of ValueType “EMAIL_ADDRESS”
-DO NOT USE !! Under Development
-
-
-
IPAddress
-
-
IPAddress is used inside templates for fields that are of ValueType “IP_ADDRESS”
-
-
-
TemplateVariety
-
-
The available varieties of templates, controlling the semantics of what an adapter does with each instance.
-
-
-
-
-
Name
-
Description
-
-
-
-
-
TEMPLATE_VARIETY_CHECK
-
-
Makes the template applicable for Mixer’s check calls.
-
-
-
-
-
TEMPLATE_VARIETY_REPORT
-
-
Makes the template applicable for Mixer’s report calls.
-
-
-
-
-
TEMPLATE_VARIETY_QUOTA
-
-
Makes the template applicable for Mixer’s quota calls.
-
-
-
-
-
TEMPLATE_VARIETY_ATTRIBUTE_GENERATOR
-
-
Makes the template applicable for Mixer’s quota calls.
-
-
-
-
-
-
-
TimeStamp
-
-
TimeStamp is used inside templates for fields that are of ValueType “TIMESTAMP”
-
-
-
Uri
-
-
Uri is used inside templates for fields that are of ValueType “URI”
-DO NOT USE ! Under Development
-
-
-
Value
-
-
Value is used inside templates for fields that have dynamic types. The actual datatype
-of the field depends on the datatype of the expression used in the operator configuration.
-
-
diff --git a/_docs/reference/config/mixer/istio.mixer.v1.config.descriptor.html b/_docs/reference/config/mixer/istio.mixer.v1.config.descriptor.html
deleted file mode 100644
index 3a3f8afb09b00..0000000000000
--- a/_docs/reference/config/mixer/istio.mixer.v1.config.descriptor.html
+++ /dev/null
@@ -1,109 +0,0 @@
----
-title: Value Type
-overview: Value types used with templates
-location: https://istio.io/docs/reference/config/mixer/istio.mixer.v1.config.descriptor.html
-layout: protoc-gen-docs
-number_of_entries: 1
----
-
ValueType
-
-
ValueType describes the types that values in the Istio system can take. These
-are used to describe the type of Attributes at run time, describe the type of
-the result of evaluating an expression, and to describe the runtime type of
-fields of other descriptors.
-
-
-
-
-
Name
-
Description
-
-
-
-
-
VALUE_TYPE_UNSPECIFIED
-
-
Invalid, default value.
-
-
-
-
-
STRING
-
-
An undiscriminated variable-length string.
-
-
-
-
-
INT64
-
-
An undiscriminated 64-bit signed integer.
-
-
-
-
-
DOUBLE
-
-
An undiscriminated 64-bit floating-point value.
-
-
-
-
-
BOOL
-
-
An undiscriminated boolean value.
-
-
-
-
-
TIMESTAMP
-
-
A point in time.
-
-
-
-
-
IP_ADDRESS
-
-
An IP address.
-
-
-
-
-
EMAIL_ADDRESS
-
-
An email address.
-
-
-
-
-
URI
-
-
A URI.
-
-
-
-
-
DNS_NAME
-
-
A DNS name.
-
-
-
-
-
DURATION
-
-
A span between two points in time.
-
-
-
-
-
STRING_MAP
-
-
A map string -> string, typically used by headers.
This proto describes the types that can be used inside Mixer templates. These message types are used to specify
-field datatype to express the equivalent ValueType for the expressions the field can be mapped to.
-
-
DNSName
-
-
DNSName is used inside templates for fields that are of ValueType “DNS_NAME”
-
-
-
Duration
-
-
Duration is used inside templates for fields that are of ValueType “DURATION”
-
-
-
EmailAddress
-
-
EmailAddress is used inside templates for fields that are of ValueType “EMAIL_ADDRESS”
-DO NOT USE !! Under Development
-
-
-
IPAddress
-
-
IPAddress is used inside templates for fields that are of ValueType “IP_ADDRESS”
-
-
-
TemplateVariety
-
-
Specifies the varity of the the Template.
-
-
-
-
-
Name
-
Description
-
-
-
-
-
TEMPLATE_VARIETY_CHECK
-
-
-
-
-
TEMPLATE_VARIETY_REPORT
-
-
-
-
-
TEMPLATE_VARIETY_QUOTA
-
-
-
-
-
TEMPLATE_VARIETY_ATTRIBUTE_GENERATOR
-
-
-
-
-
-
-
TimeStamp
-
-
TimeStamp is used inside templates for fields that are of ValueType “TIMESTAMP”
-
-
-
Uri
-
-
Uri is used inside templates for fields that are of ValueType “URI”
-DO NOT USE ! Under Development
-
-
-
Value
-
-
Value is used inside templates for fields that have dynamic types. The actual datatype
-of the field depends on the datatype of the expression used in the operator configuration.
-
-
diff --git a/_docs/reference/config/template/apikey.html b/_docs/reference/config/template/apikey.html
index d406e1d405995..7de4d585ad98d 100644
--- a/_docs/reference/config/template/apikey.html
+++ b/_docs/reference/config/template/apikey.html
@@ -1,9 +1,9 @@
---
title: API Key
-overview: A template that represents a single API key.
+description: A template that represents a single API key.
location: https://istio.io/docs/reference/config/template/apikey.html
layout: protoc-gen-docs
-number_of_entries: 1
+number_of_entries: 2
---
The apikey template represents a single API key, which is used for authorization checks.
TimeStamp is used inside templates for fields that are of ValueType “TIMESTAMP”
+
+
diff --git a/_docs/reference/config/template/authorization.html b/_docs/reference/config/template/authorization.html
index 32a8110c1cf64..656fd18373b91 100644
--- a/_docs/reference/config/template/authorization.html
+++ b/_docs/reference/config/template/authorization.html
@@ -1,9 +1,9 @@
---
title: Authorization
-overview: A template used to represent an access control query.
+description: A template used to represent an access control query.
location: https://istio.io/docs/reference/config/template/authorization.html
layout: protoc-gen-docs
-number_of_entries: 3
+number_of_entries: 4
---
The authorization template defines parameters for performing policy
enforcement within Istio. It is primarily concerned with enabling Mixer
Value is used inside templates for fields that have dynamic types. The actual datatype
+of the field depends on the datatype of the expression used in the operator configuration.
+
+
diff --git a/_docs/reference/config/template/checknothing.html b/_docs/reference/config/template/checknothing.html
index 6d47092d71bc0..86c67016402bb 100644
--- a/_docs/reference/config/template/checknothing.html
+++ b/_docs/reference/config/template/checknothing.html
@@ -1,6 +1,6 @@
---
title: Check Nothing
-overview: A template that carries no data, useful for testing.
+description: A template that carries no data, useful for testing.
location: https://istio.io/docs/reference/config/template/checknothing.html
layout: protoc-gen-docs
number_of_entries: 1
diff --git a/_docs/reference/config/template/index.md b/_docs/reference/config/template/index.md
index 1abe621f4f8b4..3d4d326c6475f 100644
--- a/_docs/reference/config/template/index.md
+++ b/_docs/reference/config/template/index.md
@@ -1,11 +1,9 @@
---
title: Templates
-overview: Generated documentation for Mixer's Templates.
+description: Generated documentation for Mixer's Templates.
-order: 50
+weight: 50
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/reference/config/template/kubernetes.html b/_docs/reference/config/template/kubernetes.html
index 733102c27d9e6..831ab7fd9e020 100644
--- a/_docs/reference/config/template/kubernetes.html
+++ b/_docs/reference/config/template/kubernetes.html
@@ -1,9 +1,9 @@
---
title: Kubernetes
-overview: A template that is used to control the production of Kubernetes-specific attributes.
+description: A template that is used to control the production of Kubernetes-specific attributes.
location: https://istio.io/docs/reference/config/template/kubernetes.html
layout: protoc-gen-docs
-number_of_entries: 2
+number_of_entries: 3
---
The kubernetes template holds data that controls the production of Kubernetes-specific
attributes.
IPAddress is used inside templates for fields that are of ValueType “IP_ADDRESS”
+
+
diff --git a/_docs/reference/config/template/listentry.html b/_docs/reference/config/template/listentry.html
index 7de9093137999..b72ddc7b545eb 100644
--- a/_docs/reference/config/template/listentry.html
+++ b/_docs/reference/config/template/listentry.html
@@ -1,6 +1,6 @@
---
title: List Entry
-overview: A template designed to let you perform list checking operations.
+description: A template designed to let you perform list checking operations.
location: https://istio.io/docs/reference/config/template/listentry.html
layout: protoc-gen-docs
number_of_entries: 1
@@ -14,7 +14,7 @@
Template
within a list.
When writing the configuration, the value for the fields associated with this template can either be a
-literal or an expression. Please note that if the datatype of a field is not istio.mixer.v1.template.Value,
+literal or an expression. Please note that if the datatype of a field is not istio.mixer.adapter.model.v1beta1.Value,
then the expression’s inferred type must match the datatype of the field.
Example config:
diff --git a/_docs/reference/config/template/logentry.html b/_docs/reference/config/template/logentry.html
index 9c27d985a6f56..4944624e7df42 100644
--- a/_docs/reference/config/template/logentry.html
+++ b/_docs/reference/config/template/logentry.html
@@ -1,9 +1,9 @@
---
title: Log Entry
-overview: A template that represents a single runtime log entry.
+description: A template that represents a single runtime log entry.
location: https://istio.io/docs/reference/config/template/logentry.html
layout: protoc-gen-docs
-number_of_entries: 1
+number_of_entries: 3
---
The logentry template represents an individual entry within a log.
@@ -12,7 +12,7 @@
Template
The logentry template represents an individual entry within a log.
When writing the configuration, the value for the fields associated with this template can either be a
-literal or an expression. Please note that if the datatype of a field is not istio.mixer.v1.template.Value,
+literal or an expression. Please note that if the datatype of a field is not istio.mixer.adapter.model.v1beta1.Value,
then the expression’s inferred type must match the datatype of the field.
Optional. A set of expressions that will form the dimensions of the monitored resource this log entry is being
recorded on. If the logging backend supports monitored resources, these fields are used to populate that resource.
@@ -95,3 +95,14 @@
Template
+
istio.mixer.adapter.model.v1beta1.TimeStamp
+
+
TimeStamp is used inside templates for fields that are of ValueType “TIMESTAMP”
+
+
+
istio.mixer.adapter.model.v1beta1.Value
+
+
Value is used inside templates for fields that have dynamic types. The actual datatype
+of the field depends on the datatype of the expression used in the operator configuration.
+
+
diff --git a/_docs/reference/config/template/metric.html b/_docs/reference/config/template/metric.html
index ab04ba3217774..e98bcbf5525b9 100644
--- a/_docs/reference/config/template/metric.html
+++ b/_docs/reference/config/template/metric.html
@@ -1,9 +1,9 @@
---
title: Metric
-overview: A template that represents a single runtime metric.
+description: A template that represents a single runtime metric.
location: https://istio.io/docs/reference/config/template/metric.html
layout: protoc-gen-docs
-number_of_entries: 1
+number_of_entries: 2
---
The metric template is designed to let you describe runtime metric to dispatch to
monitoring backends.
@@ -13,7 +13,7 @@
Template
The metric template represents a single piece of data to report.
When writing the configuration, the value for the fields associated with this template can either be a
-literal or an expression. Please note that if the datatype of a field is not istio.mixer.v1.template.Value,
+literal or an expression. Please note that if the datatype of a field is not istio.mixer.adapter.model.v1beta1.Value,
then the expression’s inferred type must match the datatype of the field.
Optional. A set of expressions that will form the dimensions of the monitored resource this metric is being reported on.
If the metric backend supports monitored resources, these fields are used to populate that resource. Otherwise
@@ -82,3 +82,9 @@
Template
+
istio.mixer.adapter.model.v1beta1.Value
+
+
Value is used inside templates for fields that have dynamic types. The actual datatype
+of the field depends on the datatype of the expression used in the operator configuration.
+
+
diff --git a/_docs/reference/config/template/quota.html b/_docs/reference/config/template/quota.html
index 326dfc9e82703..a0992056703e7 100644
--- a/_docs/reference/config/template/quota.html
+++ b/_docs/reference/config/template/quota.html
@@ -1,9 +1,9 @@
---
title: Quota
-overview: A template that represents a quota allocation request
+description: A template that represents a quota allocation request
location: https://istio.io/docs/reference/config/template/quota.html
layout: protoc-gen-docs
-number_of_entries: 1
+number_of_entries: 2
---
The quota template represents an item for which to check quota.
@@ -12,7 +12,7 @@
Template
The quota template represents a piece of data to check Quota for.
When writing the configuration, the value for the fields associated with this template can either be a
-literal or an expression. Please note that if the datatype of a field is not istio.mixer.v1.template.Value,
+literal or an expression. Please note that if the datatype of a field is not istio.mixer.adapter.model.v1beta1.Value,
then the expression’s inferred type must match the datatype of the field.
The unique identity of the particular quota to manipulate.
@@ -50,3 +50,9 @@
Template
+
istio.mixer.adapter.model.v1beta1.Value
+
+
Value is used inside templates for fields that have dynamic types. The actual datatype
+of the field depends on the datatype of the expression used in the operator configuration.
+
+
diff --git a/_docs/reference/config/template/reportnothing.html b/_docs/reference/config/template/reportnothing.html
index 867758f96e9f9..cde494b04ea10 100644
--- a/_docs/reference/config/template/reportnothing.html
+++ b/_docs/reference/config/template/reportnothing.html
@@ -1,6 +1,6 @@
---
title: Report Nothing
-overview: A template that carries no data, useful for testing.
+description: A template that carries no data, useful for testing.
location: https://istio.io/docs/reference/config/template/reportnothing.html
layout: protoc-gen-docs
number_of_entries: 1
diff --git a/_docs/reference/config/template/servicecontrolreport.html b/_docs/reference/config/template/servicecontrolreport.html
index 0e345f60c4cad..ccd238154e1a8 100644
--- a/_docs/reference/config/template/servicecontrolreport.html
+++ b/_docs/reference/config/template/servicecontrolreport.html
@@ -1,9 +1,9 @@
---
title: Service Control Report
-overview: A template used by the Google Service Control adapter.
+description: A template used by the Google Service Control adapter.
location: https://istio.io/docs/reference/config/template/servicecontrolreport.html
layout: protoc-gen-docs
-number_of_entries: 1
+number_of_entries: 3
---
Duration is used inside templates for fields that are of ValueType “DURATION”
+
+
+
istio.mixer.adapter.model.v1beta1.TimeStamp
+
+
TimeStamp is used inside templates for fields that are of ValueType “TIMESTAMP”
+
+
diff --git a/_docs/reference/index.md b/_docs/reference/index.md
index 6001dfc271da8..24ffc2e4d7f49 100644
--- a/_docs/reference/index.md
+++ b/_docs/reference/index.md
@@ -1,12 +1,10 @@
---
title: Reference
-overview: The Reference section contains detailed authoritative reference material such as command-line options, configuration options, and API calling parameters.
+description: The Reference section contains detailed authoritative reference material such as command-line options, configuration options, and API calling parameters.
index: true
-order: 60
+weight: 60
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/reference/writing-config.md b/_docs/reference/writing-config.md
deleted file mode 100644
index 00bd14a69e0b9..0000000000000
--- a/_docs/reference/writing-config.md
+++ /dev/null
@@ -1,235 +0,0 @@
----
-title: Writing Configuration
-overview: How to write Istio config YAML content.
-
-order: 70
-
-layout: docs
-type: markdown
----
-
-This page describes how to write configuration that conforms to Istio's schemas. All configuration schemas in Istio are defined as [protobuf messages](https://developers.google.com/protocol-buffers/docs/proto3). When in doubt, search for the protos.
-
-## Translating to YAML
-
-There's an implicit mapping from protobuf to YAML using [protobuf's mapping to JSON](https://developers.google.com/protocol-buffers/docs/proto3#json). Below are a few examples showing common mappings you'll encounter writing configuration in Istio.
-
-**Important things to note:**
-- YAML fields are implicitly strings
-- Proto `repeated` fields map to YAML lists; each element in a YAML list is prefixed by a dash (`-`)
-- Proto `message`s map to objects; in YAML objects are field names all at the same indentation level
-- YAML is whitespace sensitive and must use spaces; tabs are never allowed
-
-### `map` and `message` fields
-
-
-
-*Note that when numeric literals are used as strings (like `value` above) they must be enclosed in quotes. Quotation marks (`"`) are optional for normal strings.*
-
-### `repeated` fields
-
-
-
-*Note that YAML parsing will handle both `snake_case` and `lowerCamelCase` field names. `lowerCamelCase` is the canonical version in YAML.*
-
-### Nested `message` fields
-
-
-name: My Monitored Resource
-labels:
-- name: label one
- valueType: STRING
-- name: second label
- valueType: DOUBLE
-
-
-
-
-
-
-### `Timestamp`, `Duration`, and 64 bit integer fields
-
-The protobuf spec special cases the JSON/YAML representations of a few well-known protobuf messages. 64 bit integer types are also special due to the fact that JSON numbers are implicitly doubles, which cannot represent all valid 64 bit integer values.
-
-
-
-Specifically, the [protobuf spec declares](https://developers.google.com/protocol-buffers/docs/proto3#json):
-
-| Proto | JSON/YAML | Example | Notes |
-| --- | --- | --- | --- |
-| Timestamp | string | "1972-01-01T10:00:20.021Z" | Uses RFC 3339, where generated output will always be Z-normalized and uses 0, 3, 6 or 9 fractional digits. |
-| Duration | string | "1.000340012s", "1s" | Generated output always contains 0, 3, 6, or 9 fractional digits, depending on required precision. Accepted are any fractional digits (also none) as long as they fit into nano-seconds precision. |
-| int64, fixed64, uint64 | string | "1", "-10" | JSON value will be a decimal string. Either numbers or strings are accepted.|
-
-
-
-
-## What's next
-* TODO: link to overall mixer config concept guide (how the config pieces fit together)
-
-
diff --git a/_docs/setup/cloudfoundry/index.md b/_docs/setup/cloudfoundry/index.md
index 6543c4ae919b5..9c8ea47d470fb 100644
--- a/_docs/setup/cloudfoundry/index.md
+++ b/_docs/setup/cloudfoundry/index.md
@@ -1,11 +1,9 @@
---
title: Cloud Foundry
-overview: Instructions for installing the Istio control plane in Cloud Foundry.
+description: Instructions for installing the Istio control plane in Cloud Foundry.
-order: 40
+weight: 40
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/setup/cloudfoundry/install.md b/_docs/setup/cloudfoundry/install.md
index 86838a54ea4a4..740d239c8dc19 100644
--- a/_docs/setup/cloudfoundry/install.md
+++ b/_docs/setup/cloudfoundry/install.md
@@ -1,11 +1,9 @@
---
title: Installation
-overview: Instructions for installing the Istio control plane in Cloud Foundry.
+description: Instructions for installing the Istio control plane in Cloud Foundry.
-order: 10
+weight: 10
-layout: docs
-type: markdown
---
We are working with the Cloud Foundry developers to integrate Istio
diff --git a/_docs/setup/consul/index.md b/_docs/setup/consul/index.md
index 0e8cdd166a5c2..b2eef23d2b5a4 100644
--- a/_docs/setup/consul/index.md
+++ b/_docs/setup/consul/index.md
@@ -1,11 +1,9 @@
---
title: Nomad & Consul
-overview: Instructions for installing the Istio control plane in a Consul based environment, with or without Nomad.
+description: Instructions for installing the Istio control plane in a Consul based environment, with or without Nomad.
-order: 20
+weight: 20
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/setup/consul/install.md b/_docs/setup/consul/install.md
index 64a72c13704d0..b848de243e15a 100644
--- a/_docs/setup/consul/install.md
+++ b/_docs/setup/consul/install.md
@@ -1,24 +1,22 @@
---
title: Installation
-overview: Instructions for installing the Istio control plane in a Consul based environment, with or without Nomad.
+description: Instructions for installing the Istio control plane in a Consul based environment, with or without Nomad.
-order: 30
+weight: 30
-layout: docs
-type: markdown
---
-> Note: Setup on Nomad has not been tested.
+> Setup on Nomad has not been tested.
-Using Istio in a non-kubernetes environment involves a few key tasks:
+Using Istio in a non-Kubernetes environment involves a few key tasks:
1. Setting up the Istio control plane with the Istio API server
-2. Adding the Istio sidecar to every instance of a service
-3. Ensuring requests are routed through the sidecars
+1. Adding the Istio sidecar to every instance of a service
+1. Ensuring requests are routed through the sidecars
## Setting up the Control Plane
-Istio control plane consists of four main services: Pilot, Mixer, CA, and
+Istio control plane consists of four main services: Pilot, Mixer, Citadel, and
the API server.
### API Server
@@ -27,10 +25,8 @@ Istio's API server (based on Kubernetes' API server) provides key functions
such as configuration management and Role-Based Access Control. The API
server requires an
[etcd cluster](https://kubernetes.io/docs/getting-started-guides/scratch/#etcd)
-as a persistent store. Detailed instructions for setting up the API server can
-be found
-[here](https://kubernetes.io/docs/getting-started-guides/scratch/#apiserver-controller-manager-and-scheduler).
-Documentation on set of startup options for the Kubernetes API server can be found [here](https://kubernetes.io/docs/admin/kube-apiserver/)
+as a persistent store. See the
+[instructions for setting up the API server](https://kubernetes.io/docs/getting-started-guides/scratch/#apiserver-controller-manager-and-scheduler).
#### Local Install
@@ -71,21 +67,20 @@ services:
environment:
- SERVICE_IGNORE=1
command: [
- "kube-apiserver", "--etcd-servers", "http://etcd:2379",
- "--service-cluster-ip-range", "10.99.0.0/16",
- "--insecure-port", "8080",
- "-v", "2",
+ "kube-apiserver", "--etcd-servers", "http://etcd:2379",
+ "--service-cluster-ip-range", "10.99.0.0/16",
+ "--insecure-port", "8080",
+ "-v", "2",
"--insecure-bind-address", "0.0.0.0"
]
```
-
### Other Istio Components
-Debian packages for Istio Pilot, Mixer, and CA are available through the
+Debian packages for Istio Pilot, Mixer, and Citadel are available through the
Istio release. Alternatively, these components can be run as Docker
containers (docker.io/istio/pilot, docker.io/istio/mixer,
-docker.io/istio/istio-ca). Note that these components are stateless and can
+docker.io/istio/citadel). Note that these components are stateless and can
be scaled horizontally. Each of these components depends on the Istio API
server, which in turn depends on the etcd cluster for persistence. To
achieve high availability, each control plane service could be run as a
@@ -94,7 +89,6 @@ Nomad, where the
[service stanza](https://www.nomadproject.io/docs/job-specification/service.html)
can be used to describe the desired properties of the control plane services.
-
## Adding Sidecars to Service Instances
Each instance of a service in an application must be accompanied by the
@@ -122,7 +116,7 @@ Part of the sidecar installation should involve setting up appropriate IP
Table rules to transparently route application's network traffic through
the Istio sidecars. The IP table script to setup such forwarding can be
found in the
-[here](https://github.com/istio/istio/blob/master/pilot/docker/prepare_proxy.sh).
+[here](https://raw.githubusercontent.com/istio/istio/master/tools/deb/istio-iptables.sh).
-> Note: This script must be executed before starting the application or
-> the sidecar process.
+> This script must be executed before starting the application or
+> the sidecar process.
diff --git a/_docs/setup/consul/quick-start.md b/_docs/setup/consul/quick-start.md
index 85e1822020c40..adfbe2ec70dec 100644
--- a/_docs/setup/consul/quick-start.md
+++ b/_docs/setup/consul/quick-start.md
@@ -1,18 +1,15 @@
---
title: Quick Start on Docker
-overview: Quick Start instructions to setup the Istio service mesh with Docker Compose.
+description: Quick Start instructions to setup the Istio service mesh with Docker Compose.
-order: 10
+weight: 10
-layout: docs
-type: markdown
---
{% include home.html %}
Quick Start instructions to install and configure Istio in a Docker Compose setup.
-
## Prerequisites
* [Docker](https://docs.docker.com/engine/installation/#cloud)
@@ -23,49 +20,51 @@ Quick Start instructions to install and configure Istio in a Docker Compose setu
1. Go to the [Istio release](https://github.com/istio/istio/releases) page to download the
installation file corresponding to your OS. If you are using a MacOS or Linux system, you can also
run the following command to download and extract the latest release automatically:
- ```bash
- curl -L https://git.io/getLatestIstio | sh -
+
+ ```command
+ $ curl -L https://git.io/getLatestIstio | sh -
```
1. Extract the installation file and change the directory to the file location. The
- installation directory contains:
- * Sample applications in `samples/`
- * The `istioctl` client binary in the `bin/` directory. `istioctl` is used for creating routing rules and policies.
- * The `istio.VERSION` configuration file
+installation directory contains:
+
+ * Sample applications in `samples/`
+ * The `istioctl` client binary in the `bin/` directory. `istioctl` is used for creating routing rules and policies.
+ * The `istio.VERSION` configuration file
1. Add the `istioctl` client to your PATH.
- For example, run the following command on a MacOS or Linux system:
+For example, run the following command on a MacOS or Linux system:
- ```bash
- export PATH=$PWD/bin:$PATH
+ ```command
+ $ export PATH=$PWD/bin:$PATH
```
1. For Linux users, configure the `DOCKER_GATEWAY` environment variable
- ```bash
- export DOCKER_GATEWAY=172.28.0.1:
+ ```command
+ $ export DOCKER_GATEWAY=172.28.0.1:
```
1. Change directory to the root of the Istio installation directory.
1. Bring up the Istio control plane containers:
- ```bash
- docker-compose -f install/consul/istio.yaml up -d
+ ```command
+ $ docker-compose -f install/consul/istio.yaml up -d
```
1. Confirm that all docker containers are running:
- ```bash
- docker ps -a
+ ```command
+ $ docker ps -a
```
> If the Istio Pilot container terminates, ensure that you run the `istioctl context-create` command and re-run the command from the previous step.
-
+
1. Configure `istioctl` to use mapped local port for the Istio API server:
- ```bash
- istioctl context-create --api-server http://localhost:8080
+ ```command
+ $ istioctl context-create --api-server http://localhost:8080
```
## Deploy your application
@@ -73,24 +72,24 @@ Quick Start instructions to install and configure Istio in a Docker Compose setu
You can now deploy your own application or one of the sample applications provided with the
installation like [Bookinfo]({{home}}/docs/guides/bookinfo.html).
-> Note 1: Since there is no concept of pods in a Docker setup, the Istio
-> sidecar runs in the same container as the application. We will use
-> [Registrator](http://gliderlabs.github.io/registrator/latest/) to
+> Since there is no concept of pods in a Docker setup, the Istio
+> sidecar runs in the same container as the application. We will
+> use [Registrator](https://gliderlabs.github.io/registrator/latest/) to
> automatically register instances of services in the Consul service
> registry.
+>
+> The application must use HTTP/1.1 or HTTP/2.0 protocol for all its HTTP traffic because HTTP/1.0 is not supported.
-> Note 2: the application must use HTTP/1.1 or HTTP/2.0 protocol for all its HTTP traffic because HTTP/1.0 is not supported.
-
-```bash
-docker-compose -f .yaml up -d
+```command
+$ docker-compose -f .yaml up -d
```
## Uninstalling
-1. Uninstall Istio core components by removing the docker containers:
+Uninstall Istio core components by removing the docker containers:
-```bash
-docker-compose -f install/consul/istio.yaml down
+```command
+$ docker-compose -f install/consul/istio.yaml down
```
## What's next
diff --git a/_docs/setup/eureka/index.md b/_docs/setup/eureka/index.md
index 14ad5fe21fec5..51cacef1ebb0a 100644
--- a/_docs/setup/eureka/index.md
+++ b/_docs/setup/eureka/index.md
@@ -1,11 +1,9 @@
---
title: Eureka
-overview: Instructions for installing the Istio control plane in a Eureka based environment.
+description: Instructions for installing the Istio control plane in a Eureka based environment.
-order: 30
+weight: 30
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/setup/eureka/install.md b/_docs/setup/eureka/install.md
index 6cb1a0276be15..6c56dadf813e4 100644
--- a/_docs/setup/eureka/install.md
+++ b/_docs/setup/eureka/install.md
@@ -1,22 +1,20 @@
---
title: Installation
-overview: Instructions for installing the Istio control plane in an Eureka based environment.
+description: Instructions for installing the Istio control plane in an Eureka based environment.
-order: 30
+weight: 30
-layout: docs
-type: markdown
---
-Using Istio in a non-kubernetes environment involves a few key tasks:
+Using Istio in a non-Kubernetes environment involves a few key tasks:
1. Setting up the Istio control plane with the Istio API server
-2. Adding the Istio sidecar to every instance of a service
-3. Ensuring requests are routed through the sidecars
+1. Adding the Istio sidecar to every instance of a service
+1. Ensuring requests are routed through the sidecars
## Setting up the control plane
-Istio control plane consists of four main services: Pilot, Mixer, CA, and
+Istio control plane consists of four main services: Pilot, Mixer, Citadel, and
the API server.
### API server
@@ -27,9 +25,7 @@ server requires an
[etcd cluster](https://kubernetes.io/docs/getting-started-guides/scratch/#etcd)
as a persistent store. Detailed instructions for setting up the API server can
be found
-[here](https://kubernetes.io/docs/getting-started-guides/scratch/#apiserver-controller-manager-and-scheduler).
-Documentation on set of startup options for the Kubernetes API server can be found
-[here](https://kubernetes.io/docs/admin/kube-apiserver/)
+[here](https://kubernetes.io/docs/getting-started-guides/scratch/#apiserver-controller-manager-and-scheduler).
#### Local install
@@ -69,25 +65,23 @@ services:
environment:
- SERVICE_IGNORE=1
command: [
- "kube-apiserver", "--etcd-servers", "http://etcd:2379",
- "--service-cluster-ip-range", "10.99.0.0/16",
- "--insecure-port", "8080",
- "-v", "2",
+ "kube-apiserver", "--etcd-servers", "http://etcd:2379",
+ "--service-cluster-ip-range", "10.99.0.0/16",
+ "--insecure-port", "8080",
+ "-v", "2",
"--insecure-bind-address", "0.0.0.0"
]
```
-
### Other Istio components
-Debian packages for Istio Pilot, Mixer, and CA are available through the
+Debian packages for Istio Pilot, Mixer, and Citadel are available through the
Istio release. Alternatively, these components can be run as Docker
containers (docker.io/istio/pilot, docker.io/istio/mixer,
-docker.io/istio/istio-ca). Note that these components are stateless and can
+docker.io/istio/citadel). Note that these components are stateless and can
be scaled horizontally. Each of these components depends on the Istio API
server, which in turn depends on the etcd cluster for persistence.
-
## Adding sidecars to service instances
Each instance of a service in an application must be accompanied by the
@@ -97,12 +91,12 @@ into these components. For example, if your infrastructure uses VMs, the
Istio sidecar process must be run on each VM that needs to be part of the
service mesh.
-## Routing traffic through Istio Sidecar
+## Routing traffic through the Istio sidecar
Part of the sidecar installation should involve setting up appropriate IP
Table rules to transparently route application's network traffic through
the Istio sidecars. The IP table script to setup such forwarding can be
-found [here](https://github.com/istio/istio/blob/master/pilot/docker/prepare_proxy.sh).
+found [here](https://raw.githubusercontent.com/istio/istio/master/tools/deb/istio-iptables.sh).
-> Note: This script must be executed before starting the application or
-> the sidecar process.
+> This script must be executed before starting the application or
+> the sidecar process.
diff --git a/_docs/setup/eureka/quick-start.md b/_docs/setup/eureka/quick-start.md
index 3be8cc2b36a9a..1b15ccf7fa4c1 100644
--- a/_docs/setup/eureka/quick-start.md
+++ b/_docs/setup/eureka/quick-start.md
@@ -1,18 +1,15 @@
---
title: Quick Start on Docker
-overview: Quick Start instructions to setup the Istio service mesh with Docker Compose.
+description: Quick Start instructions to setup the Istio service mesh with Docker Compose.
-order: 10
+weight: 10
-layout: docs
-type: markdown
---
{% include home.html %}
Quick Start instructions to install and configure Istio in a Docker Compose setup.
-
## Prerequisites
* [Docker](https://docs.docker.com/engine/installation/#cloud)
@@ -21,44 +18,47 @@ Quick Start instructions to install and configure Istio in a Docker Compose setu
## Installation steps
1. Go to the [Istio release](https://github.com/istio/istio/releases) page to download the
- installation file corresponding to your OS. If you are using a MacOS or Linux system, you can also
- run the following command to download and extract the latest release automatically:
- ```bash
- curl -L https://git.io/getLatestIstio | sh -
+installation file corresponding to your OS. If you are using a MacOS or Linux system, you can also
+run the following command to download and extract the latest release automatically:
+
+ ```command
+ $ curl -L https://git.io/getLatestIstio | sh -
```
1. Extract the installation file and change the directory to the file location. The
- installation directory contains:
- * Sample applications in `samples/`
- * The `istioctl` client binary in the `bin/` directory. `istioctl` is used for creating routing rules and policies.
- * The `istio.VERSION` configuration file
+installation directory contains:
+
+ * Sample applications in `samples/`
+ * The `istioctl` client binary in the `bin/` directory. `istioctl` is used for creating routing rules and policies.
+ * The `istio.VERSION` configuration file
1. Add the `istioctl` client to your PATH.
- For example, run the following command on a MacOS or Linux system:
+For example, run the following command on a MacOS or Linux system:
- ```bash
- export PATH=$PWD/bin:$PATH
+ ```command
+ $ export PATH=$PWD/bin:$PATH
```
1. Change directory to the root of the Istio installation directory.
1. Bring up the Istio control plane containers:
- ```bash
- docker-compose -f install/eureka/istio.yaml up -d
+ ```command
+ $ docker-compose -f install/eureka/istio.yaml up -d
```
1. Confirm that all docker containers are running:
- ```bash
- docker ps -a
+ ```command
+ $ docker ps -a
```
- > If the Istio Pilot container terminates, ensure that you run the `istioctl context-create` comamnd and re-run the command from the previous step.
-
+
+ > If the Istio Pilot container terminates, ensure that you run the `istioctl context-create` command and re-run the command from the previous step.
+
1. Configure `istioctl` to use mapped local port for the Istio API server:
- ```bash
- istioctl context-create --context istio-local --api-server http://localhost:8080
+ ```command
+ $ istioctl context-create --context istio-local --api-server http://localhost:8080
```
## Deploy your application
@@ -66,24 +66,24 @@ Quick Start instructions to install and configure Istio in a Docker Compose setu
You can now deploy your own application or one of the sample applications provided with the
installation like [Bookinfo]({{home}}/docs/guides/bookinfo.html).
-> Note 1: Since there is no concept of pods in a Docker setup, the Istio
-> sidecar runs in the same container as the application. We will use
-> [Registrator](http://gliderlabs.github.io/registrator/latest/) to
-> automatically register instances of services in the Consul service
+> Since there is no concept of pods in a Docker setup, the Istio
+> sidecar runs in the same container as the application. We will
+> use [Registrator](https://gliderlabs.github.io/registrator/latest/) to
+> automatically register instances of services in the Eureka service
> registry.
+>
+> The application must use HTTP/1.1 or HTTP/2.0 protocol for all its HTTP traffic because HTTP/1.0 is not supported.
-> Note 2: the application must use HTTP/1.1 or HTTP/2.0 protocol for all its HTTP traffic because HTTP/1.0 is not supported.
-
-```bash
-docker-compose -f .yaml up -d
+```command
+$ docker-compose -f .yaml up -d
```
## Uninstalling
-1. Uninstall Istio core components by removing the docker containers:
+Uninstall Istio core components by removing the docker containers:
-```bash
-docker-compose -f install/eureka/istio.yaml down
+```command
+$ docker-compose -f install/eureka/istio.yaml down
```
## What's next
diff --git a/_docs/setup/index.md b/_docs/setup/index.md
index 2fbb1e459a25f..84b81bdfde555 100644
--- a/_docs/setup/index.md
+++ b/_docs/setup/index.md
@@ -1,11 +1,9 @@
---
title: Setup
-overview: Setup contains instructions for installing the Istio control plane in various environments (e.g., Kubernetes, Consul, etc.), as well as instructions for installing the sidecar in the application deployment.
+description: Setup contains instructions for installing the Istio control plane in various environments (e.g., Kubernetes, Consul, etc.), as well as instructions for installing the sidecar in the application deployment.
-order: 15
+weight: 15
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/setup/kubernetes/advanced-install.md b/_docs/setup/kubernetes/advanced-install.md
index 62e2084d6262c..902a89112b29d 100644
--- a/_docs/setup/kubernetes/advanced-install.md
+++ b/_docs/setup/kubernetes/advanced-install.md
@@ -1,11 +1,9 @@
---
title: Advanced Install Options
-overview: Instructions for customizing the Istio installation.
+description: Instructions for customizing the Istio installation.
-order: 20
+weight: 20
draft: true
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -20,7 +18,6 @@ L7 routing capabilities such as version-aware routing, header based
routing, gRPC/HTTP2 proxying, tracing, etc. Deploy Istio Pilot only and
disable other components. Do not deploy the Istio initializer.
-
## Ingress Controller with Telemetry & Policies
By deploying Istio Pilot and Mixer, the Ingress controller configuration
diff --git a/_docs/setup/kubernetes/ansible-install.md b/_docs/setup/kubernetes/ansible-install.md
index 1933c9f4186d2..cc6c720c14900 100644
--- a/_docs/setup/kubernetes/ansible-install.md
+++ b/_docs/setup/kubernetes/ansible-install.md
@@ -1,124 +1,107 @@
---
-title: Installing with Ansible
-overview: Instructions on using the included Ansible playbook to perform installation.
+title: Installation with Ansible
+description: Install Istio with the included Ansible playbook.
-order: 40
+weight: 40
-layout: docs
-type: markdown
---
-{% include home.html %}
-The Ansible scenario defined within this project will allow you to :
+{% include home.html %}
-- Deploy Istio on Kubernetes or Openshift by specifying different parameters (version, enable auth, deploy bookinfo, ...)
-- Specify the addons to be deployed such as `Grafana`, `Prometheus`, `Servicegraph`, `Zipkin` or `Jaeger`
+Instructions for the installation and configuration of Istio using Ansible.
## Prerequisites
-- [Ansible 2.4](http://docs.ansible.com/ansible/latest/intro_installation.html)
+The following instructions require [Ansible 2.4](https://docs.ansible.com/ansible/latest/intro_installation.html). Additionally Kubernetes **1.7.3 or newer** is required.
-Refer to the Ansible Installation Doc on how to install Ansible on your machine.
-To use [Minishift](https://docs.openshift.org/latest/minishift/command-ref/minishift_start.html) or [Minikube](https://kubernetes.io/docs/getting-started-guides/minikube/) for local clusters, please refer to their respective documentation.
+The following prerequisites must be met if using OpenShift.
-Furthermore, the following requirements must be met for the respective clusters
-* Kubernetes:
- - Minimum Version: `1.7.2`
- - `kubectl` configured to be able to access the cluster
-* Openshift
- - Minimum Version: `3.7.0`
- - `oc` configured to be able to access the cluster
- - User has logged in to the cluster
- - User has `admin` role on the Openshift platform
+* Minimum Version: **3.7.0**
+* **oc** configured to be able to access the cluster
+* User has logged in to the cluster
+* User has `admin` role on OpenShift
-## Execution
+## Deploy with Ansible
-**Important**: All invocations of the Ansible playbooks need to take place at the `install/ansible` path of the project.
-Failing to do so will result in unexpected errors
+**Important**: All execution of the Ansible playbooks must take place in the `install/ansible` path of Istio.
-The simplest execution command looks like the following:
-
-```bash
-ansible-playbook main.yml
-```
+This playbook will download and install Istio locally on your machine. To deploy the default settings of
+Istio on OpenShift, the following command may be used:
-Remarks:
-- This Ansible playbook is idempotent. If you find examples of lacking idempotency please file a bug.
-- The default parameters that apply to this role can be found in `istio/defaults/main.yml`.
-
-The full list of configurable parameters is as follows:
-
-| Parameter | Description | Values |
-| --- | --- | --- |
-| `cluster_flavour` | Defines whether the target cluster is a Kubernetes or an Openshift cluster. | Valid values are `k8s` and `ocp` (default) |
-| `github_api_token` | The API token used for authentication when calling the GitHub API | Any valid GitHub API token or empty (default) |
-| `cmd_path` | Can be used when the user does not have the `oc` or `kubectl` binary on the PATH | Defaults to expecting the binary is on the path |
-| `istio.release_tag_name` | Should be a valid Istio release version. If left empty, the latest Istio release will be installed | `0.2.12`, `0.3.0`, `0.4.0`, `0.5.0`, `0.5.1` |
-| `istio.dest` | The directory of the target machine where Istio will be installed | `~/.istio` (default) |
-| `istio.auth` | Boolean value to install Istio using MUTUAL_TLS | `true` and `false` (default) |
-| `istio.namespace` | The namespace where Istio will be installed | `istio-system` (default) |
-| `istio.addon` | Which Istio addons should be installed as well | This field is an array field, which by default contains `grafana`, `prometheus`, `zipkin` and `servicegraph` |
-| `istio.jaeger` | Whether or not Jaeger tracing should also be installed | `true` and `false` (default)|
-| `istio.delete_resources` | Boolean value to delete resources created under the Istio namespace | `true` and `false` (default)|
-| `istio.samples` | Array containing the names of the samples that should be installed | Valid names are: `bookinfo`, `helloworld`, `httpbin`, `sleep`
-
-
-An example of an invocation where we want to deploy Jaeger instead of Zipkin would be:
-```bash
-ansible-playbook main.yml -e '{"istio": {"jaeger": true}}'
+```command
+$ ansible-playbook main.yml
```
+## Customization with Ansible
-This playbook will take care of downloading and installing Istio locally on your machine, before deploying the necessary Kubernetes / Openshift
-pods, services etc. on to the cluster.
-
-### Note on istio.delete_resources
+The Ansible playbook ships with reasonable defaults.
-Activating the `istio.delete_resources` flag will result in any Istio related resources being deleted from the cluster before Istio is reinstalled.
+The currently exposed options are:
-In order to avoid any inconsistency issues, this flag should only be used to reinstall the same version of Istio on a cluster. If a new version
-of Istio need to be reinstalled, then it is advisable to delete the `istio-system` namespace before executing the playbook (in which case the
-`istio.delete_resources` flag does not need to be activated)
+| Parameter | Description | Values | Default |
+| --- | --- | --- | --- |
+| `cluster_flavour` | Define the target cluster type | `k8s` or `ocp` | `ocp` |
+| `github_api_token` | A valid GitHub API authentication token used for authenticating with GitHub | A valid GitHub API token | empty |
+| `cmd_path` | Override the path to `kubectl` or `oc` | A valid path to a `kubectl` or `oc` binary | `$PATH/oc` |
+| `istio.release_tag_name` | Istio release version to install | Any valid Istio release version | the latest Istio release version |
+| `istio.dest` | The directory of the target machine where Istio will be installed | Any directory with read+write permissions | `~/.istio` |
+| `istio.auth` | Install with mutual TLS | `true` or `false` | `false` |
+| `istio.namespace` | Kubernetes namespace where Istio will be installed | any namespace may be specified | `istio-system` |
+| `istio.addon` | Istio addons to install | array containing any of `grafana`, `prometheus`, `zipkin`, `jaeger`, or `servicegraph` | all addons are enabled by default |
+| `istio.delete_resources` | Delete resources created under Istio namespace | `true` or `false` | `false` |
+| `istio.samples` | Array containing the names of the samples that should be installed | `bookinfo`, `helloworld`, `httpbin`, `sleep` | none |
-## Typical use cases
+## Default installation
-The following commands are some examples of how a user could install Istio using this Ansible role:
+Operator installs Istio using all defaults on OpenShift:
-- User executes installs Istio accepting all defaults
-```bash
-ansible-playbook main.yml
+```command
+$ ansible-playbook main.yml
```
-- User installs Istio on to a Kubernetes cluster
-```bash
-ansible-playbook main.yml -e '{"cluster_flavour": "k8s"}'
+## Operational overrides
+
+There may be circumstances in which defaults require overrides.
+
+The following commands describe how an operator could use overrides with this Ansible playbook:
+
+Operator installs Istio on Kubernetes:
+
+```command
+$ ansible-playbook main.yml -e '{"cluster_flavour": "k8s"}'
```
-- User installs Istio on to a Kubernetes cluster and the path to `kubectl` is expicitly set (perhaps it's not on the PATH)
-```bash
-ansible-playbook main.yml -e '{"cluster_flavour": "k8s", "cmd_path": "~/kubectl"}'
+Operator installs Istio on Kubernetes and the path to `kubectl` is explicitly set:
+
+```command
+$ ansible-playbook main.yml -e '{"cluster_flavour": "k8s", "cmd_path": "~/kubectl"}'
```
-- User wants to install Istio on Openshift with settings other than the default
-```bash
-ansible-playbook main.yml -e '{"istio": {"release_tag_name": "0.4.0", "auth": true, "jaeger": true, "delete_resources": true}}'
+Operator installs Istio on OpenShift with settings other than the default:
+
+```command
+$ ansible-playbook main.yml -e '{"istio": {"release_tag_name": "0.6.0", "auth": true, "delete_resources": true}}'
```
-- User wants to install Istio on Openshift but with custom add-on settings
-```bash
-ansible-playbook main.yml -e '{"istio": {"delete_resources": true, "addon": ["grafana", "prometheus"]}}'
+Operator installs Istio on OpenShift with customized addons:
+
+```command
+$ ansible-playbook main.yml -e '{"istio": {"delete_resources": true, "addon": ["grafana", "prometheus", "jaeger"]}}'
```
-- User wants to install Istio on Openshift and additionally wants to deploy some of the samples
-```bash
-ansible-playbook main.yml -e '{"istio": {"samples": ["helloworld", "bookinfo"]}}'
+Operator installs Istio on OpenShift and additionally wants to deploy some of the samples:
+
+```command
+$ ansible-playbook main.yml -e '{"istio": {"samples": ["helloworld", "bookinfo"]}}'
```
-The list of available addons can be found at `istio/vars.main.yml` under the name `istio_all_addons`.
-Jaeger is not installed using the `addons` property, but can be installed by enabling `"jaeger": true` like in one of the previous examples.
-It should be noted that when Jaeger is enabled, Zipkin is disabled whether or not it's been selected in the addons section.
+> When Jaeger is enabled, Zipkin is disabled even when Zipkin is selected in the addons.
+
+## Uninstalling
+
+If a different version of Istio is desired, delete the `istio-system` namespace before executing the playbook.
+In this case, the `istio.delete_resources` flag does not need to be set.
-## Adding istioctl to PATH
+Setting `istio.delete_resources` to true will delete the Istio control plane from the cluster.
-After executing the playbook if it is desired that the `istioctl` command line tool be added to the PATH,
-search for `Add Istio to PATH` in the output and execute the commands that are outputted
\ No newline at end of file
+> In order to avoid any inconsistencies, this flag should only be used to reinstall the same version of Istio on a cluster.
diff --git a/_docs/setup/kubernetes/helm-install.md b/_docs/setup/kubernetes/helm-install.md
new file mode 100644
index 0000000000000..de8048d3f1c47
--- /dev/null
+++ b/_docs/setup/kubernetes/helm-install.md
@@ -0,0 +1,114 @@
+---
+title: Installation with Helm
+description: Install Istio with the included Helm chart.
+
+weight: 30
+
+redirect_from: /docs/setup/kubernetes/helm.html
+
+---
+
+{% include home.html %}
+
+Quick start instructions for the setup and configuration of Istio using the Helm package manager.
+
+
+> Installation of Istio prior to version 0.8.0 with Helm is unstable and not recommended.
+
+## Prerequisites
+
+* Kubernetes **1.7.3 or newer** is required.
+* Helm **2.7.2 or newer** is required.
+* If you want to manage Istio releases with [Tiller](https://github.com/kubernetes/helm#helm-in-a-handbasket),
+the capability to install service accounts is required.
+* If you want to enable [automatic sidecar injection]({{home}}/docs/setup/kubernetes/sidecar-injection.html#automatic-sidecar-injection), see its documentation for the Kubernetes environmental requirements.
+
+## Deploy Istio using Helm
+
+There are two techniques for using Helm to deploy Istio. The first
+technique is to use `helm template` to render a manifest and use `kubectl`
+to create it.
+
+The second technique uses Helm's Tiller service to manage the lifecycle
+of Istio.
+
+### Render Kubernetes manifest with Helm and deploy with kubectl
+
+This is the most heavily tested method of deploying Istio. During the
+continuous integration automated testing and release process, the
+`helm` binary in `template` mode is used to render the various manifests
+produced for Istio.
+
+1. Create an `istio.yaml` Kubernetes manifest:
+ ```command
+ $ helm template install/kubernetes/helm/istio --name istio --namespace istio-system --set prometheus.enabled=true > $HOME/istio.yaml
+ ```
+
+1. Create the Istio control plane from `istio.yaml` manifest:
+ ```command
+ $ kubectl create ns istio-system
+ $ kubectl create -f $HOME/istio.yaml
+ ```
+
+### Alternatively, use Helm and Tiller to manage the Istio deployment
+
+
+> Upgrading Istio using Helm is not validated.
+
+1. If a service account has not already been installed for Helm, please install one:
+ ```command
+ $ kubectl create -f install/kubernetes/helm/helm-service-account.yaml
+ ```
+
+1. Initialize Helm:
+ ```command
+ $ helm init --service-account tiller
+ ```
+
+1. Create the Helm chart:
+ ```command
+ $ helm install install/kubernetes/helm/istio --name istio --namespace istio-system
+ ```
+
+## Customization with Helm
+
+The Helm chart ships with reasonable defaults. There may be circumstances in which defaults require overrides.
+To override Helm values, use `--set key=value` argument during the `helm install` command. Multiple `--set` operations
+may be used in the same Helm operation.
+
+Helm charts expose configuration options which are currently in alpha. The currently exposed options are explained in the
+following table:
+
+| Parameter | Description | Values | Default |
+| --- | --- | --- | --- |
+| `global.hub` | Specifies the HUB for most images used by Istio | registry/namespace | `docker.io/istionightly` |
+| `global.tag` | Specifies the TAG for most images used by Istio | valid image tag | `circleci-nightly` |
+| `global.proxy.image` | Specifies the proxy image name | valid proxy name | `proxy` |
+| `global.imagePullPolicy` | Specifies the image pull policy | valid image pull policy | `IfNotPresent` |
+| `global.securityEnabled` | Specifies whether Citadel (the Istio CA) should be installed | true/false | `true` |
+| `global.controlPlaneSecurityEnabled` | Specifies whether control plane mTLS is enabled | true/false | `false` |
+| `global.mtls.enabled` | Specifies whether mTLS is enabled by default between services | true/false | `false` |
+| `global.mtls.mtlsExcludedServices` | List of FQDNs to exclude from mTLS | a list of FQDNs | `- kubernetes.default.svc.cluster.local` |
+| `global.rbacEnabled` | Specifies whether to create Istio RBAC rules or not | true/false | `true` |
+| `global.refreshInterval` | Specifies the mesh discovery refresh interval | integer followed by s | `10s` |
+| `global.arch.amd64` | Specifies the scheduling policy for `amd64` architectures | 0 = never, 1 = least preferred, 2 = no preference, 3 = most preferred | `2` |
+| `global.arch.s390x` | Specifies the scheduling policy for `s390x` architectures | 0 = never, 1 = least preferred, 2 = no preference, 3 = most preferred | `2` |
+| `global.arch.ppc64le` | Specifies the scheduling policy for `ppc64le` architectures | 0 = never, 1 = least preferred, 2 = no preference, 3 = most preferred | `2` |
+| `galley.enabled` | Specifies whether Galley should be installed for server-side config validation. Requires k8s >= 1.9 | true/false | `false` |
+
+> The Helm chart also offers significant customization options per individual
+service. Customize these per-service options at your own risk.
+The per-service options are exposed via the
+[`values.yaml` file](https://raw.githubusercontent.com/istio/istio/master/install/kubernetes/helm/istio/values.yaml).
+
+## Uninstall Istio
+
+* Uninstall using kubectl:
+```command
+$ kubectl delete -f $HOME/istio.yaml
+```
+
+* Uninstall using Helm:
+```command
+$ helm delete --purge istio
+```
diff --git a/_docs/setup/kubernetes/helm.md b/_docs/setup/kubernetes/helm.md
deleted file mode 100644
index 88babbccd6791..0000000000000
--- a/_docs/setup/kubernetes/helm.md
+++ /dev/null
@@ -1,76 +0,0 @@
----
-title: Istio Helm Chart Instructions
-overview: Instructions for the setup and configuration of Istio using the Helm package manager.
-
-order: 30
-
-layout: docs
-type: markdown
----
-
-{% include home.html %}
-
-Quick Start instructions for the setup and configuration of Istio using the Helm package manager.
-
-**Warning: Helm charts are currently broken in 0.5.0**
-
-## Prerequisites
-
-The following instructions require you have access to Helm **2.7.2 or newer** in your Kubernetes environment or
-alternately the ability to modify RBAC rules required to install Helm. Additionally Kubernetes **1.7.3 or newer**
-is also required. Finally this Helm chart **does not** yet implement automatic sidecar injection.
-
-## Deploy with Helm
-
-1. If a service account has not already been installed for Helm, please install one:
- ```bash
- kubectl create -f install/kubernetes/helm/helm-service-account.yaml
- ```
-
-1. Initialize Helm:
- ```bash
- helm init --service-account tiller
- ```
-
-1. Create the Helm chart:
- ```bash
- helm install install/kubernetes/helm/istio --name istio
- ```
-
-## Customization with Helm
-
-The Helm chart ships with reasonable defaults. There may be circumstances in which defaults require overrides.
-To override Helm values, use `--set key=value` argument during the `helm install` command. Multiple `--set` operations
-may be used in the same Helm operation.
-
-Helm charts expose configuration options which are currently in alpha. The currently exposed options are explained in the
-following table:
-
-| Helm Variable | Possible Values | Default Value | Purpose of Key |
-|------------------------------|--------------------|----------------------------|------------------------------------------------|
-| global.namespace | any Kubernetes ns | istio-system | Specifies the namespace for Istio |
-| global.initializer.enabled | true/false | true | Specifies whether to use the Initializer |
-| global.proxy.hub | registry+namespace | release registry/namespace | Specifies the HUB for the proxy image |
-| global.proxy.tag | image tag | release unique hash | Specifies the TAG for the proxy image |
-| global.proxy.debug | true/false | false | Specifies whether proxy is run in debug mode |
-| global.pilot.hub | registry+namespace | release registry/namespace | Specifies the HUB for the pilot image |
-| global.pilot.tag | image tag | release unique hash | Specifies the TAG for the pilot image |
-| global.pilot.enabled | true/false | true | Specifies whether pilot is enabled/disabled |
-| global.security.hub | registry+namespace | release registry/namespace | Specifies the HUB for the ca image |
-| global.security.tag | image tag | release unique hash | Specifies the TAG for the ca image |
-| global.security.enabled | true/false | false | Specifies whether security is enabled/disabled |
-| global.mixer.hub | registry+namespace | release registry/namespace | Specifies the HUB for the mixer image |
-| global.mixer.tag | image tag | release unique hash | Specifies the TAG for the mixer image |
-| global.mixer.enabled | true/false | true | Specifies whether mixer is enabled/disabled |
-| global.hyperkube.hub | registry+namesapce | quay.io/coreos/hyperkube | Specifies the HUB for the hyperkube image |
-| global.hyperkube.tag | image tag | v1.7.6_coreos.0 | Specifies the TAG for the hyperkube image |
-| global.ingress.use_nodeport | true/false | false | Specifies whether to use nodeport or LB |
-| global.ingress.nodeport_port | 32000-32767 | 32000 | If nodeport is used, specifies its port |
-
-## Uninstalling
-
-* Uninstall Istio:
-
- ```bash
- helm delete --purge istio
- ```
diff --git a/_docs/setup/kubernetes/img/dm_gcp_iam.png b/_docs/setup/kubernetes/img/dm_gcp_iam.png
index 394520e4fcdbc..48826f38c5905 100644
Binary files a/_docs/setup/kubernetes/img/dm_gcp_iam.png and b/_docs/setup/kubernetes/img/dm_gcp_iam.png differ
diff --git a/_docs/setup/kubernetes/img/dm_gcp_iam_role.png b/_docs/setup/kubernetes/img/dm_gcp_iam_role.png
new file mode 100644
index 0000000000000..33517381eb9a7
Binary files /dev/null and b/_docs/setup/kubernetes/img/dm_gcp_iam_role.png differ
diff --git a/_docs/setup/kubernetes/index.md b/_docs/setup/kubernetes/index.md
index 64059bbd11ebb..71ca6e948a97f 100644
--- a/_docs/setup/kubernetes/index.md
+++ b/_docs/setup/kubernetes/index.md
@@ -1,11 +1,9 @@
---
title: Kubernetes
-overview: Instructions for installing the Istio control plane on Kubernetes and adding VMs into the mesh.
+description: Instructions for installing the Istio control plane on Kubernetes and adding VMs into the mesh.
-order: 10
+weight: 10
-layout: docs
-type: markdown
redirect_from: "/docs/tasks/installing-istio.html"
toc: false
---
diff --git a/_docs/setup/kubernetes/mesh-expansion.md b/_docs/setup/kubernetes/mesh-expansion.md
index a5e978135b026..6c6a6729b6077 100644
--- a/_docs/setup/kubernetes/mesh-expansion.md
+++ b/_docs/setup/kubernetes/mesh-expansion.md
@@ -1,11 +1,9 @@
---
-title: Istio Mesh Expansion
-overview: Instructions for integrating VMs and bare metal hosts into an Istio mesh deployed on Kubernetes.
+title: Mesh Expansion
+description: Instructions for integrating VMs and bare metal hosts into an Istio mesh deployed on Kubernetes.
-order: 60
+weight: 60
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -21,7 +19,7 @@ typically requires a VPC or a VPN, as well as a container network that
provides direct (without NAT or firewall deny) routing to the endpoints. The machine
is not required to have access to the cluster IP addresses assigned by Kubernetes.
-* The Istio control plane services (Pilot, Mixer, CA) and Kubernetes DNS server must be accessible
+* The Istio control plane services (Pilot, Mixer, Citadel) and Kubernetes DNS server must be accessible
from the VMs. This is typically done using an [Internal Load
Balancer](https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer).
You can also use NodePort, run Istio components on VMs, or use custom network configurations,
@@ -39,85 +37,76 @@ You should customize it based on your provisioning tools and DNS requirements.
### Preparing the Kubernetes cluster for expansion
-* Setup Internal Load Balancers (ILBs) for Kube DNS, Pilot, Mixer and CA. This step is specific to
+* Setup Internal Load Balancers (ILBs) for Kube DNS, Pilot, Mixer and Citadel. This step is specific to
each cloud provider, so you may need to edit annotations.
-```
-kubectl apply -f install/kubernetes/mesh-expansion.yaml
-```
+ ```command
+ $ kubectl apply -f install/kubernetes/mesh-expansion.yaml
+ ```
* Generate the Istio 'cluster.env' configuration to be deployed in the VMs. This file contains
the cluster IP address ranges to intercept.
-```bash
-export GCP_OPTS="--zone MY_ZONE --project MY_PROJECT"
-```
-```bash
-install/tools/setupMeshEx.sh generateClusterEnv MY_CLUSTER_NAME
-```
+ ```command
+ $ export GCP_OPTS="--zone MY_ZONE --project MY_PROJECT"
+ $ install/tools/setupMeshEx.sh generateClusterEnv MY_CLUSTER_NAME
+ ```
-Example generated file:
+    Here's an example generated file:
-```bash
-cat cluster.env
-```
-```
-ISTIO_SERVICE_CIDR=10.63.240.0/20
-```
+ ```command
+ $ cat cluster.env
+ ISTIO_SERVICE_CIDR=10.63.240.0/20
+ ```
* Generate DNS configuration file to be used in the VMs. This will allow apps on the VM to resolve
cluster service names, which will be intercepted by the sidecar and forwarded.
-```bash
-# Make sure your kubectl context is set to your cluster
-install/tools/setupMeshEx.sh generateDnsmasq
-```
+ ```command
+ $ install/tools/setupMeshEx.sh generateDnsmasq
+ ```
-Example generated file:
+    Here's an example generated file:
-```bash
-cat kubedns
-```
-```
-server=/svc.cluster.local/10.150.0.7
-address=/istio-mixer/10.150.0.8
-address=/istio-pilot/10.150.0.6
-address=/istio-ca/10.150.0.9
-address=/istio-mixer.istio-system/10.150.0.8
-address=/istio-pilot.istio-system/10.150.0.6
-address=/istio-ca.istio-system/10.150.0.9
-```
+ ```command
+ $ cat kubedns
+ server=/svc.cluster.local/10.150.0.7
+ address=/istio-mixer/10.150.0.8
+ address=/istio-pilot/10.150.0.6
+ address=/istio-citadel/10.150.0.9
+ address=/istio-mixer.istio-system/10.150.0.8
+ address=/istio-pilot.istio-system/10.150.0.6
+ address=/istio-citadel.istio-system/10.150.0.9
+ ```
### Setting up the machines
As an example, you can use the following "all inclusive" script to copy
and install the setup:
-```bash
-# Check what the script does to see that it meets your needs.
-export GCP_OPTS="--zone MY_ZONE --project MY_PROJECT"
-# change to the namespace you wish to use for VMs but 'vm' is what the bookinfo guide assumes
-export SERVICE_NAMESPACE=vm
+```command
+$ export GCP_OPTS="--zone MY_ZONE --project MY_PROJECT"
+$ export SERVICE_NAMESPACE=vm
```
If you are running on a GCE VM, run
-```bash
-install/tools/setupMeshEx.sh gceMachineSetup VM_NAME
+```command
+$ install/tools/setupMeshEx.sh gceMachineSetup VM_NAME
```
Otherwise, run
-```bash
-install/tools/setupMeshEx.sh machineSetup VM_NAME
+
+```command
+$ install/tools/setupMeshEx.sh machineSetup VM_NAME
```
GCE provides better user experience since node agent can always relies on
-GCE metadata instance document to authenticate to Istio CA. For everything
+GCE metadata instance document to authenticate to Citadel. For everything
else, e.g., on-prem or raw VM, we have to bootstrap a key/cert as credential,
which typically has a limited lifetime. And when the cert expires, you have to
rerun the above command.
-
Or the equivalent manual steps:
------ Manual setup steps begin ------
@@ -129,88 +118,93 @@ Save the files as `/etc/dnsmasq.d/kubedns` and `/var/lib/istio/envoy/cluster.env
adding it to `/etc/resolv.conf` directly or via DHCP scripts. To verify, check that the VM can resolve
names and connect to pilot, for example:
-On the VM/external host:
-```bash
-host istio-pilot.istio-system
-```
-Example generated message:
-```
-# Verify you get the same address as shown as "EXTERNAL-IP" in 'kubectl get svc -n istio-system istio-pilot-ilb'
-istio-pilot.istio-system has address 10.150.0.6
-```
-Check that you can resolve cluster IPs. The actual address will depend on your deployment.
-```bash
-host istio-pilot.istio-system.svc.cluster.local.
-```
-Example generated message:
-```
-istio-pilot.istio-system.svc.cluster.local has address 10.63.247.248
-```
-Check istio-ingress similarly:
-```bash
-host istio-ingress.istio-system.svc.cluster.local.
-```
-Example generated message:
-```
-istio-ingress.istio-system.svc.cluster.local has address 10.63.243.30
-```
+ On the VM/external host:
+
+ ```command
+ $ host istio-pilot.istio-system
+ ```
+
+ Example generated message:
+
+ ```plain
+    istio-pilot.istio-system has address 10.150.0.6
+ ```
+
+ Check that you can resolve cluster IPs. The actual address will depend on your deployment.
+
+ ```command
+ $ host istio-pilot.istio-system.svc.cluster.local.
+ ```
+
+ Example generated message:
+
+ ```plain
+ istio-pilot.istio-system.svc.cluster.local has address 10.63.247.248
+ ```
+
+ Check istio-ingress similarly:
+
+ ```command
+ $ host istio-ingress.istio-system.svc.cluster.local.
+ ```
+
+ Example generated message:
+
+ ```plain
+ istio-ingress.istio-system.svc.cluster.local has address 10.63.243.30
+ ```
* Verify connectivity by checking whether the VM can connect to Pilot and to an endpoint.
-```bash
-curl 'http://istio-pilot.istio-system:8080/v1/registration/istio-pilot.istio-system.svc.cluster.local|http-discovery'
-```
-```
-{
- "hosts": [
+ ```command
+ $ curl 'http://istio-pilot.istio-system:8080/v1/registration/istio-pilot.istio-system.svc.cluster.local|http-discovery'
+ ```
+
+ ```json
{
- "ip_address": "10.60.1.4",
- "port": 8080
+ "hosts": [
+ {
+ "ip_address": "10.60.1.4",
+ "port": 8080
+ }
+ ]
}
- ]
-}
-```
-```bash
-# On the VM, use the address above. It will directly connect to the pod running istio-pilot.
-curl 'http://10.60.1.4:8080/v1/registration/istio-pilot.istio-system.svc.cluster.local|http-discovery'
-```
+ ```
+
+ On the VM, use the address above. It will directly connect to the pod running istio-pilot.
+ ```command
+ $ curl 'http://10.60.1.4:8080/v1/registration/istio-pilot.istio-system.svc.cluster.local|http-discovery'
+ ```
* Extract the initial Istio authentication secrets and copy them to the machine. The default
-installation of Istio includes Istio CA and will generate Istio secrets even if
+installation of Istio includes Citadel and will generate Istio secrets even if
the automatic 'mTLS'
setting is disabled (it creates secret for each service account, and the secret
is named as `istio.`). It is recommended that you perform this
step to make it easy to enable mTLS in the future and to upgrade to a future version
that will have mTLS enabled by default.
-```bash
-# ACCOUNT defaults to 'default', or SERVICE_ACCOUNT environment variable
-# NAMESPACE defaults to current namespace, or SERVICE_NAMESPACE environment variable
-# (this step is done by machineSetup)
-# On a mac either brew install base64 or set BASE64_DECODE="/usr/bin/base64 -D"
-install/tools/setupMeshEx.sh machineCerts ACCOUNT NAMESPACE
-```
-
-The generated files (`key.pem`, `root-cert.pem`, `cert-chain.pem`) must be copied to /etc/certs on each machine, readable by istio-proxy.
-
-* Install Istio Debian files and start 'istio' and 'istio-auth-node-agent' services.
-Get the debian packages from [github releases](https://github.com/istio/istio/releases) or:
-
- ```bash
- # Note: This will be replaced with an 'apt-get' command once the repositories are setup.
+ `ACCOUNT` defaults to 'default', or `SERVICE_ACCOUNT` environment variable
+ `NAMESPACE` defaults to current namespace, or `SERVICE_NAMESPACE` environment variable
+ (this step is done by machineSetup)
+    On a Mac, either `brew install base64` or set `BASE64_DECODE="/usr/bin/base64 -D"`.
- source istio.VERSION # defines version and URLs env var
- curl -L ${PILOT_DEBIAN_URL}/istio-agent.deb > ${ISTIO_STAGING}/istio-agent.deb
- curl -L ${AUTH_DEBIAN_URL}/istio-auth-node-agent.deb > ${ISTIO_STAGING}/istio-auth-node-agent.deb
- curl -L ${PROXY_DEBIAN_URL}/istio-proxy.deb > ${ISTIO_STAGING}/istio-proxy.deb
+ ```command
+    $ install/tools/setupMeshEx.sh machineCerts ACCOUNT NAMESPACE
+ ```
- dpkg -i istio-proxy-envoy.deb
- dpkg -i istio-agent.deb
- dpkg -i istio-auth-node-agent.deb
+ The generated files (`key.pem`, `root-cert.pem`, `cert-chain.pem`) must be copied to /etc/certs on each machine, readable by istio-proxy.
- systemctl start istio
- systemctl start istio-auth-node-agent
+* Install Istio Debian files and start 'istio' and 'istio-auth-node-agent' services.
+Get the debian packages from [GitHub releases](https://github.com/istio/istio/releases) or:
+
+ ```command
+ $ source istio.VERSION # defines version and URLs env var
+ $ curl -L ${PILOT_DEBIAN_URL}/istio-sidecar.deb > istio-sidecar.deb
+ $ dpkg -i istio-sidecar.deb
+ $ systemctl start istio
+ $ systemctl start istio-auth-node-agent
```
------ Manual setup steps end ------
@@ -218,29 +212,27 @@ Get the debian packages from [github releases](https://github.com/istio/istio/re
After setup, the machine should be able to access services running in the Kubernetes cluster
or other mesh expansion machines.
-```bash
-# Assuming you install bookinfo in 'bookinfo' namespace
-curl productpage.bookinfo.svc.cluster.local:9080
-```
+```command
+$ curl productpage.bookinfo.svc.cluster.local:9080
```
+```plain
... html content ...
```
Check that the processes are running:
-```bash
-ps aux |grep istio
-```
-```
+
+```command
+$ ps aux |grep istio
root 6941 0.0 0.2 75392 16820 ? Ssl 21:32 0:00 /usr/local/istio/bin/node_agent --logtostderr
root 6955 0.0 0.0 49344 3048 ? Ss 21:32 0:00 su -s /bin/bash -c INSTANCE_IP=10.150.0.5 POD_NAME=demo-vm-1 POD_NAMESPACE=default exec /usr/local/bin/pilot-agent proxy > /var/log/istio/istio.log istio-proxy
istio-p+ 7016 0.0 0.1 215172 12096 ? Ssl 21:32 0:00 /usr/local/bin/pilot-agent proxy
istio-p+ 7094 4.0 0.3 69540 24800 ? Sl 21:32 0:37 /usr/local/bin/envoy -c /etc/istio/proxy/envoy-rev1.json --restart-epoch 1 --drain-time-s 2 --parent-shutdown-time-s 3 --service-cluster istio-proxy --service-node sidecar~10.150.0.5~demo-vm-1.default~default.svc.cluster.local
```
+
Istio auth node agent is healthy:
-```bash
-sudo systemctl status istio-auth-node-agent
-```
-```
+
+```command
+$ sudo systemctl status istio-auth-node-agent
● istio-auth-node-agent.service - istio-auth-node-agent: The Istio auth node agent
Loaded: loaded (/lib/systemd/system/istio-auth-node-agent.service; disabled; vendor preset: enabled)
Active: active (running) since Fri 2017-10-13 21:32:29 UTC; 9s ago
@@ -264,26 +256,26 @@ Oct 13 21:32:29 demo-vm-1 node_agent[6941]: I1013 21:32:29.862575 6941 nodeag
* Configure the sidecar to intercept the port. This is configured in `/var/lib/istio/envoy/sidecar.env`,
using the ISTIO_INBOUND_PORTS environment variable.
- Example (on the VM running the service):
+ Example (on the VM running the service):
-```bash
-echo "ISTIO_INBOUND_PORTS=27017,3306,8080" > /var/lib/istio/envoy/sidecar.env
-systemctl restart istio
-```
+ ```command
+ $ echo "ISTIO_INBOUND_PORTS=27017,3306,8080" > /var/lib/istio/envoy/sidecar.env
+ $ systemctl restart istio
+ ```
* Manually configure a selector-less service and endpoints. The 'selector-less' service is used for
services that are not backed by Kubernetes pods.
Example, on a machine with permissions to modify Kubernetes services:
-```bash
-# istioctl register servicename machine-ip portname:port
-istioctl -n onprem register mysql 1.2.3.4 3306
-istioctl -n onprem register svc1 1.2.3.4 http:7000
-```
+
+ ```command
+ $ istioctl -n onprem register mysql 1.2.3.4 3306
+ $ istioctl -n onprem register svc1 1.2.3.4 http:7000
+ ```
After the setup, Kubernetes pods and other mesh expansions should be able to access the
services running on the machine.
-## Putting it all together
+## What's next
-See the [Bookinfo Mesh Expansion]({{home}}/docs/guides/integrating-vms.html) guide.
+* See the [Bookinfo Mesh Expansion]({{home}}/docs/guides/integrating-vms.html) guide.
diff --git a/_docs/setup/kubernetes/multicluster-install.md b/_docs/setup/kubernetes/multicluster-install.md
new file mode 100644
index 0000000000000..9071e61c27bd8
--- /dev/null
+++ b/_docs/setup/kubernetes/multicluster-install.md
@@ -0,0 +1,210 @@
+---
+title: Istio Multicluster
+description: Install Istio with multicluster support.
+
+weight: 65
+
+---
+
+{% include home.html %}
+
+Instructions for the installation of Istio multicluster.
+
+## Prerequisites
+
+* Two or more Kubernetes clusters with **1.7.3 or newer**.
+
+* The ability to deploy the [Istio control plane]({{home}}/docs/setup/kubernetes/quick-start.html)
+on **one** Kubernetes cluster.
+
+* The usage of an RFC1918 network, VPN, or alternative more advanced network techniques
+to meet the following requirements:
+
+ * Individual cluster Pod CIDR ranges and service CIDR ranges must be unique
+across the multicluster environment and may not overlap.
+
+ * All pod CIDRs in every cluster must be routable to each other.
+
+ * All Kubernetes control plane API servers must be routable to each other.
+
+* Helm **2.7.2 or newer**. The use of Tiller is optional.
+
+* Currently only [manual sidecar injection]({{home}}/docs/setup/kubernetes/sidecar-injection.html#manual-sidecar-injection)
+has been validated with multicluster.
+
+## Caveats and known problems
+
+
+All known caveats and problems with multicluster for the 0.8 release are [tracked here](https://github.com/istio/istio/issues/4822).
+
+## Overview
+
+Multicluster functions by enabling Kubernetes control planes running
+a remote configuration to connect to **one** Istio control plane.
+Once one or more remote Kubernetes clusters are connected to the
+Istio control plane, Envoy can then communicate with the **single**
+Istio control plane and form a mesh network across multiple Kubernetes
+clusters.
+
+## Deploy all Kubernetes clusters to be used in the mesh
+
+After deployment of remote clusters, each one will have a
+credentials file associated with the admin context typically
+located in `$HOME/.kube/config`.
+
+## Gather credential files from remote
+
+> `${CLUSTER_NAME}` here is defined as the name of the remote
+cluster used and must be unique across the mesh. Some Kubernetes
+installers do not set this value uniquely. In this case, manual
+modification of the `${CLUSTER_NAME}` fields must be done.
+
+For each remote cluster, execute the following steps:
+
+1. Determine a name for the remote cluster that is unique across
+all clusters in the mesh. Substitute the chosen name for
+`${CLUSTER_NAME}` in the remaining steps.
+
+1. Copy the credentials file from the remote Kubernetes cluster
+to the local Istio control plane cluster directory
+`$HOME/multicluster/${CLUSTER_NAME}`. The `${CLUSTER_NAME}` must
+be unique per remote.
+
+1. Modify the name of the remote cluster's credential file field
+`clusters.cluster.name` to match `${CLUSTER_NAME}`.
+
+1. Modify the name of the remote cluster's credential file field
+`contexts.context.cluster` to match `${CLUSTER_NAME}`.
+
+## Instantiate the credentials for each remote cluster
+
+> Execute this work on the cluster intended to run the Istio control
+plane.
+
+> Istio can be installed in a different namespace other than
+istio-system.
+
+Create a namespace for instantiating the secrets:
+
+```command
+$ kubectl create ns istio-system
+```
+
+> Ordering currently matters. Secrets must be created prior to
+the deployment of the Istio control plane. Creating secrets
+after Istio is started will not register the secrets with Istio
+properly.
+
+> The local cluster running the Istio control plane does not need
+its secrets stored and labeled. The local node is always aware of
+its Kubernetes credentials, but the local node is not aware of
+the remote nodes' credentials.
+
+Create a secret and label it properly for each remote cluster:
+
+```command
+$ pushd $HOME/multicluster
+$ kubectl create secret generic ${CLUSTER_NAME} --from-file ${CLUSTER_NAME} -n istio-system
+$ kubectl label secret ${CLUSTER_NAME} istio/multiCluster=true -n istio-system
+$ popd
+```
+
+## Deploy the local Istio control plane
+
+Install the [Istio control plane]({{home}}/docs/setup/kubernetes/quick-start.html#installation-steps)
+on **one** Kubernetes cluster.
+
+## Install the Istio remote on every remote cluster
+
+The istio-remote component must be deployed to each remote Kubernetes
+cluster. There are two approaches to installing the remote. The remote
+can be installed and managed entirely by Helm and Tiller, or via Helm and
+kubectl.
+
+### Set environment variables for Pod IPs from Istio control plane needed by remote
+
+> Please wait for the Istio control plane to finish initializing
+before proceeding to steps in this section.
+
+> These operations must be run on the Istio control plane cluster
+to capture the Pilot, Policy, and Statsd Pod IP endpoints.
+
+> If Helm is used with Tiller on each remote, copy the environment
+variables to each node before using Helm to connect the remote
+cluster to the Istio control plane.
+
+```command
+$ export PILOT_POD_IP=$(kubectl -n istio-system get pod -l istio=pilot -o jsonpath='{.items[0].status.podIP}')
+$ export POLICY_POD_IP=$(kubectl -n istio-system get pod -l istio=mixer -o jsonpath='{.items[0].status.podIP}')
+$ export STATSD_POD_IP=$(kubectl -n istio-system get pod -l istio=statsd-prom-bridge -o jsonpath='{.items[0].status.podIP}')
+```
+
+### Use kubectl with Helm to connect the remote cluster to the local
+
+1. Use the helm template command on a remote to specify the Istio control plane service endpoints:
+
+ ```command
+ $ helm template install/kubernetes/helm/istio-remote --namespace istio-system --name istio-remote --set global.pilotEndpoint=${PILOT_POD_IP} --set global.policyEndpoint=${POLICY_POD_IP} --set global.statsdEndpoint=${STATSD_POD_IP} > $HOME/istio-remote.yaml
+ ```
+
+1. Create a namespace for remote Istio.
+ ```command
+ $ kubectl create ns istio-system
+ ```
+1. Instantiate the remote cluster's connection to the Istio control plane:
+
+ ```command
+ $ kubectl create -f $HOME/istio-remote.yaml
+ ```
+
+### Alternatively use Helm and Tiller to connect the remote cluster to the local
+
+1. If a service account has not already been installed for Helm, please
+install one:
+
+ ```command
+ $ kubectl create -f install/kubernetes/helm/helm-service-account.yaml
+ ```
+
+1. Initialize Helm:
+
+ ```command
+ $ helm init --service-account tiller
+ ```
+
+1. Install the Helm chart:
+
+ ```command
+ $ helm install install/kubernetes/helm/istio-remote --name istio-remote --set global.pilotEndpoint=${PILOT_POD_IP} --set global.policyEndpoint=${POLICY_POD_IP} --set global.statsdEndpoint=${STATSD_POD_IP} --namespace istio-system
+ ```
+
+### Helm configuration parameters
+
+> The `pilotEndpoint`, `policyEndpoint`, and `statsdEndpoint` need to be resolvable via Kubernetes.
+The simplest approach to enabling resolution for these variables is to specify the Pod IP of
+the various services. One problem with this is that Pod IPs change during the lifetime of the
+service.
+
+The `istio-remote` Helm chart requires the three specific variables to be configured as defined in the following table:
+
+| Helm Variable | Accepted Values | Default | Purpose of Value |
+| --- | --- | --- | --- |
+| `global.pilotEndpoint` | A valid IP address | istio-pilot.istio-system | Specifies the Istio control plane's pilot Pod IP address |
+| `global.policyEndpoint` | A valid IP address | istio-policy.istio-system | Specifies the Istio control plane's policy Pod IP address |
+| `global.statsdEndpoint` | A valid IP address | istio-statsd-prom-bridge.istio-system | Specifies the Istio control plane's statsd Pod IP address |
+
+## Uninstalling
+
+> The uninstall method must match the installation method (`Helm and kubectl` or `Helm and Tiller` based).
+
+### Use kubectl to uninstall istio-remote
+
+```command
+$ kubectl delete -f $HOME/istio-remote.yaml
+```
+
+### Alternatively use Helm and Tiller to uninstall istio-remote
+
+```command
+$ helm delete --purge istio-remote
+```
diff --git a/_docs/setup/kubernetes/quick-start-gke-dm.md b/_docs/setup/kubernetes/quick-start-gke-dm.md
index 1adf248374159..3942c0d79bd01 100644
--- a/_docs/setup/kubernetes/quick-start-gke-dm.md
+++ b/_docs/setup/kubernetes/quick-start-gke-dm.md
@@ -1,22 +1,18 @@
---
title: Quick Start with Google Kubernetes Engine
-overview: Quick Start instructions to setup the Istio service using Google Kubernetes Engine (GKE)
+description: Quick Start instructions to setup the Istio service using Google Kubernetes Engine (GKE)
-order: 11
+weight: 11
-layout: docs
-type: markdown
---
{% include home.html %}
-
Quick Start instructions to install and run Istio in [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) (GKE) using [Google Cloud Deployment Manager](https://cloud.google.com/deployment-manager/).
This Quick Start creates a new GKE [zonal cluster](https://cloud.google.com/kubernetes-engine/versioning-and-upgrades#versions_available_for_new_cluster_masters), installs Istio and then deploys the [Bookinfo]({{home}}/docs/guides/bookinfo.html) sample
application. It uses Deployment Manager to automate the steps detailed in the [Istio on Kubernetes setup guide]({{home}}/docs/setup/kubernetes/quick-start.html) for Kubernetes Engine
-
## Prerequisites
- This sample requires a valid Google Cloud Platform project with billing enabled. If you are not an existing GCP user, you may be able to enroll for a $300 US [Free Trial](https://cloud.google.com/free/) credit.
@@ -31,11 +27,16 @@ application. It uses Deployment Manager to automate the steps detailed in the [
To set this, navigate to the **IAM** section of the [Cloud Console](https://console.cloud.google.com/iam-admin/iam/project) as shown below and find your default GCE/GKE service account in the following form: `projectNumber-compute@developer.gserviceaccount.com`: by default it should just have the **Editor** role. Then in the **Roles** drop-down list for that account, find the **Kubernetes Engine** group and select the role **Kubernetes Engine Admin**. The **Roles** listing for your account will change to **Multiple**.
- {% include figure.html width="100%" ratio="30%"
- img='./img/dm_gcp_iam.png'
- alt='GCP-IAM Permissions'
- title='GCP-IAM Permissions'
- caption='GKE-IAM Permissions'
+ {% include image.html width="100%" ratio="30%"
+ link="./img/dm_gcp_iam.png"
+ caption="GKE-IAM Service"
+ %}
+
+Then add the ```Kubernetes Engine Admin``` Role
+
+ {% include image.html width="100%" ratio="37%"
+ link="./img/dm_gcp_iam_role.png"
+ caption="GKE-IAM Role"
%}
## Setup
@@ -44,24 +45,22 @@ application. It uses Deployment Manager to automate the steps detailed in the [
1. Once you have an account and project enabled, click the following link to open the Deployment Manager.
- - [Istio GKE Deployment Manager](https://accounts.google.com/signin/v2/identifier?service=cloudconsole&continue=https://console.cloud.google.com/launcher/config?templateurl=https://raw.githubusercontent.com/istio/istio/master/install/gcp/deployment_manager/istio-cluster.jinja&followup=https://console.cloud.google.com/launcher/config?templateurl=https://raw.githubusercontent.com/istio/istio/master/install/gcp/deployment_manager/istio-cluster.jinja&flowName=GlifWebSignIn&flowEntry=ServiceLogin)
+ [Istio GKE Deployment Manager](https://accounts.google.com/signin/v2/identifier?service=cloudconsole&continue=https://console.cloud.google.com/launcher/config?templateurl=https://raw.githubusercontent.com/istio/istio/master/install/gcp/deployment_manager/istio-cluster.jinja&followup=https://console.cloud.google.com/launcher/config?templateurl=https://raw.githubusercontent.com/istio/istio/master/install/gcp/deployment_manager/istio-cluster.jinja&flowName=GlifWebSignIn&flowEntry=ServiceLogin)
- We recommend that you leave the default settings as the rest of this tutorial shows how to access the installed features. By default the tool creates a
+ We recommend that you leave the default settings as the rest of this tutorial shows how to access the installed features. By default the tool creates a
GKE alpha cluster with the specified settings, then installs the Istio [control plane]({{home}}/docs/concepts/what-is-istio/overview.html#architecture), the
[Bookinfo]({{home}}/docs/guides/bookinfo.html) sample app,
[Grafana]({{home}}/docs/tasks/telemetry/using-istio-dashboard.html) with
[Prometheus]({{home}}/docs/tasks/telemetry/querying-metrics.html),
[ServiceGraph]({{home}}/docs/tasks/telemetry/servicegraph.html),
and [Zipkin]({{home}}/docs/tasks/telemetry/distributed-tracing.html#zipkin).
- You'll find out more about how to access all of these below. This script will enable istio auto-injection on the ```default``` namespace only.
+ You'll find out more about how to access all of these below. This script will enable Istio auto-injection on the ```default``` namespace only.
-2. Click **Deploy**:
+1. Click **Deploy**:
- {% include figure.html width="100%" ratio="67.17%"
- img='./img/dm_launcher.png'
- alt='GKE-Istio Launcher'
- title='GKE-Istio Launcher'
- caption='GKE-Istio Launcher'
+ {% include image.html width="100%" ratio="67.17%"
+ link="./img/dm_launcher.png"
+ caption="GKE-Istio Launcher"
%}
Wait until Istio is fully deployed. Note that this can take up to five minutes.
@@ -70,37 +69,32 @@ application. It uses Deployment Manager to automate the steps detailed in the [
Once deployment is complete, do the following on the workstation where you've installed `gcloud`:
-1. Bootstrap kubectl for the cluster you just created and confirm the cluster is
- running and istio is enabled
-
- ```
- gcloud container clusters list
- ```
+1. Bootstrap `kubectl` for the cluster you just created and confirm the cluster is
+running and Istio is enabled
- ```
+ ```command
+ $ gcloud container clusters list
NAME ZONE MASTER_VERSION MASTER_IP MACHINE_TYPE NODE_VERSION NUM_NODES STATUS
istio-cluster us-central1-a v1.9.2-gke.1 130.211.216.64 n1-standard-2 v1.9.2-gke.1 3 RUNNING
```
In this case, the cluster name is ```istio-cluster```
-2. Now acquire the credentials for this cluster
+1. Now acquire the credentials for this cluster
- ```
- gcloud container clusters get-credentials istio-cluster --zone=us-central1-a
+ ```command
+ $ gcloud container clusters get-credentials istio-cluster --zone=us-central1-a
```
## Verify installation
Verify Istio is installed in its own namespace
-```bash
-kubectl get deployments,ing -n istio-system
-```
-```
+```command
+$ kubectl get deployments,ing -n istio-system
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
deploy/grafana 1 1 1 1 3m
-deploy/istio-ca 1 1 1 1 3m
+deploy/istio-citadel 1 1 1 1 3m
deploy/istio-ingress 1 1 1 1 3m
deploy/istio-initializer 1 1 1 1 3m
deploy/istio-mixer 1 1 1 1 3m
@@ -110,14 +104,10 @@ deploy/servicegraph 1 1 1 1 3m
deploy/zipkin 1 1 1 1 3m
```
-
Now confirm that the Bookinfo sample application is also installed:
-
-```bash
-kubectl get deployments,ing
-```
-```
+```command
+$ kubectl get deployments,ing
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
deploy/details-v1 1 1 1 1 3m
deploy/productpage-v1 1 1 1 1 3m
@@ -134,86 +124,75 @@ Note down the IP and Port assigned to Bookinfo product page. (in the example abo
You can also view the installation using the **Kubernetes Engine -> Workloads** section on the [Cloud Console](https://console.cloud.google.com/kubernetes/workload):
-{% include figure.html width="100%" ratio="65.37%"
- img='./img/dm_kubernetes_workloads.png'
- alt='GKE-Workloads'
- title='GKE-Workloads'
- caption='GKE-Workloads'
+{% include image.html width="100%" ratio="65.37%"
+ link="./img/dm_kubernetes_workloads.png"
+ caption="GKE-Workloads"
%}
### Access the Bookinfo sample
1. Set up an environment variable for Bookinfo's external IP address:
- ```bash
- kubectl get ingress -o wide
- ```
- ```bash
- export GATEWAY_URL=35.202.120.89
+ ```command
+ $ kubectl get ingress -o wide
+ $ export GATEWAY_URL=35.202.120.89
```
-2. Verify you can access the Bookinfo ```http://${GATEWAY_URL}/productpage```:
+1. Verify you can access the Bookinfo ```http://${GATEWAY_URL}/productpage```:
- {% include figure.html width="100%" ratio="45.04%"
- img='./img/dm_bookinfo.png'
- alt='Bookinfo'
- title='Bookinfo'
- caption='Bookinfo'
+ {% include image.html width="100%" ratio="45.04%"
+ link="./img/dm_bookinfo.png"
+ caption="Bookinfo"
%}
-3. Now send some traffic to it:
- ```bash
- for i in {1..100}; do curl -o /dev/null -s -w "%{http_code}\n" http://${GATEWAY_URL}/productpage; done
+1. Now send some traffic to it:
+ ```command
+ $ for i in {1..100}; do curl -o /dev/null -s -w "%{http_code}\n" http://${GATEWAY_URL}/productpage; done
```
## Verify installed Istio plugins
Once you have verified that the Istio control plane and sample application are working, try accessing the installed Istio plugins.
-If you are using Cloud Shell rather than the installed `gcloud` client, you can port forward and proxy using its [Web Preview](https://cloud.google.com/shell/docs/using-web-preview#previewing_the_application) feature. For example, to access Grafana from Cloud Shell, change the kubectl port mapping from 3000:3000 to 8080:3000. You can simultaneously preview four other consoles via Web Preview proxied on ranges 8080 to 8084.
+If you are using Cloud Shell rather than the installed `gcloud` client, you can port forward and proxy using its [Web Preview](https://cloud.google.com/shell/docs/using-web-preview#previewing_the_application) feature. For example, to access Grafana from Cloud Shell, change the `kubectl` port mapping from 3000:3000 to 8080:3000. You can simultaneously preview four other consoles via Web Preview proxied on ranges 8080 to 8084.
### Grafana
Set up a tunnel to Grafana:
-```bash
-kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=grafana -o jsonpath='{.items[0].metadata.name}') 3000:3000 &
+```command
+$ kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=grafana -o jsonpath='{.items[0].metadata.name}') 3000:3000 &
```
then
-```
+```plain
http://localhost:3000/dashboard/db/istio-dashboard
```
You should see some statistics for the requests you sent earlier.
-{% include figure.html width="100%" ratio="48.49%"
- img='./img/dm_grafana.png'
- alt='Grafana'
- title='Grafana'
- caption='Grafana'
+{% include image.html width="100%" ratio="48.49%"
+ link="./img/dm_grafana.png"
+ caption="Grafana"
%}
For more details about using Grafana, see [About the Grafana Add-on]({{home}}/docs/tasks/telemetry/using-istio-dashboard.html#about-the-grafana-add-on).
-
### Prometheus
Prometheus is installed with Grafana. You can view Istio and application metrics using the console as follows:
-```bash
-kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=prometheus -o jsonpath='{.items[0].metadata.name}') 9090:9090 &
+```command
+$ kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=prometheus -o jsonpath='{.items[0].metadata.name}') 9090:9090 &
```
View the console at:
-```
+```plain
http://localhost:9090/graph
```
-{% include figure.html width="100%" ratio="43.88%"
- img='./img/dm_prometheus.png'
- alt='Prometheus'
- title='Prometheus'
- caption='Prometheus'
+{% include image.html width="100%" ratio="43.88%"
+ link="./img/dm_prometheus.png"
+ caption="Prometheus"
%}
For more details, see [About the Prometheus Add-on]({{home}}/docs/tasks/telemetry/querying-metrics.html#about-the-prometheus-add-on).
@@ -222,20 +201,19 @@ For more details, see [About the Prometheus Add-on]({{home}}/docs/tasks/telemetr
Set up a tunnel to ServiceGraph:
-```bash
-kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=servicegraph -o jsonpath='{.items[0].metadata.name}') 8088:8088 &
+```command
+$ kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=servicegraph -o jsonpath='{.items[0].metadata.name}') 8088:8088 &
```
+
You should see the Bookinfo service topology at
-```
+```plain
http://localhost:8088/dotviz
```
-{% include figure.html width="100%" ratio="53.33%"
- img='./img/dm_servicegraph.png'
- alt='ServiceGraph'
- title='ServiceGraph'
- caption='ServiceGraph'
+{% include image.html width="100%" ratio="53.33%"
+ link="./img/dm_servicegraph.png"
+ caption="ServiceGraph"
%}
For more details, see [About the ServiceGraph Add-on]({{home}}/docs/tasks/telemetry/servicegraph.html#about-the-servicegraph-add-on).
@@ -244,21 +222,19 @@ For more details, see [About the ServiceGraph Add-on]({{home}}/docs/tasks/teleme
Set up a tunnel to Zipkin:
-```bash
-kubectl port-forward -n istio-system $(kubectl get pod -n istio-system -l app=zipkin -o jsonpath='{.items[0].metadata.name}') 9411:9411 &
+```command
+$ kubectl port-forward -n istio-system $(kubectl get pod -n istio-system -l app=zipkin -o jsonpath='{.items[0].metadata.name}') 9411:9411 &
```
You should see the trace statistics sent earlier:
-```
+```plain
http://localhost:9411
```
-{% include figure.html width="100%" ratio="57.00%"
- img='./img/dm_zipkin.png'
- alt='Zipkin'
- title='Zipkin'
- caption='Zipkin'
+{% include image.html width="100%" ratio="57.00%"
+ link="./img/dm_zipkin.png"
+ caption="Zipkin"
%}
For more details on tracing see [Understanding what happened]({{home}}/docs/tasks/telemetry/distributed-tracing.html#understanding-what-happened).
@@ -274,6 +250,7 @@ on our workstation or within Cloud Shell.
1. Navigate to the Deployments section of the Cloud Console at [https://console.cloud.google.com/deployments](https://console.cloud.google.com/deployments)
-2. Select the deployment and click **Delete**.
+1. Select the deployment and click **Delete**.
-3. Deployment Manager will remove all the deployed GKE artifacts - however, items such as Ingress and LoadBalancers will remain. You can delete those artifacts by again going to the cloud console under [**Network Services** -> **LoadBalancers**](https://console.cloud.google.com/net-services/loadbalancing/loadBalancers/list)
+1. Deployment Manager will remove all the deployed GKE artifacts - however, items such as Ingress and LoadBalancers will remain. You can delete those artifacts
+by again going to the cloud console under [**Network Services** -> **LoadBalancers**](https://console.cloud.google.com/net-services/loadbalancing/loadBalancers/list)
diff --git a/_docs/setup/kubernetes/quick-start.md b/_docs/setup/kubernetes/quick-start.md
index 64e48548b6b83..5997e91f66244 100644
--- a/_docs/setup/kubernetes/quick-start.md
+++ b/_docs/setup/kubernetes/quick-start.md
@@ -1,24 +1,23 @@
---
title: Quick Start
-overview: Quick Start instructions to setup the Istio service mesh in a Kubernetes cluster.
+description: Quick start instructions to setup the Istio service mesh in a Kubernetes cluster.
-order: 10
+weight: 10
-layout: docs
-type: markdown
---
{% include home.html %}
-Quick Start instructions to install and configure Istio in a Kubernetes cluster.
-
+Quick start instructions to install and configure Istio in a Kubernetes cluster.
## Prerequisites
The following instructions require you have access to a Kubernetes **1.7.3 or newer** cluster
-with [RBAC (Role-Based Access Control)](https://kubernetes.io/docs/admin/authorization/rbac/) enabled. You will also need `kubectl` **1.7.3 or newer** installed. If you wish to enable [automatic sidecar injection]({{home}}/docs/setup/kubernetes/sidecar-injection.html#automatic-sidecar-injection), you need Kubernetes version 1.9 or greater.
+with [RBAC (Role-Based Access Control)](https://kubernetes.io/docs/admin/authorization/rbac/) enabled. You will also need `kubectl` **1.7.3 or newer** installed.
+
+If you wish to enable [automatic sidecar injection]({{home}}/docs/setup/kubernetes/sidecar-injection.html#automatic-sidecar-injection) or server-side configuration validation, you need Kubernetes version 1.9 or greater.
- > Note: If you installed Istio 0.1.x,
+ > If you installed Istio 0.1.x,
> [uninstall](https://archive.istio.io/v0.1/docs/tasks/installing-istio.html#uninstalling)
> it completely before installing the newer version (including the Istio sidecar
> for all Istio enabled application pods).
@@ -28,49 +27,185 @@ with [RBAC (Role-Based Access Control)](https://kubernetes.io/docs/admin/authori
match the version supported by your cluster (version 1.7 or later for CRD
support).
-* Depending on your Kubernetes provider:
+### [Minikube](https://github.com/kubernetes/minikube/releases)
- * To install Istio locally, install the latest version of
-[Minikube](https://kubernetes.io/docs/getting-started-guides/minikube/) (version 0.22.1 or later).
+To install Istio locally, install the latest version of
+[Minikube](https://kubernetes.io/docs/getting-started-guides/minikube/) (version 0.25.0 or later).
- * [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/)
+```command
+$ minikube start \
+ --extra-config=controller-manager.ClusterSigningCertFile="/var/lib/localkube/certs/ca.crt" \
+ --extra-config=controller-manager.ClusterSigningKeyFile="/var/lib/localkube/certs/ca.key" \
+ --extra-config=apiserver.Admission.PluginNames=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
+ --kubernetes-version=v1.9.0
+```
- * Retrieve your credentials for kubectl (replace `` with the name of the cluster you want to use,
- and `` with the zone where that cluster is located):
- ```bash
- gcloud container clusters get-credentials --zone --project
- ```
+### [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/)
- * Grant cluster admin permissions to the current user (admin permissions are required to create the necessary RBAC rules for Istio):
- ```bash
- kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=$(gcloud config get-value core/account)
- ```
+Create a new cluster.
- * [IBM Cloud Container Service](https://www.ibm.com/cloud-computing/bluemix/containers)
+```command
+$ gcloud container clusters create \
+ --cluster-version=1.9.4-gke.1 \
+ --zone \
+ --project
+```
- * Retrieve your credentials for kubectl (replace `` with the name of the cluster you want to use):
- ```bash
- $(bx cs cluster-config |grep "export KUBECONFIG")
- ```
+Retrieve your credentials for `kubectl`.
- * [IBM Cloud Private](https://www.ibm.com/cloud-computing/products/ibm-cloud-private/) version 2.1 or later
+```command
+$ gcloud container clusters get-credentials \
+ --zone \
+ --project
+```
+
+Grant cluster admin permissions to the current user (admin permissions are required to create the necessary RBAC rules for Istio).
- * Config `kubectl` CLI based on steps [here](https://www.ibm.com/support/knowledgecenter/SSBS6K_2.1.0/manage_cluster/cfc_cli.html) for how to access the IBM Cloud Private Cluster.
+```command
+$ kubectl create clusterrolebinding cluster-admin-binding \
+ --clusterrole=cluster-admin \
+ --user=$(gcloud config get-value core/account)
+```
- * [Openshift Origin](https://www.openshift.org) version 3.7 or later
+### [IBM Cloud Container Service (IKS)](https://www.ibm.com/cloud/container-service)
- * Openshift by default does not allow containers running with UID 0. Enable containers running
+Kubernetes 1.9 is generally available on IBM Cloud Container Service (IKS).
+
+At the time of writing it is not the default version, so to create a new lite cluster:
+
+```command
+$ bx cs cluster-create --name --kube-version 1.9.3
+```
+
+Or create a new paid cluster:
+
+```command
+$ bx cs cluster-create --location location --machine-type u2c.2x4 --name --kube-version 1.9.3
+```
+
+Retrieve your credentials for `kubectl` (replace `` with the name of the cluster you want to use):
+
+```bash
+$(bx cs cluster-config |grep "export KUBECONFIG")
+```
+
+### [IBM Cloud Private](https://www.ibm.com/us-en/marketplace/ibm-cloud-private) (version 2.1 or later)
+
+Configure `kubectl` CLI based on steps [here](https://www.ibm.com/support/knowledgecenter/SSBS6K_2.1.0/manage_cluster/cfc_cli.html) for how to access the IBM Cloud Private Cluster.
+
+### [OpenShift Origin](https://www.openshift.org) (version 3.7 or later)
+
+OpenShift by default does not allow containers running with UID 0. Enable containers running
with UID 0 for Istio's service accounts for ingress as well the Prometheus and Grafana addons:
- ```bash
- oc adm policy add-scc-to-user anyuid -z istio-ingress-service-account -n istio-system
- oc adm policy add-scc-to-user anyuid -z istio-grafana-service-account -n istio-system
- oc adm policy add-scc-to-user anyuid -z istio-prometheus-service-account -n istio-system
- ```
- * Service account that runs application pods need privileged security context constraints as part of sidecar injection.
- ```bash
- oc adm policy add-scc-to-user privileged -z default -n
- ```
+```command
+$ oc adm policy add-scc-to-user anyuid -z istio-ingress-service-account -n istio-system
+$ oc adm policy add-scc-to-user anyuid -z grafana -n istio-system
+$ oc adm policy add-scc-to-user anyuid -z prometheus -n istio-system
+```
+
+Service account that runs application pods need privileged security context constraints as part of sidecar injection.
+
+```command
+$ oc adm policy add-scc-to-user privileged -z default -n
+```
+
+### AWS (w/Kops)
+
+When you install a new cluster with Kubernetes version 1.9, the prerequisite of having `admissionregistration.k8s.io/v1beta1` enabled is already covered.
+
+Nevertheless the list of admission controllers needs to be updated.
+
+```command
+$ kops edit cluster $YOURCLUSTER
+```
+
+Add following in the configuration file just opened:
+
+```yaml
+kubeAPIServer:
+ admissionControl:
+ - NamespaceLifecycle
+ - LimitRanger
+ - ServiceAccount
+ - PersistentVolumeLabel
+ - DefaultStorageClass
+ - DefaultTolerationSeconds
+ - MutatingAdmissionWebhook
+ - ValidatingAdmissionWebhook
+ - ResourceQuota
+ - NodeRestriction
+ - Priority
+```
+
+Perform the update
+
+```command
+$ kops update cluster
+$ kops update cluster --yes
+```
+
+Launch the rolling update
+
+```command
+$ kops rolling-update cluster
+$ kops rolling-update cluster --yes
+```
+
+Validate with `kubectl` client on kube-api pod, you should see new admission controller:
+
+```command
+$ for i in `kubectl get pods -nkube-system | grep api | awk '{print $1}'` ; do kubectl describe pods -nkube-system $i | grep "/usr/local/bin/kube-apiserver" ; done
+```
+
+Output should be:
+
+```plain
+[...] --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority [...]
+```
+
+### Azure
+
+You need to use `ACS-Engine` to deploy your cluster. After following [these instructions](https://github.com/Azure/acs-engine/blob/master/docs/acsengine.md#install) to get and install the `acs-engine` binary, use the following command to download Istio `api model definition`:
+
+```command
+$ wget https://raw.githubusercontent.com/Azure/acs-engine/master/examples/service-mesh/istio.json
+```
+
+Use the following command to deploy your cluster using the `istio.json` template. You can find references to the parameters in the [official docs](https://github.com/Azure/acs-engine/blob/master/docs/kubernetes/deploy.md#step-3-edit-your-cluster-definition).
+
+| Parameter | Expected value |
+|-------------------------------------|----------------------------|
+| `subscription_id` | Azure Subscription Id |
+| `dns_prefix` | Cluster DNS Prefix |
+| `location` | Cluster Location |
+
+```command
+$ acs-engine deploy --subscription-id --dns-prefix --location --auto-suffix --api-model istio.json
+```
+
+After a few minutes you should find your cluster on your Azure subscription in a resource group called `-`. Let's say my `dns-prefix` is `myclustername`, a valid resource group and unique cluster id would be `mycluster-5adfba82`. Using this `-` cluster id you can copy your `kubeconfig` file to your machine from the `_output` folder generated by `acs-engine`:
+
+```command
+$ cp _output/-/kubeconfig/kubeconfig..json ~/.kube/config
+```
+
+For example:
+
+```command
+$ cp _output/mycluster-5adfba82/kubeconfig/kubeconfig.westus2.json ~/.kube/config
+```
+
+To check if the right Istio flags were deployed, use:
+```command
+$ kubectl describe pod --namespace kube-system $(kubectl get pods --namespace kube-system | grep api | cut -d ' ' -f 1) | grep admission-control
+```
+
+You should see `MutatingAdmissionWebhook` and `ValidatingAdmissionWebhook` flags:
+
+```plain
+ --admission-control=...,MutatingAdmissionWebhook,...,ValidatingAdmissionWebhook,...
+```
## Installation steps
@@ -80,84 +215,96 @@ namespace, and can manage services from all other namespaces.
1. Go to the [Istio release](https://github.com/istio/istio/releases) page to download the
installation file corresponding to your OS. If you are using a MacOS or Linux system, you can also
run the following command to download and extract the latest release automatically:
-```bash
-curl -L https://git.io/getLatestIstio | sh -
-```
+
+ ```command
+ $ curl -L https://git.io/getLatestIstio | sh -
+ ```
1. Extract the installation file and change the directory to the file location. The
- installation directory contains:
- * Installation `.yaml` files for Kubernetes in `install/`
- * Sample applications in `samples/`
- * The `istioctl` client binary in the `bin/` directory. `istioctl` is used when manually injecting Envoy as a sidecar proxy and for creating routing rules and policies.
- * The `istio.VERSION` configuration file
+installation directory contains:
+ * Installation `.yaml` files for Kubernetes in `install/`
+ * Sample applications in `samples/`
+ * The `istioctl` client binary in the `bin/` directory. `istioctl` is used when manually injecting Envoy as a sidecar proxy and for creating routing rules and policies.
+ * The `istio.VERSION` configuration file
-1. Change directory to istio package. For example, if the package is istio-{{ site.data.istio.version }}
-```bash
-cd istio-{{ site.data.istio.version }}
-```
+1. Change directory to istio package. For example, if the package is istio-{{site.data.istio.version}}
+
+ ```command
+ $ cd istio-{{site.data.istio.version}}
+ ```
1. Add the `istioctl` client to your PATH.
- For example, run the following command on a MacOS or Linux system:
-```bash
-export PATH=$PWD/bin:$PATH
-```
+For example, run the following command on a MacOS or Linux system:
+
+ ```command
+ $ export PATH=$PWD/bin:$PATH
+ ```
1. Install Istio's core components. Choose one of the two _**mutually exclusive**_ options below or alternately install
- with the [Helm Chart]({{home}}/docs/setup/kubernetes/helm.html):
+with the [Helm Chart]({{home}}/docs/setup/kubernetes/helm-install.html):
- a) Install Istio without enabling [mutual TLS authentication]({{home}}/docs/concepts/security/mutual-tls.html) between sidecars.
- Choose this option for clusters with existing applications, applications where services with an
- Istio sidecar need to be able to communicate with other non-Istio Kubernetes services, and
- applications that use [liveliness and readiness probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/),
- headless services, or StatefulSets.
-```bash
-kubectl apply -f install/kubernetes/istio.yaml
-```
+ a) Install Istio without enabling [mutual TLS authentication]({{home}}/docs/concepts/security/mutual-tls.html) between sidecars.
+ Choose this option for clusters with existing applications, applications where services with an
+ Istio sidecar need to be able to communicate with other non-Istio Kubernetes services, and
+ applications that use [liveness and readiness probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/),
+ headless services, or StatefulSets.
- _**OR**_
+ ```command
+ $ kubectl apply -f install/kubernetes/istio.yaml
+ ```
- b) Install Istio and enable [mutual TLS authentication]({{home}}/docs/concepts/security/mutual-tls.html) between sidecars.:
-```bash
-kubectl apply -f install/kubernetes/istio-auth.yaml
-```
+ _**OR**_
+
+ b) Install Istio and enable [mutual TLS authentication]({{home}}/docs/concepts/security/mutual-tls.html) between sidecars. This option is mostly for new clusters, i.e., all applications have sidecars injected during their deployment. For existing applications, please choose the above option and enable mutual TLS using [authentication policy]({{home}}/docs/tasks/security/authn-policy.html):
+
+ ```command
+ $ kubectl apply -f install/kubernetes/istio-auth.yaml
+ ```
- Both options create the `istio-system` namespace along with the required RBAC permissions,
- and deploy Istio-Pilot, Istio-Mixer, Istio-Ingress, and Istio-CA (Certificate Authority).
+ Both options create the `istio-system` namespace along with the required RBAC permissions,
+ and deploy Istio-Pilot, Istio-Mixer, Istio-Ingress, and Istio-CA (Certificate Authority).
1. *Optional:* If your cluster has Kubernetes version 1.9 or greater, and you wish to enable automatic proxy injection,
install the [sidecar injector webhook]({{home}}/docs/setup/kubernetes/sidecar-injection.html#automatic-sidecar-injection).
## Verifying the installation
-1. Ensure the following Kubernetes services are deployed: `istio-pilot`, `istio-mixer`,
- `istio-ingress`.
-```bash
-kubectl get svc -n istio-system
-```
-```bash
-NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
-istio-ingress 10.83.245.171 35.184.245.62 80:32730/TCP,443:30574/TCP 5h
-istio-pilot 10.83.251.173 8080/TCP,8081/TCP 5h
-istio-mixer 10.83.244.253 9091/TCP,9094/TCP,42422/TCP 5h
-```
-
- Note: If your cluster is running in an environment that does not support an external load balancer
+1. Ensure the following Kubernetes services are deployed: `istio-pilot`, `istio-ingress`,
+`istio-policy`, `istio-telemetry`, `prometheus`.
+
+ ```command
+ $ kubectl get svc -n istio-system
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ citadel-ilb LoadBalancer 10.35.251.104 10.138.0.43 8060:32031/TCP 47m
+ istio-citadel ClusterIP 10.35.253.23 8060/TCP,9093/TCP 47m
+ istio-ingress LoadBalancer 10.35.245.4 35.203.191.37 80:32765/TCP,443:32304/TCP 47m
+ istio-pilot ClusterIP 10.35.255.168 15003/TCP,15005/TCP,15007/TCP,15010/TCP,15011/TCP,8080/TCP,9093/TCP 47m
+ istio-pilot-ilb LoadBalancer 10.35.252.183 10.138.0.40 15005:30035/TCP,8080:30494/TCP 47m
+ istio-policy ClusterIP 10.35.247.90 9091/TCP,15004/TCP,9093/TCP 47m
+ istio-statsd-prom-bridge ClusterIP 10.35.243.13 9102/TCP,9125/UDP 47m
+ istio-telemetry ClusterIP 10.35.248.71 9091/TCP,15004/TCP,9093/TCP,42422/TCP 47m
+ mixer-ilb LoadBalancer 10.35.240.250 10.138.0.42 15004:30427/TCP 47m
+ prometheus ClusterIP 10.35.255.10 9090/TCP 47m
+ ```
+
+ > If your cluster is running in an environment that does not support an external load balancer
 (e.g., minikube), the `EXTERNAL-IP` of `istio-ingress` says `<pending>`. You must access the
application using the service NodePort, or use port-forwarding instead.
-2. Ensure the corresponding Kubernetes pods are deployed and all containers are up and running:
- `istio-pilot-*`, `istio-mixer-*`, `istio-ingress-*`, `istio-ca-*`,
- and, optionally, `istio-sidecar-injector-*`.
-```bash
-kubectl get pods -n istio-system
-```
-```bash
-istio-ca-3657790228-j21b9 1/1 Running 0 5h
-istio-ingress-1842462111-j3vcs 1/1 Running 0 5h
-istio-sidecar-injector-184129454-zdgf5 1/1 Running 0 5h
-istio-pilot-2275554717-93c43 1/1 Running 0 5h
-istio-mixer-2104784889-20rm8 2/2 Running 0 5h
-```
+1. Ensure the corresponding Kubernetes pods are deployed and all containers are up and running:
+`istio-pilot-*`, `istio-mixer-*`, `istio-ingress-*`, `istio-citadel-*`,
+and, optionally, `istio-sidecar-injector-*`.
+
+ ```command
+ $ kubectl get pods -n istio-system
+ istio-citadel-b454d647d-92jrv 1/1 Running 0 46m
+ istio-ingress-768b9fb68b-jdxfk 1/1 Running 0 46m
+ istio-pilot-b87b8c56b-kggmk 2/2 Running 0 46m
+ istio-policy-58f9bfc796-8vlq4 2/2 Running 0 46m
+ istio-statsd-prom-bridge-6dbb7dcc7f-gzlq7 1/1 Running 0 46m
+ istio-telemetry-55b8c8b44f-fwb69 2/2 Running 0 46m
+ prometheus-586d95b8d9-grk6j 1/1 Running 0 46m
+ ```
## Deploy your application
@@ -166,49 +313,53 @@ installation like [Bookinfo]({{home}}/docs/guides/bookinfo.html).
Note: the application must use HTTP/1.1 or HTTP/2.0 protocol for all its HTTP traffic because HTTP/1.0 is not supported.
If you started the [Istio-sidecar-injector]({{home}}/docs/setup/kubernetes/sidecar-injection.html#automatic-sidecar-injection),
-as shown above, you can deploy the application directly using `kubectl create`.
+as shown above, you can deploy the application directly using `kubectl create`.
The Istio-Sidecar-injector will automatically inject Envoy containers into your application pods, assuming they are running in namespaces labeled with `istio-injection=enabled`
-```bash
-kubectl label namespace istio-injection=enabled
-kubectl create -n -f .yaml
+```command
+$ kubectl label namespace <namespace> istio-injection=enabled
+$ kubectl create -n <namespace> -f <your-app-spec>.yaml
```
If you do not have the Istio-sidecar-injector installed, you must
use [istioctl kube-inject]({{home}}/docs/reference/commands/istioctl.html#istioctl-kube-inject) to
-manuallly inject Envoy containers in your application pods before deploying them:
-```bash
-kubectl create -f <(istioctl kube-inject -f .yaml)
+manually inject Envoy containers in your application pods before deploying them:
+
+```command
+$ kubectl create -f <(istioctl kube-inject -f <your-app-spec>.yaml)
```
## Uninstalling
* Uninstall Istio sidecar injector:
- If you installed Istio with sidecar injector enabled, uninstall it:
-```bash
-kubectl delete -f install/kubernetes/istio-sidecar-injector-with-ca-bundle.yaml
-```
+ If you installed Istio with sidecar injector enabled, uninstall it:
-* Uninstall Istio core components. For the {{ site.data.istio.version }} release, the uninstall
- deletes the RBAC permissions, the `istio-system` namespace, and hierarchically all resources under it.
- It is safe to ignore errors for non-existent resources because they may have been deleted hierarchically.
+ ```command
+ $ kubectl delete -f install/kubernetes/istio-sidecar-injector-with-ca-bundle.yaml
+ ```
- a) If you installed Istio with mutual TLS authentication disabled:
-```bash
-kubectl delete -f install/kubernetes/istio.yaml
-```
+* Uninstall Istio core components. For the {{site.data.istio.version}} release, the uninstall
+deletes the RBAC permissions, the `istio-system` namespace, and hierarchically all resources under it.
+It is safe to ignore errors for non-existent resources because they may have been deleted hierarchically.
- _**OR**_
+ a) If you installed Istio with mutual TLS authentication disabled:
- b) If you installed Istio with mutual TLS authentication enabled:
-```bash
-kubectl delete -f install/kubernetes/istio-auth.yaml
-```
+ ```command
+ $ kubectl delete -f install/kubernetes/istio.yaml
+ ```
+
+ _**OR**_
+
+ b) If you installed Istio with mutual TLS authentication enabled:
+
+ ```command
+ $ kubectl delete -f install/kubernetes/istio-auth.yaml
+ ```
## What's next
* See the sample [Bookinfo]({{home}}/docs/guides/bookinfo.html) application.
-* See how to [test Istio mutual TLS Authentication]({{home}}/docs/tasks/security/mutual-tls.html).
+* See how to [test mutual TLS authentication]({{home}}/docs/tasks/security/mutual-tls.html).
diff --git a/_docs/setup/kubernetes/sidecar-injection.md b/_docs/setup/kubernetes/sidecar-injection.md
index 0ab7dd6eee0f9..c3cb65f71c907 100644
--- a/_docs/setup/kubernetes/sidecar-injection.md
+++ b/_docs/setup/kubernetes/sidecar-injection.md
@@ -1,19 +1,23 @@
---
-title: Installing Istio Sidecar
-overview: Instructions for installing the Istio sidecar in application pods automatically using the sidecar injector webhook or manually using istioctl CLI.
+title: Installing the Istio Sidecar
+description: Instructions for installing the Istio sidecar in application pods automatically using the sidecar injector webhook or manually using istioctl CLI.
-order: 50
+weight: 50
-layout: docs
-type: markdown
---
{% include home.html %}
-_NOTE_: The following requires Istio 0.5.0 or greater. See [https://archive.istio.io/v0.4/docs/setup/kubernetes/sidecar-injection](https://archive.istio.io/v0.4/docs/setup/kubernetes/sidecar-injection) for Istio versions 0.4.0 or older.
+> The following requires Istio 0.5 or greater. See
+> [https://archive.istio.io/v0.4/docs/setup/kubernetes/sidecar-injection](https://archive.istio.io/v0.4/docs/setup/kubernetes/sidecar-injection)
+> for Istio 0.4 or prior.
+>
+> In previous releases, the Kubernetes initializer feature was used for automatic proxy injection. This was an Alpha feature, subject to change/removal,
+> and not enabled by default in Kubernetes. Starting in Kubernetes 1.9 it was replaced by a beta feature called
+> [mutating webhooks](https://kubernetes.io/docs/admin/admission-controllers/#mutatingadmissionwebhook-beta-in-19), which is now enabled by default in
+> Kubernetes 1.9 and beyond. Starting with Istio 0.5.0 the automatic proxy injection uses mutating webhooks, and support for injection by initializer has been
+> removed. Users who cannot upgrade to Kubernetes 1.9 should use manual injection.
-_NOTE_: In previous releases, the Kubernetes initializer feature was used for automatic proxy injection. This was an Alpha feature, subject to change/removal, and not enabled by default in Kubernetes. Starting in Kubernetes 1.9 it was replaced by a beta feature called [mutating webhooks](https://kubernetes.io/docs/admin/admission-controllers/#mutatingadmissionwebhook-beta-in-19), which is now enabled by default in Kubernetes 1.9 and beyond. Starting in Istio 0.5.0 the automatic proxy injection uses mutating webhooks, and support for injection by initializer has been removed. Users who cannot uprade to Kubernetes 1.9 should use manual injection.
-
-# Pod Spec Requirements
+## Pod spec requirements
In order to be a part of the service mesh, each pod in the Kubernetes
cluster must satisfy the following requirements:
@@ -43,55 +47,45 @@ cluster must satisfy the following requirements:
ways of injecting the Istio sidecar into a pod: manually using `istioctl`
CLI tool or automatically using the Istio Initializer. Note that the
sidecar is not involved in traffic between containers in the same pod.
-
-# Injection
-Manual injection modifies the controller configuration, e.g. deployment. It
+## Injection
+
+Manual injection modifies the controller configuration, e.g. deployment. It
does this by modifying the pod template spec such that *all* pods for that
deployment are created with the injected sidecar. Adding/Updating/Removing
the sidecar requires modifying the entire deployment.
-Automatic injection injects at pod creation time. The controller resource is
+Automatic injection injects at pod creation time. The controller resource is
unmodified. Sidecars can be updated selectively by manually deleting a pods or
systematically with a deployment rolling update.
-Manual and automatic injection use the same templated configuration. Automatic
-injection loads the configuration from the `istio-inject` ConfigMap in the
-`istio-system` namespace. Manual injection can load from a local file or from
+Manual and automatic injection use the same templated configuration. Automatic
+injection loads the configuration from the `istio-sidecar-injector` ConfigMap in the
+`istio-system` namespace. Manual injection can load from a local file or from
the ConfigMap.
-Two variants of the injection configuration are provided with the default
-install: `istio-sidecar-injector-configmap-release.yaml`
-and `istio-sidecar-injector-configmap-debug.yaml`. The injection configmap includes
-the default injection policy and sidecar injection template. The debug version
-includes debug proxy images and additional logging and core dump functionality using
-for debugging the sidecar proxy.
-
-## Manual sidecar injection
+### Manual sidecar injection
-Use the built-in defaults template and dynamically fetch the mesh
+Use the built-in defaults template and dynamically fetch the mesh
configuration from the `istio` ConfigMap. Additional parameter overrides
are available via flags (see `istioctl kube-inject --help`).
-```bash
-kubectl apply -f <(~istioctl kube-inject -f samples/sleep/sleep.yaml)
+```command
+$ kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml)
```
`kube-inject` can also be run without access to a running Kubernetes
cluster. Create local copies of the injection and mesh configmap.
-```bash
-kubectl create -f install/kubernetes/istio-sidecar-injector-configmap-release.yaml \
- --dry-run \
- -o=jsonpath='{.data.config}' > inject-config.yaml
-
-kubectl -n istio-system get configmap istio -o=jsonpath='{.data.mesh}' > mesh-config.yaml
+```command
+$ istioctl kube-inject --emitTemplate > inject-config.yaml
+$ kubectl -n istio-system get configmap istio -o=jsonpath='{.data.mesh}' > mesh-config.yaml
```
- `
+
Run `kube-inject` over the input file.
-```bash
-istioctl kube-inject \
+```command
+$ istioctl kube-inject \
--injectConfigFile inject-config.yaml \
--meshConfigFile mesh-config.yaml \
--filename samples/sleep/sleep.yaml \
@@ -100,256 +94,105 @@ istioctl kube-inject \
Deploy the injected YAML file.
-```bash
-kubectl apply -f sleep-injected.yaml
+```command
+$ kubectl apply -f sleep-injected.yaml
```
Verify that the sidecar has been injected into the deployment.
-```bash
-kubectl get deployment sleep -o wide
-```
-```
+```command
+$ kubectl get deployment sleep -o wide
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
sleep 1 1 1 1 2h sleep,istio-proxy tutum/curl,unknown/proxy:unknown app=sleep
```
-## Automatic sidecar injection
+### Automatic sidecar injection
-Sidecars can be automatically added to applicable Kubernetes pods using a
-[mutating webhook admission controller](https://kubernetes.io/docs/admin/admission-controllers/#validatingadmissionwebhook-alpha-in-18-beta-in-19), available in Kubernetes 1.9 and above. Specifically, verify that the kube-apiserver process has the `admission-control` flag set with the `MutatingAdmissionWebhook` and `ValidatingAdmissionWebhook` admission controllers added and listed in the correct order.
+Sidecars can be automatically added to applicable Kubernetes pods using a
+[mutating webhook admission controller](https://kubernetes.io/docs/admin/admission-controllers/#validatingadmissionwebhook-alpha-in-18-beta-in-19). This feature requires Kubernetes 1.9 or later. Verify that the kube-apiserver process has the `admission-control` flag set with the `MutatingAdmissionWebhook` and `ValidatingAdmissionWebhook` admission controllers added and listed in the correct order and the admissionregistration API is enabled.
-### Prerequisites
-
-A Kubernetes 1.9 cluster is required, with the `admissionregistration.k8s.io/v1beta1` API enabled. This is enabled by default on most instllations. If you want to check, you can grep:
-
-```bash
-kubectl api-versions | grep admissionregistration
-```
-
-You should see
-```
+```command
+$ kubectl api-versions | grep admissionregistration
admissionregistration.k8s.io/v1beta1
+admissionregistration.k8s.io/v2beta2
```
-#### Google Kubernetes Engine (GKE)
-
-Kubernetes 1.9 is generally available on Google Kubernetes Engine (GKE). At the time of writing it is not the default version, so to create a cluster:
-
-```bash
-gcloud container clusters create \
- --cluster-version=1.9.2-gke.1
- --zone
- --project
-```
-```bash
-gcloud container clusters get-credentials \
- --zone \
- --project
-```
-```bash
-kubectl create clusterrolebinding cluster-admin-binding \
- --clusterrole=cluster-admin \
- --user=$(gcloud config get-value core/account)
-```
-
-#### minikube
-
-Minikube version v0.25.0 or later is required for Kubernetes v1.9. Get the latest version from [https://github.com/kubernetes/minikube/releases](https://github.com/kubernetes/minikube/releases).
-
-```bash
-minikube start \
- --extra-config=controller-manager.ClusterSigningCertFile="/var/lib/localkube/certs/ca.crt" \
- --extra-config=controller-manager.ClusterSigningKeyFile="/var/lib/localkube/certs/ca.key" \
- --extra-config=apiserver.Admission.PluginNames=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
- --kubernetes-version=v1.9.0
-```
-
-### AWS (with Kops)
-
-When you install a new cluster with Kubernetes version 1.9, prerequisite for `admissionregistration.k8s.io/v1beta1` enabled is covered.
-
-Nevertheless the list of admission controllers needs to be updated.
-
-```bash
-kops edit cluster $YOURCLUSTER
-```
-
-Add following in the configuration file just openned:
-
-```bash
-kubeAPIServer:
- admissionControl:
- - NamespaceLifecycle
- - LimitRanger
- - ServiceAccount
- - PersistentVolumeLabel
- - DefaultStorageClass
- - DefaultTolerationSeconds
- - MutatingAdmissionWebhook
- - ValidatingAdmissionWebhook
- - ResourceQuota
- - Initializers
- - NodeRestriction
- - Priority
-```
-
-Perform the update
-
-```bash
-kops update cluster
-kops update cluster --yes
-```
-
-Launch the rolling update
-
-```bash
-kops rolling-update cluster
-kops rolling-update cluster --yes
-```
-
-Validate with a `ps` on master node, you should see new admission controller
-
-```bash
-/bin/sh -c /usr/local/bin/kube-apiserver --address=127.0.0.1 --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,Initializers,NodeRestriction,Priority [...]
-```
-
-
-### Installing the Webhook
-
-_NOTE_: The [0.5.0](https://github.com/istio/istio/releases/tag/0.5.0) and [0.5.1](https://github.com/istio/istio/releases/tag/0.5.1) releases are missing scripts to provision webhook certificates. Download the missing files from [here](https://raw.githubusercontent.com/istio/istio/master/install/kubernetes/webhook-create-signed-cert.sh) and [here](https://raw.githubusercontent.com/istio/istio/master/install/kubernetes/webhook-patch-ca-bundle.sh). Subsqeuent releases (> 0.5.1) should include these missing files.
-
-Install base Istio.
-
-```bash
-kubectl apply -f install/kubernetes/istio.yaml
-```
-
-Webhooks requires a signed cert/key pair. Use `install/kubernetes/webhook-create-signed-cert.sh` to generate
-a cert/key pair signed by the Kubernetes' CA. The resulting cert/key file is stored as a Kubernetes
-secret for the sidecar injector webhook to consume.
-
-_Note_: Kubernetes CA approval requires permissions to create and approve CSR. See
-[https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster ](https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster) and
-[install/kubernetes/webhook-create-signed-cert.sh](https://raw.githubusercontent.com/istio/istio/master/install/kubernetes/webhook-create-signed-cert.sh) for more information.
-
-```bash
-./install/kubernetes/webhook-create-signed-cert.sh \
- --service istio-sidecar-injector \
- --namespace istio-system \
- --secret sidecar-injector-certs
-```
-
-Install the sidecar injection configmap.
+See the Kubernetes [quick start]({{home}}/docs/setup/kubernetes/quick-start.html) guide for instructions on installing Kubernetes version >= 1.9.
-```bash
-kubectl apply -f install/kubernetes/istio-sidecar-injector-configmap-release.yaml
-```
-
-Set the `caBundle` in the webhook install YAML that the Kubernetes api-server
-uses to invoke the webhook.
+Note that unlike manual injection, automatic injection occurs at the pod-level. You won't see any change to the deployment itself. Instead you'll want to check individual pods (via `kubectl describe`) to see the injected proxy.
-```bash
-cat install/kubernetes/istio-sidecar-injector.yaml | \
- ./install/kubernetes/webhook-patch-ca-bundle.sh > \
- install/kubernetes/istio-sidecar-injector-with-ca-bundle.yaml
-```
+#### Installing the webhook
-Install the sidecar injector webhook.
+To enable the sidecar injection webhook, you can use [Helm]({{home}}/docs/setup/kubernetes/helm-install.html)
+to install Istio with the option `sidecar-injector.enabled` set to `true`. For example:
-```bash
-kubectl apply -f install/kubernetes/istio-sidecar-injector-with-ca-bundle.yaml
+```command
+$ helm install --namespace=istio-system --set sidecar-injector.enabled=true install/kubernetes/helm/istio
```
-The sidecar injector webhook should now be running.
+Alternatively, you can also use Helm to generate the yaml file and install it manually. E.g.
-```bash
-kubectl -n istio-system get deployment -listio=sidecar-injector
+```command
+$ helm template --namespace=istio-system --set sidecar-injector.enabled=true install/kubernetes/helm/istio > istio.yaml
+$ kubectl create ns istio-system
+$ kubectl apply -f istio.yaml
```
-```
-NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
-istio-sidecar-injector 1 1 1 1 1d
-```
-
-NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector (see https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors). The default webhook configuration uses `istio-injection=enabled`.
-View namespaces showing `istio-injection` label and verify the `default` namespace is not labeled.
-
-```bash
-kubectl get namespace -L istio-injection
-```
-```
-NAME STATUS AGE ISTIO-INJECTION
-default Active 1h
-istio-system Active 1h
-kube-public Active 1h
-kube-system Active 1h
-```
+In addition, there are some other configuration parameters defined for sidecar
+injector webhook service in `values.yaml`. You can override the default
+values to customize the installation.
#### Deploying an app
Deploy sleep app. Verify both deployment and pod have a single container.
-```bash
-kubectl apply -f samples/sleep/sleep.yaml
-```
-```bash
-kubectl get deployment -o wide
-```
-```
+```command
+$ kubectl apply -f samples/sleep/sleep.yaml
+$ kubectl get deployment -o wide
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
sleep 1 1 1 1 12m sleep tutum/curl app=sleep
```
-```bash
-kubectl get pod
-```
-```
+
+```command
+$ kubectl get pod
NAME READY STATUS RESTARTS AGE
sleep-776b7bcdcd-7hpnk 1/1 Running 0 4
```
Label the `default` namespace with `istio-injection=enabled`
-```bash
-kubectl label namespace default istio-injection=enabled
-```
-```bash
-kubectl get namespace -L istio-injection
-```
-```
+```command
+$ kubectl label namespace default istio-injection=enabled
+$ kubectl get namespace -L istio-injection
NAME STATUS AGE ISTIO-INJECTION
default Active 1h enabled
-istio-system Active 1h
-kube-public Active 1h
-kube-system Active 1h
+istio-system Active 1h
+kube-public Active 1h
+kube-system Active 1h
```
Injection occurs at pod creation time. Kill the running pod and verify a new pod is created with the injected sidecar. The original pod has 1/1 READY containers and the pod with injected sidecar has 2/2 READY containers.
-```bash
-kubectl delete pod sleep-776b7bcdcd-7hpnk
-```
-```bash
-kubectl get pod
-```
-```
+```command
+$ kubectl delete pod sleep-776b7bcdcd-7hpnk
+$ kubectl get pod
NAME READY STATUS RESTARTS AGE
sleep-776b7bcdcd-7hpnk 1/1 Terminating 0 1m
sleep-776b7bcdcd-bhn9m 2/2 Running 0 7s
```
-Disable injection for the `default` namespace and verify new pods are created without the sidecar.
+View detailed state of the injected pod. You should see the injected `istio-proxy` container and corresponding volumes. Be sure to substitute the correct name for the `Running` pod below.
-```bash
-kubectl label namespace default istio-injection-
-```
-```bash
-kubectl delete pod sleep-776b7bcdcd-bhn9m
-```
-```bash
-kubectl get pod
-```
+```command
+$ kubectl describe pod sleep-776b7bcdcd-bhn9m
```
+
+Disable injection for the `default` namespace and verify new pods are created without the sidecar.
+
+```command
+$ kubectl label namespace default istio-injection-
+$ kubectl delete pod sleep-776b7bcdcd-bhn9m
+$ kubectl get pod
NAME READY STATUS RESTARTS AGE
sleep-776b7bcdcd-bhn9m 2/2 Terminating 0 2m
sleep-776b7bcdcd-gmvnr 1/1 Running 0 2s
@@ -357,32 +200,51 @@ sleep-776b7bcdcd-gmvnr 1/1 Running 0 2s
#### Understanding what happened
-[admissionregistration.k8s.io/v1alpha1#MutatingWebhookConfiguration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.9/#mutatingwebhookconfiguration-v1beta1-admissionregistration)
-configures when the webhook is invoked by Kubernetes. The default
-supplied with Istio selects pods in namespaces with label `istio-injection=enabled`.
+[admissionregistration.k8s.io/v1beta1#MutatingWebhookConfiguration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#mutatingwebhookconfiguration-v1beta1-admissionregistration)
+configures when the webhook is invoked by Kubernetes. The default
+supplied with Istio selects pods in namespaces with label `istio-injection=enabled`.
This can be changed by modifying the MutatingWebhookConfiguration in
`install/kubernetes/istio-sidecar-injector-with-ca-bundle.yaml`.
-The `istio-inject` ConfigMap in the `istio-system` namespace the default
+The `istio-sidecar-injector` ConfigMap in the `istio-system` namespace has the default
injection policy and sidecar injection template.
##### _**policy**_
-
+
`disabled` - The sidecar injector will not inject the sidecar into
-pods by default. Add the `sidecar.istio.io/inject` annotation with
+pods by default. Add the `sidecar.istio.io/inject` annotation with
value `true` to the pod template spec to enable injection.
-
+
`enabled` - The sidecar injector will inject the sidecar into pods by
-default. Add the `sidecar.istio.io/inject` annotation with
+default. Add the `sidecar.istio.io/inject` annotation with
value `false` to the pod template spec to disable injection.
-
+
+The following example uses the `sidecar.istio.io/inject` annotation to disable sidecar injection.
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: ignored
+spec:
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "false"
+ spec:
+ containers:
+ - name: ignored
+ image: tutum/curl
+ command: ["/bin/sleep","infinity"]
+```
+
##### _**template**_
-
-The sidecar injection template uses [https://golang.org/pkg/text/template](https://golang.org/pkg/text/template) which,
-when parsed and exectuted, is decoded to the following
-struct containing the list of containers and volumes to inject into the pod.
-
-```golang
+
+The sidecar injection template uses [https://golang.org/pkg/text/template](https://golang.org/pkg/text/template) which,
+when parsed and executed, is decoded to the following
+struct containing the list of containers and volumes to inject into the pod.
+
+```go
type SidecarInjectionSpec struct {
InitContainers []v1.Container `yaml:"initContainers"`
Containers []v1.Container `yaml:"containers"`
@@ -390,20 +252,20 @@ type SidecarInjectionSpec struct {
}
```
-The template is applied to the following data structure at runtime.
-
-```golang
+The template is applied to the following data structure at runtime.
+
+```go
type SidecarTemplateData struct {
- ObjectMeta *metav1.ObjectMeta
- Spec *v1.PodSpec
+ ObjectMeta *metav1.ObjectMeta
+ Spec *v1.PodSpec
ProxyConfig *meshconfig.ProxyConfig // Defined by https://istio.io/docs/reference/config/service-mesh.html#proxyconfig
- MeshConfig *meshconfig.MeshConfig // Defined by https://istio.io/docs/reference/config/service-mesh.html#meshconfig
+ MeshConfig *meshconfig.MeshConfig // Defined by https://istio.io/docs/reference/config/service-mesh.html#meshconfig
}
```
-`ObjectMeta` and `Spec` are from the pod. `ProxyConfig` and `MeshConfig`
-are from the `istio` ConfigMap in the `istio-system` namespace. Templates can conditional
-define injected containers and volumes with this data.
+`ObjectMeta` and `Spec` are from the pod. `ProxyConfig` and `MeshConfig`
+are from the `istio` ConfigMap in the `istio-system` namespace. Templates can conditionally
+define injected containers and volumes with this data.
For example, the following template snippet from `install/kubernetes/istio-sidecar-injector-configmap-release.yaml`
@@ -428,7 +290,7 @@ containers:
```
{% endraw %}
-expands to
+expands to
```yaml
containers:
@@ -444,13 +306,13 @@ containers:
- --serviceCluster
- sleep
```
-
+
when applied over a pod defined by the pod template spec in [samples/sleep/sleep.yaml](https://raw.githubusercontent.com/istio/istio/master/samples/sleep/sleep.yaml).
-### Uninstalling the webhook
+#### Uninstalling the webhook
-```bash
-kubectl delete -f install/kubernetes/istio-sidecar-injector-with-ca-bundle.yaml
+```command
+$ kubectl delete -f install/kubernetes/istio-sidecar-injector-with-ca-bundle.yaml
```
The above command will not remove the injected sidecars from
@@ -459,8 +321,8 @@ the deployment to create them is required.
Optionally, if may be also be desirable to clean-up other resources that were created in this task. This includes the secret holding the cert/key and CSR used to sign them, as well as any namespace that was labeled for injection.
-```bash
-kubectl -n istio-system delete secret sidecar-injector-certs
-kubectl delete csr istio-sidecar-injector.istio-system
-kubectl label namespace default istio-injection-
+```command
+$ kubectl -n istio-system delete secret sidecar-injector-certs
+$ kubectl delete csr istio-sidecar-injector.istio-system
+$ kubectl label namespace default istio-injection-
```
diff --git a/_docs/setup/kubernetes/upgrading-istio.md b/_docs/setup/kubernetes/upgrading-istio.md
new file mode 100644
index 0000000000000..6cd1ea9ff1a06
--- /dev/null
+++ b/_docs/setup/kubernetes/upgrading-istio.md
@@ -0,0 +1,77 @@
+---
+title: Upgrading Istio
+description: This guide demonstrates how to upgrade the Istio control plane and data plane independently.
+
+weight: 70
+
+---
+{% include home.html %}
+
+This guide demonstrates how to upgrade the Istio control plane and data plane
+for the Kubernetes environment.
+
+## Overview
+
+This guide describes how to upgrade an existing Istio deployment (including
+both control plane and sidecar proxy) to a new release of Istio. The upgrade
+process could involve new binaries as well as other changes like configuration
+and API schemas. The upgrade process may involve some service downtime.
+
+## Application setup
+
+In the following steps, we assume that the Istio components are installed and
+upgraded in the same namespace ISTIO\_NAMESPACE.
+
+## Tasks
+
+### Control plane upgrade
+
+The Istio control plane components include: Citadel, Ingress, Pilot, Mixer, and
+Sidecar injector. We can use Kubernetes’ rolling update mechanism to upgrade the
+control plane components. It can be done by simply applying the new version
+yaml file directly, e.g.
+
+```command
+$ kubectl apply -f istio.yaml (or istio-auth.yaml)
+```
+
+> If you have used [Helm](https://istio.io/docs/setup/kubernetes/helm.html)
+to generate a customized Istio deployment, please use the customized yaml files
+generated by Helm instead of the standard installation yamls.
+
+The rolling update process will upgrade all deployments and configmaps to the
+new version. If there is any issue with the new control plane, you can roll back
+the changes by applying the old version yaml files.
+
+### Sidecar upgrade
+
+After the control plane is upgraded, you will need to re-inject the new version
+of sidecar proxy. There are two cases: Manual injection and Automatic injection.
+
+1. Manual injection:
+
+ If automatic sidecar injection is not enabled, you can upgrade the
+ sidecar manually by running the following command:
+
+ ```command
+ $ kubectl apply -f <(istioctl kube-inject -i $ISTIO_NAMESPACE -f $ORIGINAL_DEPLOYMENT_YAML)
+ ```
+
+ If the sidecar was previously injected with some customized inject config
+ files, you will need to change the version tag in the config files to the new
+ version and reinject the sidecar as follows:
+
+ ```command
+ $ kubectl apply -f <(istioctl kube-inject \
+ --injectConfigFile inject-config.yaml \
+ --filename $ORIGINAL_DEPLOYMENT_YAML)
+ ```
+
+1. Automatic injection:
+
+ If automatic sidecar injection is enabled, you can upgrade the sidecar
+ by doing a rolling update for all the pods, so that the new version of
+ sidecar will be automatically re-injected
+
+ There are some tricks to reload all pods. E.g. There is a [bash script](https://gist.github.com/jmound/ff6fa539385d1a057c82fa9fa739492e)
+ which triggers the rolling update by patching the grace termination period.
diff --git a/_docs/setup/mesos/index.md b/_docs/setup/mesos/index.md
index ecdf36323e486..e8ebb9ddcc35d 100644
--- a/_docs/setup/mesos/index.md
+++ b/_docs/setup/mesos/index.md
@@ -1,11 +1,9 @@
---
title: Mesos
-overview: Instructions for installing the Istio control plane in Apache Mesos.
+description: Instructions for installing the Istio control plane in Apache Mesos.
-order: 50
+weight: 50
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/setup/mesos/install.md b/_docs/setup/mesos/install.md
index ea50e3b7538d9..2a1e5af34ffad 100644
--- a/_docs/setup/mesos/install.md
+++ b/_docs/setup/mesos/install.md
@@ -1,11 +1,9 @@
---
title: Installation
-overview: Instructions for installing the Istio control plane in Apache Mesos.
+description: Instructions for installing the Istio control plane in Apache Mesos.
-order: 10
+weight: 10
-layout: docs
-type: markdown
---
{% include home.html %}
diff --git a/_docs/tasks/index.md b/_docs/tasks/index.md
index 3325699459149..29b54b3d2f6fb 100644
--- a/_docs/tasks/index.md
+++ b/_docs/tasks/index.md
@@ -1,11 +1,9 @@
---
title: Tasks
-overview: Tasks show you how to do a single specific targeted activity with the Istio system.
+description: Tasks show you how to do a single specific targeted activity with the Istio system.
-order: 20
+weight: 20
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/tasks/policy-enforcement/index.md b/_docs/tasks/policy-enforcement/index.md
index 1559ef3736b20..74222de2f1af1 100644
--- a/_docs/tasks/policy-enforcement/index.md
+++ b/_docs/tasks/policy-enforcement/index.md
@@ -1,11 +1,9 @@
---
title: Policy Enforcement
-overview: Describes tasks that demonstrate policy enforcement features.
+description: Describes tasks that demonstrate policy enforcement features.
-order: 20
+weight: 20
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/tasks/policy-enforcement/rate-limiting.md b/_docs/tasks/policy-enforcement/rate-limiting.md
index dd8266e57d372..bc3c797c967a9 100644
--- a/_docs/tasks/policy-enforcement/rate-limiting.md
+++ b/_docs/tasks/policy-enforcement/rate-limiting.md
@@ -1,11 +1,10 @@
---
title: Enabling Rate Limits
-overview: This task shows you how to use Istio to dynamically limit the traffic to a service.
-
-order: 10
+description: This task shows you how to use Istio to dynamically limit the traffic to a service.
-layout: docs
-type: markdown
+weight: 10
+
+redirect_from: /docs/tasks/rate-limiting.html
---
{% include home.html %}
@@ -21,31 +20,31 @@ This task shows you how to use Istio to dynamically limit the traffic to a servi
* Initialize the application version routing to direct `reviews` service requests from
test user "jason" to version v2 and requests from any other user to v3.
- ```bash
- istioctl create -f samples/bookinfo/kube/route-rule-reviews-test-v2.yaml
- istioctl create -f samples/bookinfo/kube/route-rule-reviews-v3.yaml
+ ```command
+ $ istioctl create -f samples/bookinfo/kube/route-rule-reviews-test-v2.yaml
+ $ istioctl create -f samples/bookinfo/kube/route-rule-reviews-v3.yaml
```
-
- > Note: if you have conflicting rule that you set in previous tasks,
- use `istioctl replace` instead of `istioctl create`.
+
+> If you have conflicting rules that you set in previous tasks,
+use `istioctl replace` instead of `istioctl create`.
## Rate limits
Istio enables users to rate limit traffic to a service.
-
+
Consider `ratings` as an external paid service like Rotten Tomatoes® with `1qps` free quota.
-Using Istio we can ensure that `1qps` is not breached.
+Using Istio we can ensure that `1qps` is not breached.
1. Point your browser at the Bookinfo `productpage` (http://$GATEWAY_URL/productpage).
If you log in as user "jason", you should see black ratings stars with each review,
indicating that the `ratings` service is being called by the "v2" version of the `reviews` service.
-
+
If you log in as any other user (or logout) you should see red ratings stars with each review,
indicating that the `ratings` service is being called by the "v3" version of the `reviews` service.
-1. Configure a `memquota` adapter with rate limits.
-
+1. Configure a `memquota` adapter with rate limits.
+
Save the following YAML snippet as `ratelimit-handler.yaml`.
```yaml
@@ -75,14 +74,14 @@ Using Istio we can ensure that `1qps` is not breached.
and then run the following command:
- ```bash
- istioctl create -f ratelimit-handler.yaml
+ ```command
+ $ istioctl create -f ratelimit-handler.yaml
```
-
+
This configuration specifies a default 5000 qps rate limit. Traffic reaching the ratings service via
reviews-v2 is subject to a 1qps rate limit. In our example user "jason" is routed via reviews-v2 and is therefore subject
to the 1qps rate limit.
-
+
1. Configure rate limit instance and rule
Create a quota instance named `requestcount` that maps incoming attributes to quota dimensions,
@@ -115,19 +114,19 @@ Using Istio we can ensure that `1qps` is not breached.
Save the configuration as `ratelimit-rule.yaml` and run the following command:
- ```bash
- istioctl create -f ratelimit-rule.yaml
+ ```command
+ $ istioctl create -f ratelimit-rule.yaml
```
1. Generate load on the `productpage` with the following command:
- ```bash
- while true; do curl -s -o /dev/null http://$GATEWAY_URL/productpage; done
+ ```command
+ $ while true; do curl -s -o /dev/null http://$GATEWAY_URL/productpage; done
```
1. Refresh the `productpage` in your browser.
- If you log in as user "jason" while the load generator is running (i.e., generating more than 1 req/s),
+ If you log in as user "jason" while the load generator is running (i.e., generating more than 1 req/s),
the traffic generated by your browser will be rate limited to 1qps.
The reviews-v2 service is unable to access the ratings service and you stop seeing stars.
For all other users the default 5000qps rate limit will apply and you will continue seeing red stars.
@@ -164,31 +163,31 @@ In the preceding examples we saw how Mixer applies rate limits to requests that
Every named quota instance like `requestcount` represents a set of counters.
The set is defined by a Cartesian product of all quota dimensions.
If the number of requests in the last `expiration` duration exceed `maxAmount`, Mixer returns a `RESOURCE_EXHAUSTED`
-message to the proxy. The proxy in turn returns status `HTTP 429` to the caller.
+message to the proxy. The proxy in turn returns status `HTTP 429` to the caller.
-The `memquota` adapter uses a sliding window of sub second resolution to enforce rate limits.
+The `memquota` adapter uses a sliding window of sub second resolution to enforce rate limits.
The `maxAmount` in the adapter configuration sets the default limit for all counters associated with a quota instance.
This default limit applies if a quota override does not match the request. Memquota selects the first override that matches a request.
An override need not specify all quota dimensions. In the ratelimit-handler.yaml example, the `1qps` override is
-selected by matching only three out of four quota dimensions.
+selected by matching only three out of four quota dimensions.
-If you would like the above policies enforced for a given namespace instead of the entire Istio mesh, you can replace all occurrences of istio-system with the given namespace.
+If you would like the above policies enforced for a given namespace instead of the entire Istio mesh, you can replace all occurrences of istio-system with the given namespace.
## Cleanup
* Remove the rate limit configuration:
- ```bash
- istioctl delete -f ratelimit-handler.yaml
- istioctl delete -f ratelimit-rule.yaml
+ ```command
+ $ istioctl delete -f ratelimit-handler.yaml
+ $ istioctl delete -f ratelimit-rule.yaml
```
* Remove the application routing rules:
- ```
- istioctl delete -f samples/bookinfo/kube/route-rule-reviews-test-v2.yaml
- istioctl delete -f samples/bookinfo/kube/route-rule-reviews-v3.yaml
+ ```command
+ $ istioctl delete -f samples/bookinfo/kube/route-rule-reviews-test-v2.yaml
+ $ istioctl delete -f samples/bookinfo/kube/route-rule-reviews-v3.yaml
```
* If you are not planning to explore any follow-on tasks, refer to the
@@ -200,5 +199,3 @@ If you would like the above policies enforced for a given namespace instead of t
* Learn more about [Mixer]({{home}}/docs/concepts/policy-and-control/mixer.html) and [Mixer Config]({{home}}/docs/concepts/policy-and-control/mixer-config.html).
* Discover the full [Attribute Vocabulary]({{home}}/docs/reference/config/mixer/attribute-vocabulary.html).
-
-* Read the reference guide to [Writing Config]({{home}}/docs/reference/writing-config.html).
diff --git a/_docs/tasks/security/authn-policy.md b/_docs/tasks/security/authn-policy.md
new file mode 100644
index 0000000000000..128e460566ff8
--- /dev/null
+++ b/_docs/tasks/security/authn-policy.md
@@ -0,0 +1,265 @@
+---
+title: Basic Authentication Policy
+description: Shows you how to use Istio authentication policy to setup mutual TLS and basic end-user authentication.
+
+weight: 10
+
+---
+{% include home.html %}
+
+Through this task, you will learn how to:
+
+* Use authentication policy to setup mutual TLS.
+
+* Use authentication policy to do end-user authentication.
+
+## Before you begin
+
+* Understand Istio [authentication policy]({{home}}/docs/concepts/security/authn-policy.html) and related [mutual TLS authentication]({{home}}/docs/concepts/security/mutual-tls.html) concepts.
+
+* Know how to verify the mTLS setup (we recommend walking through [testing Istio mutual TLS authentication]({{home}}/docs/tasks/security/mutual-tls.html))
+
+* Have a Kubernetes cluster with Istio installed, without mTLS. See [the Istio installation task]({{home}}/docs/setup/kubernetes/quick-start.html) and follow step 5.
+
+* For demo, create two namespaces `foo` and `bar`, and deploy [httpbin](https://github.com/istio/istio/tree/master/samples/httpbin) and [sleep](https://github.com/istio/istio/tree/master/samples/sleep) with sidecar on both of them. Also, run another sleep app without sidecar (to keep it separate, run it in `legacy` namespace)
+
+ ```command
+ $ kubectl create ns foo
+ $ kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo
+ $ kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo
+ $ kubectl create ns bar
+ $ kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n bar
+ $ kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n bar
+ $ kubectl create ns legacy
+ $ kubectl apply -f samples/sleep/sleep.yaml -n legacy
+ ```
+
+* Verify the setup by sending an http request (using the curl command) from any sleep pod (among those in namespace `foo`, `bar` or `legacy`) to either `httpbin.foo` or `httpbin.bar`. All requests should succeed with HTTP code 200.
+
+ For example, here is a command to check `sleep.bar` to `httpbin.foo` reachability:
+
+ ```command
+ $ kubectl exec $(kubectl get pod -l app=sleep -n bar -o jsonpath={.items..metadata.name}) -c sleep -n bar -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n"
+ 200
+ ```
+
+ Conveniently, this one-liner command iterates through all combinations:
+
+ ```command
+ $ for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec $(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name}) -c sleep -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
+ sleep.foo to httpbin.foo: 200
+ sleep.foo to httpbin.bar: 200
+ sleep.bar to httpbin.foo: 200
+ sleep.bar to httpbin.bar: 200
+ sleep.legacy to httpbin.foo: 200
+ sleep.legacy to httpbin.bar: 200
+ ```
+
+* Also verify that there are no authentication policies in the system
+
+ ```command
+ $ kubectl get policies.authentication.istio.io -n foo
+ $ kubectl get policies.authentication.istio.io -n bar
+ No resources found.
+ ```
+
+## Enable mTLS for all services in namespace `foo`
+
+Run this command to set namespace-level policy for namespace `foo`.
+
+```bash
+cat <
+$ export TOKEN=
+```
+
+Also, for convenience, expose `httpbin.foo` via ingress (for more details, see [ingress task]({{home}}/docs/tasks/traffic-management/ingress.html)).
+
+```bash
+cat < Note: if you have conflicting rules that you set in previous tasks,
- use `istioctl replace` instead of `istioctl create`.
- > Note: if you are using a namespace other than `default`,
- use `istioctl -n namespace ...` to specify the namespace.
+ > If you have conflicting rules that you set in previous tasks,
+ > use `istioctl replace` instead of `istioctl create`.
+ >
+ > If you are using a namespace other than `default`,
+ > use `istioctl -n namespace ...` to specify the namespace.
-## Access control using _denials_
+## Access control using _denials_
Using Istio you can control access to a service based on any attributes that are available within Mixer.
This simple form of access control is based on conditionally denying requests using Mixer selectors.
@@ -44,33 +42,34 @@ of the `reviews` service. We would like to cut off access to version `v3` of the
If you log in as user "jason", you should see black rating stars with each review,
indicating that the `ratings` service is being called by the "v2" version of the `reviews` service.
-
+
If you log in as any other user (or logout) you should see red rating stars with each review,
indicating that the `ratings` service is being called by the "v3" version of the `reviews` service.
1. Explicitly deny access to version `v3` of the `reviews` service.
Run the following command to set up the deny rule along with a handler and an instance.
- ```bash
- istioctl create -f samples/bookinfo/kube/mixer-rule-deny-label.yaml
- ```
- You can expect to see the output similar to the following:
- ```bash
+
+ ```command
+ $ istioctl create -f samples/bookinfo/kube/mixer-rule-deny-label.yaml
Created config denier/default/denyreviewsv3handler at revision 2882105
Created config checknothing/default/denyreviewsv3request at revision 2882106
Created config rule/default/denyreviewsv3 at revision 2882107
```
+
Notice the following in the `denyreviewsv3` rule:
- ```
+
+ ```plain
match: destination.labels["app"] == "ratings" && source.labels["app"]=="reviews" && source.labels["version"] == "v3"
```
+
It matches requests coming from the service `reviews` with label `v3` to the service `ratings`.
This rule uses the `denier` adapter to deny requests coming from version `v3` of the reviews service.
- The adapter always denies requests with a pre-configured status code and message.
+ The adapter always denies requests with a preconfigured status code and message.
The status code and the message is specified in the [denier]({{home}}/docs/reference/config/adapters/denier.html)
adapter configuration.
-
+
1. Refresh the `productpage` in your browser.
If you are logged out or logged in as any user other than "jason" you will no longer see red ratings stars because
@@ -78,14 +77,15 @@ of the `reviews` service. We would like to cut off access to version `v3` of the
In contrast, if you log in as user "jason" (the `reviews:v2` user) you continue to see
the black ratings stars.
-## Access control using _whitelists_
+## Access control using _whitelists_
Istio also supports attribute-based whitelists and blacklists. The following whitelist configuration is equivalent to the
`denier` configuration in the previous section. The rule effectively rejects requests from version `v3` of the `reviews` service.
1. Remove the denier configuration that you added in the previous section.
- ```bash
- istioctl delete -f samples/bookinfo/kube/mixer-rule-deny-label.yaml
+
+ ```command
+ $ istioctl delete -f samples/bookinfo/kube/mixer-rule-deny-label.yaml
```
1. Verify that when you access the Bookinfo `productpage` (http://$GATEWAY_URL/productpage) without logging in, you see red stars.
@@ -106,10 +106,11 @@ Istio also supports attribute-based whitelists and blacklists. The following whi
overrides: ["v1", "v2"] # overrides provide a static list
blacklist: false
```
- and then run the following command:
- ```bash
- istioctl create -f whitelist-handler.yaml
+ and then run the following command:
+
+ ```command
+ $ istioctl create -f whitelist-handler.yaml
```
1. Extract the version label by creating an instance of the [`listentry`]({{home}}/docs/reference/config/template/listentry.html) template.
@@ -123,16 +124,16 @@ Save the following YAML snippet as `appversion-instance.yaml`:
spec:
value: source.labels["version"]
```
- and then run the following command:
- ```bash
- istioctl create -f appversion-instance.yaml
+ and then run the following command:
+
+ ```command
+ $ istioctl create -f appversion-instance.yaml
```
1. Enable `whitelist` checking for the ratings service.
Save the following YAML snippet as `checkversion-rule.yaml`:
-
```yaml
apiVersion: config.istio.io/v1alpha2
kind: rule
@@ -145,10 +146,11 @@ Save the following YAML snippet as `checkversion-rule.yaml`:
instances:
- appversion.listentry
```
- and then run the following command:
- ```bash
- istioctl create -f checkversion-rule.yaml
+ and then run the following command:
+
+ ```command
+ $ istioctl create -f checkversion-rule.yaml
```
1. Verify that when you access the Bookinfo `productpage` (http://$GATEWAY_URL/productpage) without logging in, you see **no** stars.
@@ -158,17 +160,17 @@ Verify that after logging in as "jason" you see black stars.
* Remove the mixer configuration:
- ```bash
- istioctl delete -f checkversion-rule.yaml
- istioctl delete -f appversion-instance.yaml
- istioctl delete -f whitelist-handler.yaml
+ ```command
+ $ istioctl delete -f checkversion-rule.yaml
+ $ istioctl delete -f appversion-instance.yaml
+ $ istioctl delete -f whitelist-handler.yaml
```
* Remove the application routing rules:
- ```
- istioctl delete -f samples/bookinfo/kube/route-rule-reviews-test-v2.yaml
- istioctl delete -f samples/bookinfo/kube/route-rule-reviews-v3.yaml
+ ```command
+ $ istioctl delete -f samples/bookinfo/kube/route-rule-reviews-test-v2.yaml
+ $ istioctl delete -f samples/bookinfo/kube/route-rule-reviews-v3.yaml
```
* If you are not planning to explore any follow-on tasks, refer to the
@@ -183,8 +185,6 @@ Verify that after logging in as "jason" you see black stars.
* Discover the full [Attribute Vocabulary]({{home}}/docs/reference/config/mixer/attribute-vocabulary.html).
-* Read the reference guide to [Writing Config]({{home}}/docs/reference/writing-config.html).
-
* Understand the differences between Kubernetes network policies and Istio
- access control policies from this
- [blog]({{home}}/blog/using-network-policy-in-concert-with-istio.html).
+access control policies from this
+[blog]({{home}}/blog/using-network-policy-in-concert-with-istio.html).
diff --git a/_docs/tasks/security/health-check.md b/_docs/tasks/security/health-check.md
new file mode 100644
index 0000000000000..5a4da27638363
--- /dev/null
+++ b/_docs/tasks/security/health-check.md
@@ -0,0 +1,141 @@
+---
+title: Citadel health checking
+description: Shows how to enable Citadel health checking with Kubernetes.
+
+weight: 70
+
+---
+{% include home.html %}
+
+This task shows how to enable Kubernetes health checking for Citadel. Note this is an Alpha feature.
+
+Since Istio 0.6, Citadel has a health checking feature that can be optionally enabled.
+By default, the normal Istio deployment process does not enable this feature.
+Currently, the health checking feature is able to detect the failures of Citadel CSR signing service,
+by periodically sending CSRs to the API. More health checking features are coming shortly.
+
+Citadel contains a _prober client_ module that periodically checks Citadel's status (currently only the health
+status of the gRPC server).
+If Citadel is healthy, the _prober client_ updates the _modification time_ of the _health status file_
+(the file is always empty). Otherwise, it does nothing. Citadel relies on a
+[K8s liveness and readiness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/)
+with command line to check the _modification time_ of the _health status file_ on the pod.
+If the file is not updated for a period, the probe will be triggered and Kubelet will restart the Citadel container.
+
+Note: because Citadel health checking currently only monitors the health status of CSR service API,
+this feature is not needed if the production setup is not using the
+[Istio Mesh Expansion]({{home}}/docs/setup/kubernetes/mesh-expansion.html) (which requires the CSR service API).
+
+## Before you begin
+
+* Set up Istio by following the instructions in the
+ [quick start]({{home}}/docs/setup/kubernetes/quick-start.html).
+ Note that authentication should be enabled at step 5 in the
+ [installation steps]({{home}}/docs/setup/kubernetes/quick-start.html#installation-steps).
+
+## Deploying Citadel with health checking
+
+Deploy Citadel with health checking enabled.
+
+```command
+$ kubectl apply -f install/kubernetes/istio-citadel-with-health-check.yaml
+```
+
+Deploy the `istio-citadel` service so that the CSR service can be found by the health checker.
+
+```bash
+cat <Welcome to nginx!
+...
+```
+
+You can actually combine the above three command into one:
+
+```command
+$ kubectl exec $(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}) -c istio-proxy -- curl https://my-nginx -k
+...
+
Welcome to nginx!
+...
+```
+
+### Create an HTTPS service with the Istio sidecar and mTLS disabled
+
+In the "Before you begin" section, the Istio control plane is deployed with mTLS
+disabled, so you only need to redeploy the NGINX HTTPS service with a sidecar.
+
+Delete the HTTPS service.
+
+```command
+$ kubectl delete -f nginx-app.yaml
+```
+
+Deploy it with a sidecar
+
+```command
+$ kubectl apply -f <(bin/istioctl kube-inject --debug -f samples/https/nginx-app.yaml)
+```
+
+Make sure the pod is up and running
+
+```command
+$ kubectl get pod
+NAME READY STATUS RESTARTS AGE
+my-nginx-6svcc 2/2 Running 0 1h
+sleep-847544bbfc-d27jg 2/2 Running 0 18h
+```
+
+And run
+
+```command
+$ kubectl exec sleep-847544bbfc-d27jg -c sleep -- curl https://my-nginx -k
+...
+
Welcome to nginx!
+...
+```
+
+If you run from istio-proxy container, it should work as well
+
+```command
+$ kubectl exec sleep-847544bbfc-d27jg -c istio-proxy -- curl https://my-nginx -k
+...
+
Welcome to nginx!
+...
+```
+
+> This example is borrowed from [kubernetes examples](https://github.com/kubernetes/examples/blob/master/staging/https-nginx/README.md).
+
+### Create an HTTPS service with Istio sidecar with mTLS enabled
+
+You need to deploy the Istio control plane with mTLS enabled. If you have an Istio
+control plane with mTLS disabled installed, please delete it:
+
+```command
+$ kubectl delete -f install/kubernetes/istio.yaml
+```
+
+And wait until everything is down, i.e., there are no pods in the control plane namespace (istio-system).
+
+```command
+$ kubectl get pod -n istio-system
+No resources found.
+```
+
+Then deploy the Istio control plane with mTLS enabled:
+
+```command
+$ kubectl apply -f install/kubernetes/istio-auth.yaml
+```
+
+Make sure everything is up and running:
+
+```command
+$ kubectl get po -n istio-system
+NAME READY STATUS RESTARTS AGE
+istio-citadel-58c5856966-k6nm4 1/1 Running 0 2m
+istio-ingress-5789d889bc-xzdg2 1/1 Running 0 2m
+istio-mixer-65c55bc5bf-8n95w 3/3 Running 0 2m
+istio-pilot-6954dcd96d-phh5z 2/2 Running 0 2m
+```
+
+Then redeploy the HTTPS service and sleep service
+
+```command
+$ kubectl delete -f <(bin/istioctl kube-inject --debug -f samples/sleep/sleep.yaml)
+$ kubectl apply -f <(bin/istioctl kube-inject --debug -f samples/sleep/sleep.yaml)
+$ kubectl delete -f <(bin/istioctl kube-inject --debug -f samples/https/nginx-app.yaml)
+$ kubectl apply -f <(bin/istioctl kube-inject --debug -f samples/https/nginx-app.yaml)
+```
+
+Make sure the pod is up and running
+
+```command
+$ kubectl get pod
+NAME READY STATUS RESTARTS AGE
+my-nginx-9dvet 2/2 Running 0 1h
+sleep-77f457bfdd-hdknx 2/2 Running 0 18h
+```
+
+And run
+
+```command
+$ kubectl exec sleep-77f457bfdd-hdknx -c sleep -- curl https://my-nginx -k
+...
+
Welcome to nginx!
+...
+```
+
+The reason is that for the workflow "sleep -> sleep-proxy -> nginx-proxy -> nginx",
+the whole flow is L7 traffic, and there is a L4 mTLS encryption between sleep-proxy
+and nginx-proxy. In this case, everything works fine.
+
+However, if you run this command from istio-proxy container, it will not work.
+
+```command
+$ kubectl exec sleep-77f457bfdd-hdknx -c istio-proxy -- curl https://my-nginx -k
+curl: (35) gnutls_handshake() failed: Handshake failed
+command terminated with exit code 35
+```
+
+The reason is that for the workflow "sleep-proxy -> nginx-proxy -> nginx",
+nginx-proxy expects mTLS traffic from sleep-proxy. In the command above,
+sleep-proxy does not provide a client cert. As a result, it won't work. Moreover,
+even if sleep-proxy provides a client cert in the above command, it won't work either
+since the traffic will be downgraded to http from nginx-proxy to nginx.
diff --git a/_docs/tasks/security/index.md b/_docs/tasks/security/index.md
index 9ab74e597d65d..74cdd67fb7abc 100644
--- a/_docs/tasks/security/index.md
+++ b/_docs/tasks/security/index.md
@@ -1,12 +1,11 @@
---
title: Security
-overview: Describes tasks that help securing the service mesh traffic.
+description: Describes tasks that help securing the service mesh traffic.
-order: 40
+weight: 40
-layout: docs
-type: markdown
toc: false
+redirect_from: /docs/tasks/istio-auth.html
---
{% include section-index.html docs=site.docs %}
diff --git a/_docs/tasks/security/mutual-tls.md b/_docs/tasks/security/mutual-tls.md
index e26ef99431356..25003e75cac1e 100644
--- a/_docs/tasks/security/mutual-tls.md
+++ b/_docs/tasks/security/mutual-tls.md
@@ -1,11 +1,9 @@
---
-title: Testing Istio mutual TLS authentication
-overview: This task shows you how to verify and test Istio's automatic mutual TLS authentication.
+title: Testing mutual TLS
+description: Shows you how to verify and test Istio's automatic mutual TLS authentication.
-order: 10
+weight: 10
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -24,32 +22,31 @@ This task assumes you have a Kubernetes cluster:
Note to choose "enable Istio mutual TLS Authentication feature" at step 5 in
"[Installation steps]({{home}}/docs/setup/kubernetes/quick-start.html#installation-steps)".
+> Starting with Istio 0.7, you can use [authentication policy]({{home}}/docs/concepts/security/authn-policy.html) to configure mTLS for all/selected services in a namespace (repeat for all namespaces to get a global setting). See [authentication policy task]({{home}}/docs/tasks/security/authn-policy.html)
+
## Verifying Istio's mutual TLS authentication setup
The following commands assume the services are deployed in the default namespace.
Use the parameter *-n yournamespace* to specify a namespace other than the default one.
-### Verifying Istio CA
-
-Verify the cluster-level CA is running:
+### Verifying Citadel
-```bash
-kubectl get deploy -l istio=istio-ca -n istio-system
-```
+Verify the cluster-level Citadel is running:
-```bash
-NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
-istio-ca 1 1 1 1 1m
+```command
+$ kubectl get deploy -l istio=istio-citadel -n istio-system
+NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
+istio-citadel 1 1 1 1 1m
```
-Istio CA is up if the "AVAILABLE" column is 1.
+Citadel is up if the "AVAILABLE" column is 1.
### Verifying service configuration
1. Verify AuthPolicy setting in ConfigMap.
- ```bash
- kubectl get configmap istio -o yaml -n istio-system | grep authPolicy | head -1
+ ```command
+ $ kubectl get configmap istio -o yaml -n istio-system | grep authPolicy | head -1
```
Istio mutual TLS authentication is enabled if the line `authPolicy: MUTUAL_TLS` is uncommented (doesn't have a `#`).
@@ -57,44 +54,56 @@ Istio CA is up if the "AVAILABLE" column is 1.
## Testing the authentication setup
When running Istio with mutual TLS authentication turned on, you can use curl in one service's
-envoy to send request to other services.
+Envoy to send request to other services.
For example, after starting the [Bookinfo]({{home}}/docs/guides/bookinfo.html)
-sample application you can ssh into the envoy container of `productpage` service,
-and send request to other services by curl.
+sample application you can ssh into the Envoy container of `productpage` service,
+and send request to other services by curl.
There are several steps:
-
+
1. get the productpage pod name
- ```bash
- kubectl get pods -l app=productpage
- ```
- ```bash
+ ```command
+ $ kubectl get pods -l app=productpage
NAME READY STATUS RESTARTS AGE
productpage-v1-4184313719-5mxjc 2/2 Running 0 23h
```
Make sure the pod is "Running".
-1. ssh into the envoy container
- ```bash
- kubectl exec -it productpage-v1-4184313719-5mxjc -c istio-proxy /bin/bash
+1. ssh into the Envoy container
+ ```command
+ $ kubectl exec -it productpage-v1-4184313719-5mxjc -c istio-proxy /bin/bash
```
1. make sure the key/cert is in /etc/certs/ directory
- ```bash
- ls /etc/certs/
- ```
- ```bash
+ ```command
+ $ ls /etc/certs/
cert-chain.pem key.pem root-cert.pem
- ```
-
- Note that cert-chain.pem is envoy's cert that needs to present to the other side. key.pem is envoy's private key paired with cert-chain.pem. root-cert.pem is the root cert to verify the other side's cert. Currently we only have one CA, so all envoys have the same root-cert.pem.
-
-1. send requests to another service, for example, details.
- ```bash
- curl https://details:9080/details/0 -v --key /etc/certs/key.pem --cert /etc/certs/cert-chain.pem --cacert /etc/certs/root-cert.pem -k
```
- ```bash
+
+ > `cert-chain.pem` is Envoy's cert that needs to present to the other side. `key.pem` is Envoy's private key
+ paired with Envoy's cert in `cert-chain.pem`. `root-cert.pem` is the root cert to verify the peer's cert.
+ In this example, we only have one Citadel in a cluster, so all Envoys have the same `root-cert.pem`.
+
+1. make sure 'curl' is installed by
+ ```command
+ $ curl
+ ```
+ If curl is installed, you should see something like
+ ```plain
+ curl: try 'curl --help' or 'curl --manual' for more information
+ ```
+
+ Otherwise run the command below to start over
+ ```command
+ $ kubectl apply -f <(istioctl kube-inject --debug -f samples/bookinfo/kube/bookinfo.yaml)
+ ```
+
+    > The Istio proxy image does not have curl installed while the debug image does. The "--debug" flag in the above command redeploys the service with the debug image.
+
+1. send requests to another service, for example, details.
+ ```command
+ $ curl https://details:9080/details/0 -v --key /etc/certs/key.pem --cert /etc/certs/cert-chain.pem --cacert /etc/certs/root-cert.pem -k
...
error fetching CN from cert:The requested data were not available.
...
@@ -106,17 +115,17 @@ There are several steps:
< x-envoy-upstream-service-time: 2
...
```
-
+
The service name and port are defined [here](https://github.com/istio/istio/blob/master/samples/bookinfo/kube/bookinfo.yaml).
-
-Note that Istio uses [Kubernetes service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)
-as service identity, which offers stronger security than service name
-(refer [here]({{home}}/docs/concepts/security/mutual-tls.html#identity) for more information).
-Thus the certificates used in Istio do not have service name, which is the information that curl needs to verify
-server identity. As a result, we use curl option '-k' to prevent the curl client from aborting when failing to
-find and verify the server name (i.e., productpage.ns.svc.cluster.local) in the certificate provided by the server.
-
-Please check secure naming [here]({{home}}/docs/concepts/security/mutual-tls.html#workflow) for more information
+
+Note that Istio uses [Kubernetes service accounts](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)
+as service identity, which offers stronger security than service name
+(refer [here]({{home}}/docs/concepts/security/mutual-tls.html#identity) for more information).
+Thus the certificates used in Istio do not have service names, which is the information that `curl` needs to verify
+server identity. As a result, we use `curl` option `-k` to prevent the `curl` client from aborting when failing to
+find and verify the server name (i.e., productpage.ns.svc.cluster.local) in the certificate provided by the server.
+
+Please check [secure naming]({{home}}/docs/concepts/security/mutual-tls.html#workflow) for more information
about how the client verifies the server's identity in Istio.
What we are demonstrating and verifying above is that the server accepts the connection from the client. Try not giving the client `--key` and `--cert` and observe you are not allowed to connect and you do not get an HTTP 200.
diff --git a/_docs/tasks/security/per-service-mtls.md b/_docs/tasks/security/per-service-mtls.md
index f8c86e7851002..91424a2fffbf7 100644
--- a/_docs/tasks/security/per-service-mtls.md
+++ b/_docs/tasks/security/per-service-mtls.md
@@ -1,24 +1,24 @@
---
-title: Per-service mutual TLS authentication enablement
-overview: This task shows how to change mutual TLS authentication for a single service.
+title: Per-service mutual TLS authentication control
+description: Shows how to change mutual TLS authentication for a single service.
-order: 40
+weight: 50
-layout: docs
-type: markdown
---
{% include home.html %}
+> This feature will soon be deprecated. If you are using Istio 0.7 or later, please refer to [authentication policy task]({{home}}/docs/tasks/security/authn-policy.html) for the recommended approach moving forward.
+
In the [Installation guide]({{home}}/docs/setup/kubernetes/quick-start.html#installation-steps), we show how to enable [mutual TLS authentication]({{home}}/docs/concepts/security/mutual-tls.html) between sidecars. The settings will be applied to all sidecars in the mesh.
-In this tutorial, you will learn:
+In this task, you will learn:
* Annotate Kubernetes service to disable (or enable) mutual TLS authentication for a selective service(s).
* Modify Istio mesh config to exclude mutual TLS authentication for control services.
## Before you begin
-* Understand Isio [mutual TLS authentication]({{home}}/docs/concepts/security/mutual-tls.html) concepts.
+* Understand Istio [mutual TLS authentication]({{home}}/docs/concepts/security/mutual-tls.html) concepts and [authentication policy]({{home}}/docs/concepts/security/authn-policy.html)
* Familiar with [testing Istio mutual TLS authentication]({{home}}/docs/tasks/security/mutual-tls.html).
@@ -26,39 +26,32 @@ In this tutorial, you will learn:
* Start [httpbin demo](https://github.com/istio/istio/tree/master/samples/httpbin) with Istio sidecar. Also, for testing purpose, run two instances of [sleep](https://github.com/istio/istio/tree/master/samples/sleep), one with sidecar and one without (in different namespace). Below are commands to help you start these services.
-```bash
-kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml)
-kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml)
-
-kubectl create ns legacy && kubectl apply -f samples/sleep/sleep.yaml -n legacy
+```command
+$ kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml)
+$ kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml)
+$ kubectl create ns legacy && kubectl apply -f samples/sleep/sleep.yaml -n legacy
```
In this initial setup, we expect the sleep instance in default namespace can talk to httpbin service, but the one in legacy namespace cannot, as it doesn't have sidecar to facilitate mTLS.
-```bash
-kubectl exec $(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}) -c sleep -- curl http://httpbin.default:8000/ip -s
-```
-
-```bash
+```command-output-as-json
+$ kubectl exec $(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}) -c sleep -- curl http://httpbin.default:8000/ip -s
{
"origin": "127.0.0.1"
}
```
-```bash
-kubectl exec $(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name} -n legacy) -n legacy -- curl http://httpbin.default:8000/ip -s
-```
-
-```bash
+```command
+$ kubectl exec $(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name} -n legacy) -n legacy -- curl http://httpbin.default:8000/ip -s
command terminated with exit code 56
```
-## Disable mutual TLS authentication for "httpbin" service.
+## Disable mutual TLS authentication for httpbin
If we want to disable mTLS only for httpbin (on port 8000), without changing the mesh authentication settings,
we can do that by adding this annotations to the httpbin service definition.
-```bash
+```plain
annotations:
auth.istio.io/8000: NONE
```
@@ -72,7 +65,7 @@ Note:
* Annotations can also be used for a (server) service that *does not have sidecar*, to instruct Istio do not apply mTLS for the client when making a call to that service. In fact, if a system has some services that are not managed by Istio (i.e without sidecar), this is a recommended solution to fix communication problem with those services.
-## Disable mutual TLS authentication for control services.
+## Disable mutual TLS authentication for control services
As we cannot annotate control services, such as API server, in Istio 0.3, we introduced [mtls_excluded_services](https://github.com/istio/api/blob/master/mesh/v1alpha1/config.proto#L200:19) to the mesh configuration to specify the list of services for which mTLS should not be used. If your application needs to communicate to any control service, it's fully-qualified domain name should be listed there.
@@ -80,21 +73,15 @@ In the part of the demo, we will show the impact of this field.
By default (0.3 or later), this list contains `kubernetes.default.svc.cluster.local` (which is the name of the API server service in common setup). You can verify it by running this command:
-```bash
-kubectl get configmap -n istio-system istio -o yaml | grep mtlsExcludedServices
-```
-
-```bash
+```command
+$ kubectl get configmap -n istio-system istio -o yaml | grep mtlsExcludedServices
mtlsExcludedServices: ["kubernetes.default.svc.cluster.local"]
```
It's then expected that request to kubernetes.default service should be possible:
-```bash
-kubectl exec $(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}) -c sleep -- curl https://kubernetes.default:443/api/ -k -s
-```
-
-```bash
+```command-output-as-json
+$ kubectl exec $(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}) -c sleep -- curl https://kubernetes.default:443/api/ -k -s
{
"kind": "APIVersions",
"versions": [
@@ -109,18 +96,15 @@ kubectl exec $(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
}
```
-Now, run `kubectl edit configmap istio -n istio-system` and clear mtlsExcludedServices and restart pilot after done:
+Now, run `kubectl edit configmap istio -n istio-system` and clear `mtlsExcludedServices`, then restart Pilot when done:
-```bash
-kubectl get pod $(kubectl get pod -l istio=pilot -n istio-system -o jsonpath={.items..metadata.name}) -n istio-system -o yaml | kubectl replace --force -f -
+```command
+$ kubectl get pod $(kubectl get pod -l istio=pilot -n istio-system -o jsonpath={.items..metadata.name}) -n istio-system -o yaml | kubectl replace --force -f -
```
The same test request above now fail with code 35, as sleep's sidecar starts using mTLS again:
-```bash
-kubectl exec $(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}) -c sleep -- curl https://kubernetes.default:443/api/ -k -s
-```
-
-```bash
+```command
+$ kubectl exec $(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}) -c sleep -- curl https://kubernetes.default:443/api/ -k -s
command terminated with exit code 35
```
diff --git a/_docs/tasks/security/plugin-ca-cert.md b/_docs/tasks/security/plugin-ca-cert.md
index 1b7e4eceb5d53..11d09e354af45 100644
--- a/_docs/tasks/security/plugin-ca-cert.md
+++ b/_docs/tasks/security/plugin-ca-cert.md
@@ -1,69 +1,62 @@
---
-title: Plugging in CA certificate and key
-overview: This task shows how operators can plug existing certificate and key into Istio CA.
+title: Plugging in external CA key and certificate
+description: Shows how operators can configure Citadel with existing root certificate, signing certificate and key.
-order: 40
+weight: 60
-layout: docs
-type: markdown
---
{% include home.html %}
-This task shows how operators can plug existing certificate and key into Istio CA.
+This task shows how operators can configure Citadel with existing root certificate, signing certificate and key.
-By default, the Istio CA generates self-signed CA certificate and key and uses them to sign the workload certificates.
-The Istio CA can also use the operator-specified certificate and key to sign workload certificates.
-This task demonstrates an example to plug certificate and key into the Istio CA.
+By default, Citadel generates self-signed root certificate and key, and uses them to sign the workload certificates.
+Citadel can also use the operator-specified certificate and key to sign workload certificates, with
+operator-specified root certificate. This task demonstrates an example to plug certificates and key into Citadel.
## Before you begin
-* Set up Istio on auth-enabled Kubernetes by following the instructions in the
+* Set up Istio by following the instructions in the
[quick start]({{home}}/docs/setup/kubernetes/quick-start.html).
- Note that authentication should be enabled at step 4 in the
+ Note that authentication should be enabled at step 5 in the
[installation steps]({{home}}/docs/setup/kubernetes/quick-start.html#installation-steps).
## Plugging in the existing certificate and key
-Suppose we want to have Istio CA use the existing certificate `ca-cert.pem` and key `ca-key.pem`.
-Furthermore, the certificate `ca-cert.pem` is signed by the root certificate `root-cert.pem`,
-and we would like to use `root-cert.pem` as the root certificate for Istio workloads.
+Suppose we want to have Citadel use the existing signing (CA) certificate `ca-cert.pem` and key `ca-key.pem`.
+Furthermore, the certificate `ca-cert.pem` is signed by the root certificate `root-cert.pem`.
+We would like to use `root-cert.pem` as the root certificate for Istio workloads.
-In this example, because the Istio CA certificate (`ca-cert.pem`) is not set as the workloads' root certificate (`root-cert.pem`),
-the workload cannot validate the workload certificates directly from the root certificate.
+In the following example,
+Citadel's signing (CA) certificate (`ca-cert.pem`) is different from root certificate (`root-cert.pem`),
+so the workload cannot validate the workload certificates directly from the root certificate.
The workload needs a `cert-chain.pem` file to specify the chain of trust,
which should include the certificates of all the intermediate CAs between the workloads and the root CA.
-In this example, it only contains the Istio CA certificate, so `cert-chain.pem` is the same as `ca-cert.pem`.
-Note that if your `ca-cert.pem` is the same as `root-cert.pem`, you can have an empty `cert-chain.pem` file.
-
- Download the example files:
- ```bash
- rm /tmp/ca-cert.pem /tmp/ca-key.pem /tmp/root-cert.pem /tmp/cert-chain.pem
- wget -P /tmp https://raw.githubusercontent.com/istio/istio/master/security/samples/plugin_ca_certs/ca-cert.pem
- wget -P /tmp https://raw.githubusercontent.com/istio/istio/master/security/samples/plugin_ca_certs/ca-key.pem
- wget -P /tmp https://raw.githubusercontent.com/istio/istio/master/security/samples/plugin_ca_certs/root-cert.pem
- wget -P /tmp https://raw.githubusercontent.com/istio/istio/master/security/samples/plugin_ca_certs/cert-chain.pem
- ```
+In our example, it contains Citadel's signing certificate, so `cert-chain.pem` is the same as `ca-cert.pem`.
+Note that if your `ca-cert.pem` is the same as `root-cert.pem`, the `cert-chain.pem` file should be empty.
+
+These files are ready to use in the `samples/certs/` directory.
-The following steps enable plugging in the certificate and key into the Istio CA:
+The following steps enable plugging in the certificates and key into Citadel:
1. Create a secret `cacert` including all the input files `ca-cert.pem`, `ca-key.pem`, `root-cert.pem` and `cert-chain.pem`:
- ```bash
- kubectl create secret generic cacerts -n istio-system --from-file=/tmp/ca-cert.pem --from-file=/tmp/ca-key.pem \
- --from-file=/tmp/root-cert.pem --from-file=/tmp/cert-chain.pem
+ ```command
+ $ kubectl create secret generic cacerts -n istio-system --from-file=samples/certs/ca-cert.pem \
+ --from-file=samples/certs/ca-key.pem --from-file=samples/certs/root-cert.pem \
+ --from-file=samples/certs/cert-chain.pem
```
-1. Redeploy the Istio CA, which reads the certificates and key from the secret-mount files:
- ```bash
- kubectl apply -f install/kubernetes/istio-ca-plugin-certs.yaml
+1. Redeploy Citadel, which reads the certificates and key from the secret-mount files:
+ ```command
+ $ kubectl apply -f install/kubernetes/istio-citadel-plugin-certs.yaml
```
+ > Note: if you are using different certificate/key file or secret names,
+ you need to change corresponding volume mounts and arguments in `istio-citadel-plugin-certs.yaml`.
1. To make sure the workloads obtain the new certificates promptly,
- delete the secrets generated by Istio CA (named as istio.\*).
- In this example, `istio.default`. The Istio CA will issue new certificates for the workloads.
- ```bash
- kubectl delete secret istio.default
+ delete the secrets generated by Citadel (named as istio.\*).
+ In this example, `istio.default`. Citadel will issue new certificates for the workloads.
+ ```command
+ $ kubectl delete secret istio.default
```
-Note that if you are using different certificate/key file or secret names,
-you need to change corresponding arguments in `istio-ca-plugin-certs.yaml`.
## Verifying the new certificates
@@ -73,59 +66,49 @@ This requires you have `openssl` installed on your machine.
1. Deploy the bookinfo application following the [instructions]({{home}}/docs/guides/bookinfo.html).
1. Retrieve the mounted certificates.
+ In the following, we take the ratings pod as an example, and verify the certificates mounted on the pod.
- Get the pods:
- ```bash
- kubectl get pods
+ Set the pod name to `RATINGSPOD`:
+ ```command
+ $ RATINGSPOD=`kubectl get pods -l app=ratings -o jsonpath='{.items[0].metadata.name}'`
```
- which produces:
- ```bash
- NAME READY STATUS RESTARTS AGE
- details-v1-1520924117-48z17 2/2 Running 0 6m
- productpage-v1-560495357-jk1lz 2/2 Running 0 6m
- ratings-v1-734492171-rnr5l 2/2 Running 0 6m
- reviews-v1-874083890-f0qf0 2/2 Running 0 6m
- reviews-v2-1343845940-b34q5 2/2 Running 0 6m
- reviews-v3-1813607990-8ch52 2/2 Running 0 6m
- ```
-
- In the following, we take the pod `ratings-v1-734492171-rnr5l` as an example, and verify the mounted certificates.
Run the following commands to retrieve the certificates mounted on the proxy:
- ```bash
- kubectl exec -it ratings-v1-734492171-rnr5l -c istio-proxy -- /bin/cat /etc/certs/root-cert.pem > /tmp/pod-root-cert.pem
+ ```command
+ $ kubectl exec -it $RATINGSPOD -c istio-proxy -- /bin/cat /etc/certs/root-cert.pem > /tmp/pod-root-cert.pem
```
- The file `/tmp/pod-root-cert.pem` should contain the root certificate specified by the operator.
+ The file `/tmp/pod-root-cert.pem` contains the root certificate propagated to the pod.
- ```bash
- kubectl exec -it ratings-v1-734492171-rnr5l -c istio-proxy -- /bin/cat /etc/certs/cert-chain.pem > /tmp/pod-cert-chain.pem
+ ```command
+ $ kubectl exec -it $RATINGSPOD -c istio-proxy -- /bin/cat /etc/certs/cert-chain.pem > /tmp/pod-cert-chain.pem
```
- The file `/tmp/pod-cert-chain.pem` should contain the workload certificate and the CA certificate.
+ The file `/tmp/pod-cert-chain.pem` contains the workload certificate and the CA certificate propagated to the pod.
1. Verify the root certificate is the same as the one specified by operator:
- ```bash
- openssl x509 -in /tmp/root-cert.pem -text -noout > /tmp/root-cert.crt.txt
+ ```command
+    $ openssl x509 -in samples/certs/root-cert.pem -text -noout > /tmp/root-cert.crt.txt
openssl x509 -in /tmp/pod-root-cert.pem -text -noout > /tmp/pod-root-cert.crt.txt
diff /tmp/root-cert.crt.txt /tmp/pod-root-cert.crt.txt
```
-
-1. Verify that the CA certificate is the same as the one specified by operator:
- ```bash
- tail /tmp/pod-cert-chain.pem -n 22 > /tmp/pod-cert-chain-ca.pem
- openssl x509 -in /tmp/ca-cert.pem -text -noout > /tmp/ca-cert.crt.txt
- openssl x509 -in /tmp/pod-cert-chain-ca.pem -text -noout > /tmp/pod-cert-chain-ca.crt.txt
- diff /tmp/ca-cert.crt.txt /tmp/pod-cert-chain-ca.crt.txt
+ Expect the output to be empty.
+
+1. Verify the CA certificate is the same as the one specified by operator:
+ ```command
+ $ tail -n 22 /tmp/pod-cert-chain.pem > /tmp/pod-cert-chain-ca.pem
+ $ openssl x509 -in samples/certs/ca-cert.pem -text -noout > /tmp/ca-cert.crt.txt
+ $ openssl x509 -in /tmp/pod-cert-chain-ca.pem -text -noout > /tmp/pod-cert-chain-ca.crt.txt
+ $ diff /tmp/ca-cert.crt.txt /tmp/pod-cert-chain-ca.crt.txt
```
- Expect that the output to be empty.
+ Expect the output to be empty.
1. Verify the certificate chain from the root certificate to the workload certificate:
- ```bash
- head /tmp/pod-cert-chain.pem -n 18 > /tmp/pod-cert-chain-workload.pem
- openssl verify -CAfile <(cat /tmp/ca-cert.pem /tmp/root-cert.pem) /tmp/pod-cert-chain-workload.pem
+ ```command
+ $ head -n 21 /tmp/pod-cert-chain.pem > /tmp/pod-cert-chain-workload.pem
+ $ openssl verify -CAfile <(cat samples/certs/ca-cert.pem samples/certs/root-cert.pem) /tmp/pod-cert-chain-workload.pem
```
Expect the following output:
- ```bash
+    ```plain
/tmp/pod-cert-chain-workload.pem: OK
```
@@ -133,16 +116,16 @@ This requires you have `openssl` installed on your machine.
* To remove the secret `cacerts`:
- ```bash
- kubectl delete secret cacerts -n istio-system
+ ```command
+ $ kubectl delete secret cacerts -n istio-system
```
* To remove the Istio components:
- ```bash
- kubectl delete -f install/kubernetes/istio-auth.yaml
+ ```command
+ $ kubectl delete -f install/kubernetes/istio-auth.yaml
```
## What's next
-* Read the [Istio CA arguments](https://github.com/istio/istio/blob/master/security/cmd/istio_ca/main.go).
+* Read more about [Citadel (codename is istio\_ca) arguments]({{home}}/docs/reference/commands/istio_ca.html).
* Read [how the sample certificates and keys are generated](https://github.com/istio/istio/blob/master/security/samples/plugin_ca_certs).
diff --git a/_docs/tasks/security/role-based-access-control.md b/_docs/tasks/security/role-based-access-control.md
index 34d09cf2f0513..f39df187f4ef7 100644
--- a/_docs/tasks/security/role-based-access-control.md
+++ b/_docs/tasks/security/role-based-access-control.md
@@ -1,11 +1,9 @@
---
-title: Setting up Istio Role-Based Access Control
-overview: This task shows how to set up role-based access control for services in Istio mesh.
+title: Role-Based Access Control
+description: Shows how to set up role-based access control for services in Istio mesh.
-order: 30
+weight: 40
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -21,11 +19,11 @@ RBAC from [Istio RBAC concept page]({{home}}/docs/concepts/security/rbac.html).
* Deploy the [Bookinfo]({{home}}/docs/guides/bookinfo.html) sample application.
- *> Note: Some sample configurations we use below are not in the current Istio release yet. So before you continue, you
- need to copy the following configuration files from https://github.com/istio/istio/tree/master/samples/bookinfo/kube to
- "samples/bookinfo/kube" directory under where you installed Istio. The files include `bookinfo-add-serviceaccount.yaml`
- (replace the original one), `istio-rbac-enable.yaml`, `istio-rbac-namespace.yaml`, `istio-rbac-productpage.yaml`,
- `istio-rbac-details-reviews.yaml`, `istio-rbac-ratings.yaml`.*
+> The current Istio release may not have the up-to-date Istio RBAC samples. So before you continue, you
+need to copy the following configuration files from https://github.com/istio/istio/tree/master/samples/bookinfo/kube to
+`samples/bookinfo/kube` directory under where you installed Istio, and replace the original ones. The files include
+`bookinfo-add-serviceaccount.yaml`, `istio-rbac-enable.yaml`, `istio-rbac-namespace.yaml`, `istio-rbac-productpage.yaml`,
+`istio-rbac-details-reviews.yaml`, `istio-rbac-ratings.yaml`.
* In this task, we will enable access control based on Service Accounts, which are cryptographically authenticated in the Istio mesh.
In order to give different microservices different access privileges, we will create some service accounts and redeploy Bookinfo
@@ -36,12 +34,8 @@ microservices running under them.
* Create service account `bookinfo-reviews`, and redeploy the services `reviews` (deployments `reviews-v2` and `reviews-v3`)
with the service account.
- ```bash
- kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo-add-serviceaccount.yaml)
- ```
-
- You can expect to see the output similar to the following:
- ```bash
+ ```command
+ $ kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo-add-serviceaccount.yaml)
serviceaccount "bookinfo-productpage" created
deployment "productpage-v1" configured
serviceaccount "bookinfo-reviews" created
@@ -49,9 +43,7 @@ microservices running under them.
deployment "reviews-v3" configured
```
-
- > Note: if you are using a namespace other than `default`,
- use `istioctl -n namespace ...` to specify the namespace.
+> If you are using a namespace other than `default`, use `istioctl -n namespace ...` to specify the namespace.
Point your browser at the Bookinfo `productpage` (http://$GATEWAY_URL/productpage). You should see:
* "Book Details" section in the lower left part of the page, including type, pages, publisher, etc.
@@ -59,12 +51,18 @@ Point your browser at the Bookinfo `productpage` (http://$GATEWAY_URL/productpag
## Enabling Istio RBAC
-Run the following command to enable Istio RBAC.
+Run the following command to enable Istio RBAC for "default" namespace.
-```bash
-kubectl apply -f samples/bookinfo/kube/istio-rbac-enable.yaml
+> If you are using a namespace other than `default`, edit the file `samples/bookinfo/kube/istio-rbac-enable.yaml`,
+and specify the namespace, say `"your-namespace"`, in the `match` statement in `rule` spec
+`match: destination.namespace == "your-namespace"`.
+
+```command
+$ istioctl create -f samples/bookinfo/kube/istio-rbac-enable.yaml
```
+> If you have conflicting rules that you set in previous tasks, use `istioctl replace` instead of `istioctl create`.
+
It also defines "requestcontext", which is an instance of the
[authorization template](https://github.com/istio/istio/blob/master/mixer/template/authorization/template.proto).
"requestcontext" defines the input to the RBAC engine at runtime.
@@ -73,7 +71,7 @@ Point your browser at the Bookinfo `productpage` (http://$GATEWAY_URL/productpag
`"PERMISSION_DENIED:handler.rbac.istio-system:RBAC: permission denied."` This is because Istio RBAC is "deny by default",
which means that you need to explicitly define access control policy to grant access to any service.
- > Note: There may be delay due to caching on browser and Istio proxy.
+> There may be delay due to caching on browser and Istio proxy.
## Namespace-level access control
@@ -81,19 +79,21 @@ Using Istio RBAC, you can easily setup namespace-level access control by specify
in a namespace are accessible by services from another namespace.
In our Bookinfo sample, the "productpage", "reviews", "details", "ratings" services are deployed in "default" namespace.
-The Istio components like "ingress" service are deployed in "istio-system" namespace. We can define a policy that all
-services in "default" namespace are accessible by services in the same namespace (i.e., "default" namespace) and
-services in "istio-system" namespace.
+The Istio components like "ingress" service are deployed in "istio-system" namespace. We can define a policy that
+any service in "default" namespace that has "app" label set to one of the values in ["productpage", "details", "reviews", "ratings"]
+is accessible by services in the same namespace (i.e., "default" namespace) and services in "istio-system" namespace.
Run the following command to create a namespace-level access control policy.
-```bash
-kubectl apply -f samples/bookinfo/kube/istio-rbac-namespace.yaml
+```command
+$ istioctl create -f samples/bookinfo/kube/istio-rbac-namespace.yaml
```
The policy does the following:
-* Creates a ServiceRole "service-viewer" which allows read access to any services in "default" namespace.
+* Creates a `ServiceRole` "service-viewer" which allows read access to any service in "default" namespace that has "app" label
+set to one of the values in ["productpage", "details", "reviews", "ratings"]. Note that there is a "constraint" specifying that
+the services must have one of the listed "app" labels.
- ```bash
+ ```yaml
apiVersion: "config.istio.io/v1alpha2"
kind: ServiceRole
metadata:
@@ -103,11 +103,14 @@ The policy does the following:
rules:
- services: ["*"]
methods: ["GET"]
+ constraints:
+ - key: "app"
+ values: ["productpage", "details", "reviews", "ratings"]
```
-* Creates a ServiceRoleBinding that assign the "service-viewer" role to all services in "istio-system" and "default" namespaces.
+* Creates a `ServiceRoleBinding` that assigns the "service-viewer" role to all services in "istio-system" and "default" namespaces.
- ```bash
+ ```yaml
apiVersion: "config.istio.io/v1alpha2"
kind: ServiceRoleBinding
metadata:
@@ -124,9 +127,9 @@ The policy does the following:
name: "service-viewer"
```
-You can expect to see the output similar to the following:
+You can expect to see output similar to the following:
-```bash
+```plain
servicerole "service-viewer" created
servicerolebinding "bind-service-viewer" created
```
@@ -134,14 +137,14 @@ servicerolebinding "bind-service-viewer" created
Now if you point your browser at Bookinfo `productpage` (http://$GATEWAY_URL/productpage). You should see "Bookinfo Sample" page,
with "Book Details" section in the lower left part and "Book Reviews" section in the lower right part.
- > Note: There may be delay due to caching on browser and Istio proxy.
+ > There may be delay due to caching on browser and Istio proxy.
### Cleanup namespace-level access control
Remove the following configuration before you proceed to the next task:
-```bash
-kubectl delete -f samples/bookinfo/kube/istio-rbac-namespace.yaml
+```command
+$ istioctl delete -f samples/bookinfo/kube/istio-rbac-namespace.yaml
```
## Service-level access control
@@ -159,14 +162,14 @@ access to the services in Bookinfo sample.
In this step, we will create a policy that allows external requests to view `productpage` service via Ingress.
Run the following command:
-```bash
-kubectl apply -f samples/bookinfo/kube/istio-rbac-productpage.yaml
+```command
+$ istioctl create -f samples/bookinfo/kube/istio-rbac-productpage.yaml
```
The policy does the following:
-* Creates a ServiceRole "productpage-viewer" which allows read access to "productpage" service.
+* Creates a `ServiceRole` "productpage-viewer" which allows read access to "productpage" service.
- ```bash
+ ```yaml
apiVersion: "config.istio.io/v1alpha2"
kind: ServiceRole
metadata:
@@ -178,9 +181,9 @@ The policy does the following:
methods: ["GET"]
```
-* Creates a ServiceRoleBinding "bind-productpager-viewer" which assigns "productpage-viewer" role to services from "istio-system" namespace.
+* Creates a `ServiceRoleBinding` "bind-productpager-viewer" which assigns "productpage-viewer" role to all users/services.
- ```bash
+ ```yaml
apiVersion: "config.istio.io/v1alpha2"
kind: ServiceRoleBinding
metadata:
@@ -188,8 +191,7 @@ The policy does the following:
namespace: default
spec:
subjects:
- - properties:
- namespace: "istio-system"
+ - user: "*"
roleRef:
kind: ServiceRole
name: "productpage-viewer"
@@ -200,7 +202,7 @@ page. But there are errors `"Error fetching product details"` and `"Error fetchi
are expected because we have not granted "productpage" service to access "details" and "reviews" services. We will fix the errors
in the following steps.
- > Note: There may be delay due to caching on browser and Istio proxy.
+ > There may be a delay due to caching in your browser and the Istio proxy.
### Step 2. allowing "productpage" service to access "details" and "reviews" services
@@ -209,17 +211,14 @@ We will create a policy to allow "productpage" service to read "details" and "re
"bookinfo-productpage" service account is the authenticated identify for "productpage" service.
Run the following command:
-```bash
-kubectl apply -f samples/bookinfo/kube/istio-rbac-details-reviews.yaml
+```command
+$ istioctl create -f samples/bookinfo/kube/istio-rbac-details-reviews.yaml
```
The policy does the following:
-* Creates a ServiceRole "details-reviews-viewer" which allows
- * Read access to "details" service, and
- * Read access to "reviews" services at versions "v2" and "v3". Note that there is a "constraint" specifying that "version" must be
- "v2" or "v3".
+* Creates a `ServiceRole` "details-reviews-viewer" which allows read access to "details" and "reviews" services.
- ```bash
+ ```yaml
apiVersion: "config.istio.io/v1alpha2"
kind: ServiceRole
metadata:
@@ -227,19 +226,14 @@ The policy does the following:
namespace: default
spec:
rules:
- - services: ["details.default.svc.cluster.local"]
- methods: ["GET"]
- - services: ["reviews.default.svc.cluster.local"]
+ - services: ["details.default.svc.cluster.local", "reviews.default.svc.cluster.local"]
methods: ["GET"]
- constraints:
- - key: "version"
- values: ["v2", "v3"]
```
-* Creates a ServiceRoleBinding "bind-details-reviews" which assigns "details-reviews-viewer" role to service
+* Creates a `ServiceRoleBinding` "bind-details-reviews" which assigns "details-reviews-viewer" role to service
account "cluster.local/ns/default/sa/bookinfo-productpage" (representing the "productpage" service).
- ```bash
+ ```yaml
apiVersion: "config.istio.io/v1alpha2"
kind: ServiceRoleBinding
metadata:
@@ -255,30 +249,11 @@ account "cluster.local/ns/default/sa/bookinfo-productpage" (representing the "pr
Point your browser at the Bookinfo `productpage` (http://$GATEWAY_URL/productpage). Now you should see "Bookinfo Sample"
page with "Book Details" on the lower left part, and "Book Reviews" on the lower right part. However, in "Book Reviews" section,
-you see one of the following two errors:
-1. `"Error featching product reviews"`. This is because "productpage" service is only allowed to access "reviews" service with versions
-"v2" or "v3". The error occurs when "productpage" service is routed to "reviews" service at version "v1".
-2. "Book Reviews" section is shown on the lower right part of the page. But there is an error `"Ratings service currently unavailable"`. This
-is because "reviews" service does not have permission to access "ratings" service.
+there is an error `"Ratings service currently unavailable"`. This is because "reviews" service does not have permission to access
+"ratings" service. To fix this issue, you need to grant "reviews" service read access to "ratings" service.
+We will show how to do that in the next step.
- > Note: There may be delay due to caching on browser and Istio proxy.
-
-To fix the first error, you need to remove the "version" constraint, so that the "details-reviews-viewer" role look like the following:
-```bash
-apiVersion: "config.istio.io/v1alpha2"
-kind: ServiceRole
-metadata:
- name: details-reviews-viewer
- namespace: default
-spec:
- rules:
- - services: ["details.default.svc.cluster.local"]
- methods: ["GET"]
- - services: ["reviews.default.svc.cluster.local"]
- methods: ["GET"]
-```
-
-To fix the second issue, you need to grant "reviews" service read access to "ratings" service. We will show how to do that in the next step.
+> There may be a delay due to caching in your browser and the Istio proxy.
### Step 3. allowing "reviews" service to access "ratings" service
@@ -288,14 +263,15 @@ We will create a policy to allow "reviews" service to read "ratings" service. No
Run the following command to create a policy that allows "reviews" service to read "ratings" service.
-```bash
-kubectl apply -f samples/bookinfo/kube/istio-rbac-ratings.yaml
+```command
+$ istioctl create -f samples/bookinfo/kube/istio-rbac-ratings.yaml
```
The policy does the following:
-* Creates a ServiceRole "ratings-viewer" which allows read access to "ratings" service.
- ```bash
+* Creates a `ServiceRole` "ratings-viewer" which allows read access to "ratings" service.
+
+ ```yaml
apiVersion: "config.istio.io/v1alpha2"
kind: ServiceRole
metadata:
@@ -307,10 +283,10 @@ The policy does the following:
methods: ["GET"]
```
-* Creates a ServiceRoleBinding "bind-ratings" which assigns "ratings-viewer" role to service
+* Creates a `ServiceRoleBinding` "bind-ratings" which assigns "ratings-viewer" role to service
account "cluster.local/ns/default/sa/bookinfo-reviews", which represents the "reviews" services.
- ```bash
+ ```yaml
apiVersion: "config.istio.io/v1alpha2"
kind: ServiceRoleBinding
metadata:
@@ -327,12 +303,12 @@ account "cluster.local/ns/default/sa/bookinfo-reviews", which represents the "re
Point your browser at the Bookinfo `productpage` (http://$GATEWAY_URL/productpage). Now you should see
the "black" and "red" ratings in "Book Reviews" section.
- > Note: There may be delay due to caching on browser and Istio proxy.
+ > There may be a delay due to caching in your browser and the Istio proxy.
If you would like to only see "red" ratings in "Book Reviews" section, you can do that by specifying that only "reviews"
service at version "v3" can access "ratings" service.
-```bash
+```yaml
apiVersion: "config.istio.io/v1alpha2"
kind: ServiceRoleBinding
metadata:
@@ -352,25 +328,25 @@ spec:
* Remove Istio RBAC policy configuration:
- ```bash
- kubectl delete -f samples/bookinfo/kube/istio-rbac-ratings.yaml
- kubectl delete -f samples/bookinfo/kube/istio-rbac-details-reviews.yaml
- kubectl delete -f samples/bookinfo/kube/istio-rbac-productpage.yaml
+ ```command
+ $ istioctl delete -f samples/bookinfo/kube/istio-rbac-ratings.yaml
+ $ istioctl delete -f samples/bookinfo/kube/istio-rbac-details-reviews.yaml
+ $ istioctl delete -f samples/bookinfo/kube/istio-rbac-productpage.yaml
```
- Alternatively, you can delete all ServiceRole and ServiceRoleBinding objects by running the following commands:
+ Alternatively, you can delete all `ServiceRole` and `ServiceRoleBinding` resources by running the following commands:
- ```bash
- kubectl delete servicerole --all
- kubectl delete servicerolebinding --all
+ ```command
+ $ kubectl delete servicerole --all
+ $ kubectl delete servicerolebinding --all
```
* Disable Istio RBAC:
- ```bash
- kubectl delete -f samples/bookinfo/kube/istio-rbac-enable.ymal
+ ```command
+ $ istioctl delete -f samples/bookinfo/kube/istio-rbac-enable.yaml
```
## What's next
-* Learn more about [Istio RBAC]({{home}}/docs/concepts/security/rbac.html).
\ No newline at end of file
+* Learn more about [Istio RBAC]({{home}}/docs/concepts/security/rbac.html).
diff --git a/_docs/tasks/security/secure-access-control.md b/_docs/tasks/security/secure-access-control.md
index 15502904e7188..1e718146200a1 100644
--- a/_docs/tasks/security/secure-access-control.md
+++ b/_docs/tasks/security/secure-access-control.md
@@ -1,11 +1,9 @@
---
-title: Setting up Secure Access Control
-overview: This task shows how to securely control access to a service using service accounts.
+title: Secure Access Control
+description: Shows how to securely control access to a service using service accounts.
-order: 30
+weight: 30
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -29,19 +27,13 @@ For the format of the service account in Istio, please refer to the
* Run the following command to create service account `bookinfo-productpage`,
and redeploy the service `productpage` with the service account.
- ```bash
- kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo-add-serviceaccount.yaml)
- ```
-
- You can expect to see the output similar to the following:
- ```bash
+ ```command
+ $ kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo-add-serviceaccount.yaml)
serviceaccount "bookinfo-productpage" created
deployment "productpage-v1" configured
```
-
-
- > Note: if you are using a namespace other than `default`,
- use `istioctl -n namespace ...` to specify the namespace.
+> If you are using a namespace other than `default`,
+use `istioctl -n namespace ...` to specify the namespace.
## Access control using _denials_
@@ -57,25 +49,23 @@ the `productpage` service.
1. Explicitly deny the requests from `productpage` to `details`.
Run the following command to set up the deny rule along with a handler and an instance.
- ```bash
- istioctl create -f samples/bookinfo/kube/mixer-rule-deny-serviceaccount.yaml
- ```
- You can expect to see the output similar to the following:
- ```bash
+ ```command
+ $ istioctl create -f samples/bookinfo/kube/mixer-rule-deny-serviceaccount.yaml
Created config denier/default/denyproductpagehandler at revision 2877836
Created config checknothing/default/denyproductpagerequest at revision 2877837
Created config rule/default/denyproductpage at revision 2877838
```
Notice the following in the `denyproductpage` rule:
- ```
+ ```plain
match: destination.labels["app"] == "details" && source.user == "cluster.local/ns/default/sa/bookinfo-productpage"
```
- It matches requests coming from the serivce account
+ It matches requests coming from the service account
"_cluster.local/ns/default/sa/bookinfo-productpage_" on the `details` service.
- > Note: If you are using a namespace other than `default`, replace the `default` with your namespace in the value of `source.user`.
+
+ > If you are using a namespace other than `default`, replace the `default` with your namespace in the value of `source.user`.
This rule uses the `denier` adapter to deny these requests.
- The adapter always denies requests with a pre-configured status code and message.
+ The adapter always denies requests with a preconfigured status code and message.
The status code and message are specified in the [denier]({{home}}/docs/reference/config/adapters/denier.html)
adapter configuration.
@@ -91,8 +81,8 @@ the `productpage` service.
* Remove the mixer configuration:
- ```bash
- istioctl delete -f samples/bookinfo/kube/mixer-rule-deny-serviceaccount.yaml
+ ```command
+ $ istioctl delete -f samples/bookinfo/kube/mixer-rule-deny-serviceaccount.yaml
```
* If you are not planning to explore any follow-on tasks, refer to the
@@ -105,8 +95,6 @@ the `productpage` service.
* Discover the full [Attribute Vocabulary]({{home}}/docs/reference/config/mixer/attribute-vocabulary.html).
-* Read the reference guide to [Writing Config]({{home}}/docs/reference/writing-config.html).
-
* Understand the differences between Kubernetes network policies and Istio
access control policies from this
[blog]({{home}}/blog/using-network-policy-in-concert-with-istio.html).
diff --git a/_docs/tasks/telemetry/distributed-tracing.md b/_docs/tasks/telemetry/distributed-tracing.md
index 3412327ab984e..d1a6d9ec6c6f3 100644
--- a/_docs/tasks/telemetry/distributed-tracing.md
+++ b/_docs/tasks/telemetry/distributed-tracing.md
@@ -1,16 +1,15 @@
---
title: Distributed Tracing
-overview: How to configure the proxies to send tracing requests to Zipkin or Jaeger
+description: How to configure the proxies to send tracing requests to Zipkin or Jaeger
-order: 10
+weight: 10
-layout: docs
-type: markdown
+redirect_from: /docs/tasks/zipkin-tracing.html
---
{% include home.html %}
-This task shows you how Istio-enabled applications
-can be configured to collect trace spans using [Zipkin](http://zipkin.io) or [Jaeger](https://jaeger.readthedocs.io).
+This task shows you how Istio-enabled applications
+can be configured to collect trace spans using [Zipkin](https://zipkin.io) or [Jaeger](https://jaeger.readthedocs.io).
After completing this task, you should understand all of the assumptions about your
application and how to have it participate in tracing, regardless of what
language/framework/platform you use to build your application.
@@ -18,35 +17,35 @@ language/framework/platform you use to build your application.
The [Bookinfo]({{home}}/docs/guides/bookinfo.html) sample is used as the
example application for this task.
-
## Before you begin
* Setup Istio by following the instructions in the [Installation guide]({{home}}/docs/setup/).
If you didn't start the Zipkin or Jaeger addon during installation,
- you can run the following command to start it now:
-
- ```bash
- kubectl apply -f install/kubernetes/addons/zipkin.yaml
+ you can run the following command to start it now.
+
+ For Zipkin:
+
+ ```command
+ $ kubectl apply -f install/kubernetes/addons/zipkin.yaml
```
- for Zipkin, or
- ```bash
- kubectl apply -n istio-system -f https://raw.githubusercontent.com/jaegertracing/jaeger-kubernetes/master/all-in-one/jaeger-all-in-one-template.yml
+ For Jaeger:
+
+ ```command
+ $ kubectl apply -n istio-system -f https://raw.githubusercontent.com/jaegertracing/jaeger-kubernetes/master/all-in-one/jaeger-all-in-one-template.yml
```
- for Jaeger.
* Deploy the [Bookinfo]({{home}}/docs/guides/bookinfo.html) sample application.
-
## Accessing the dashboard
### Zipkin
Setup access to the Zipkin dashboard URL using port-forwarding:
-```bash
-kubectl port-forward -n istio-system $(kubectl get pod -n istio-system -l app=zipkin -o jsonpath='{.items[0].metadata.name}') 9411:9411 &
+```command
+$ kubectl port-forward -n istio-system $(kubectl get pod -n istio-system -l app=zipkin -o jsonpath='{.items[0].metadata.name}') 9411:9411 &
```
Then open your browser at [http://localhost:9411](http://localhost:9411)
@@ -55,13 +54,12 @@ Then open your browser at [http://localhost:9411](http://localhost:9411)
Setup access to the Jaeger dashboard URL using port-forwarding:
-```bash
-kubectl port-forward -n istio-system $(kubectl get pod -n istio-system -l app=jaeger -o jsonpath='{.items[0].metadata.name}') 16686:16686 &
+```command
+$ kubectl port-forward -n istio-system $(kubectl get pod -n istio-system -l app=jaeger -o jsonpath='{.items[0].metadata.name}') 16686:16686 &
```
Then open your browser at [http://localhost:16686](http://localhost:16686)
-
## Generating traces using the Bookinfo sample
With the Bookinfo application up and running, generate trace information by accessing
@@ -69,36 +67,28 @@ With the Bookinfo application up and running, generate trace information by acce
If you now look at the dashboard, you should see something similar to the following:
-{% include figure.html width='100%' ratio='44.28%'
- img='./img/zipkin_dashboard.png'
- alt='Zipkin Dashboard'
- title='Zipkin Dashboard'
- caption='Zipkin Dashboard'
+{% include image.html width="100%" ratio="44.28%"
+ link="./img/zipkin_dashboard.png"
+ caption="Zipkin Dashboard"
%}
-{% include figure.html width='100%' ratio='42.35%'
- img='./img/jaeger_dashboard.png'
- alt='Jaeger Dashboard'
- title='Jaeger Dashboard'
- caption='Jaeger Dashboard'
+{% include image.html width="100%" ratio="42.35%"
+ link="./img/jaeger_dashboard.png"
+ caption="Jaeger Dashboard"
%}
If you click on the top (most recent) trace, you should see the details corresponding to your
latest refresh of the `/productpage`.
The page should look something like this:
-{% include figure.html width='100%' ratio='19.70%'
- img='./img/zipkin_span.png'
- alt='Zipkin Trace View'
- title='Zipkin Trace View'
- caption='Zipkin Trace View'
+{% include image.html width="100%" ratio="19.70%"
+ link="./img/zipkin_span.png"
+ caption="Zipkin Trace View"
%}
-{% include figure.html width='100%' ratio='26.99%'
- img='./img/jaeger_trace.png'
- alt='Jaeger Trace View'
- title='Jaeger Trace View'
- caption='Jaeger Trace View'
+{% include image.html width="100%" ratio="26.99%"
+ link="./img/jaeger_trace.png"
+ caption="Jaeger Trace View"
%}
As you can see, the trace is comprised of spans,
@@ -158,7 +148,7 @@ def getForwardHeaders(request):
```
The reviews application (Java) does something similar:
-
+
```java
@GET
@Path("/reviews")
@@ -175,25 +165,24 @@ public Response bookReviews(@CookieParam("user") Cookie user,
if(ratings_enabled){
JsonObject ratings = getRatings(user, xreq, xtraceid, xspanid, xparentspanid, xsampled, xflags, xotspan);
-```
+```
When you make downstream calls in your applications, make sure to include these headers.
## Cleanup
-
* Remove the addon tracing configuration:
- If you are running with Zipkin, run the followign command to cleanup:
+ If you are running with Zipkin, run the following command to cleanup:
- ```bash
- kubectl delete -f install/kubernetes/addons/zipkin.yaml
+ ```command
+ $ kubectl delete -f install/kubernetes/addons/zipkin.yaml
```
- If you are running with Jaeger, run the followign command to cleanup:
+ If you are running with Jaeger, run the following command to cleanup:
- ```bash
- kubectl delete -f https://raw.githubusercontent.com/jaegertracing/jaeger-kubernetes/master/all-in-one/jaeger-all-in-one-template.yml
+ ```command
+ $ kubectl delete -f https://raw.githubusercontent.com/jaegertracing/jaeger-kubernetes/master/all-in-one/jaeger-all-in-one-template.yml
```
* If you are not planning to explore any follow-on tasks, refer to the
diff --git a/_docs/tasks/telemetry/fluentd.md b/_docs/tasks/telemetry/fluentd.md
index c2e488d76248a..62d3c82fd1af6 100644
--- a/_docs/tasks/telemetry/fluentd.md
+++ b/_docs/tasks/telemetry/fluentd.md
@@ -1,12 +1,10 @@
---
title: Logging with Fluentd
-overview: This task shows you how to configure Istio to log to a Fluentd daemon
+description: This task shows you how to configure Istio to log to a Fluentd daemon
-order: 60
+weight: 60
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -24,6 +22,7 @@ The [Bookinfo]({{home}}/docs/guides/bookinfo.html) sample application is used
as the example application throughout this task.
## Before you begin
+
* [Install Istio]({{home}}/docs/setup/) in your cluster and deploy an
application. This task assumes that Mixer is setup in a default configuration
(`--configDefaultNamespace=istio-system`). If you use a different
@@ -48,7 +47,7 @@ connect to a running Fluentd daemon, you may need to add a
for Fluentd. The Fluentd configuration to listen for forwarded logs
is:
-```
+```xml
type forward
@@ -71,7 +70,7 @@ called `logging`.
Save the following as `logging-stack.yaml`.
-```
+```yaml
# Logging Namespace. All below are a part of this namespace.
apiVersion: v1
kind: Namespace
@@ -279,13 +278,8 @@ spec:
Create the resources:
-```bash
-kubectl apply -f logging-stack.yaml
-```
-
-You should see the following:
-
-```
+```command
+$ kubectl apply -f logging-stack.yaml
namespace "logging" created
service "elasticsearch" created
deployment "elasticsearch" created
@@ -305,7 +299,7 @@ Istio will generate and collect automatically.
Save the following as `fluentd-istio.yaml`:
-```
+```yaml
# Configuration for logentry instances
apiVersion: "config.istio.io/v1alpha2"
kind: logentry
@@ -350,12 +344,8 @@ spec:
Create the resources:
-```bash
-istioctl create -f fluentd-istio.yaml
-```
-
-The expected output is similar to:
-```
+```command
+$ istioctl create -f fluentd-istio.yaml
Created config logentry/istio-system/newlog at revision 22374
Created config fluentd/istio-system/handler at revision 22375
Created config rule/istio-system/newlogtofluentd at revision 22376
@@ -374,18 +364,18 @@ example stack.
sample, visit `http://$GATEWAY_URL/productpage` in your web browser
or issue the following command:
- ```bash
- curl http://$GATEWAY_URL/productpage
+ ```command
+ $ curl http://$GATEWAY_URL/productpage
```
1. In a Kubernetes environment, setup port-forwarding for Kibana by
executing the following command:
- ```bash
- kubectl -n logging port-forward $(kubectl -n logging get pod -l app=kibana -o jsonpath='{.items[0].metadata.name}') 5601:5601
+ ```command
+ $ kubectl -n logging port-forward $(kubectl -n logging get pod -l app=kibana -o jsonpath='{.items[0].metadata.name}') 5601:5601
```
- Leave the command running. Press Ctrl-C to exit when done accessing the Kibana UI.
+ Leave the command running. Press Ctrl-C to exit when done accessing the Kibana UI.
1. Navigate to the [Kibana UI](http://localhost:5601/) and click the "Set up index patterns" in the top right.
@@ -393,20 +383,20 @@ example stack.
1. Select `@timestamp` as the Time Filter field name, and click "Create index pattern."
-1. Now click "Discover" on the left menu, and start exploring the logs generated
+1. Now click "Discover" on the left menu, and start exploring the generated logs.
## Cleanup
* Remove the new telemetry configuration:
- ```bash
- istioctl delete -f fluentd-istio.yaml
+ ```command
+ $ istioctl delete -f fluentd-istio.yaml
```
* Remove the example Fluentd, Elasticsearch, Kibana stack:
- ```bash
- kubectl delete -f logging-stack.yaml
+ ```command
+ $ kubectl delete -f logging-stack.yaml
```
* If you are not planning to explore any follow-on tasks, refer to the
@@ -422,5 +412,3 @@ example stack.
and [Mixer Config]({{home}}/docs/concepts/policy-and-control/mixer-config.html).
* Discover the full [Attribute Vocabulary]({{home}}/docs/reference/config/mixer/attribute-vocabulary.html).
-
-* Read the reference guide to [Writing Config]({{home}}/docs/reference/writing-config.html).
diff --git a/_docs/tasks/telemetry/img/istio-tcp-attribute-flow.svg b/_docs/tasks/telemetry/img/istio-tcp-attribute-flow.svg
index f18a68aaafe69..ea5609b96d9d6 100644
--- a/_docs/tasks/telemetry/img/istio-tcp-attribute-flow.svg
+++ b/_docs/tasks/telemetry/img/istio-tcp-attribute-flow.svg
@@ -1,4 +1,5 @@
-
\ No newline at end of file
+M-->P: ReportResponse]]>
+ Created with Raphaël 2.2.0
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ background
+
+
+
+ Layer 1
+
+
+
+ Source
+ Service
+
+
+
+
+ Source
+ Service
+
+
+
+
+
+ Destination
+ Envoy Proxy
+
+
+
+
+ Destination
+ Envoy Proxy
+
+
+
+
+
+ Mixer
+
+
+
+
+ Mixer
+
+
+
+
+
+ Destination
+ Service
+
+
+
+
+ Destination
+ Service
+
+
+
+
+ connect()
+
+
+
+
+
+ Proxy must confirm
+ connection allowed
+
+
+
+ Check(CheckRequest)
+
+
+
+
+
+ Attributes (values):
+ context.protocol ("tcp")
+ connection.id
+ source.ip
+ source.port
+ source.user
+ destination.uid
+ destination.service
+ request.time
+
+
+
+ CheckResponse
+
+
+
+
+
+
+
+
+ accept()
+
+
+
+
+
+ With connection
+ established,
+ start sending data
+
+
+
+ send()
+
+
+
+
+ send()
+
+
+
+
+ recv()
+
+
+
+
+ recv()
+
+
+
+
+
+ ...
+
+
+
+ close()
+
+
+
+
+ close()
+
+
+
+
+ close()
+
+
+
+
+ close()
+
+
+
+
+ Report(ReportRequest)
+
+
+
+
+
+ Attributes (values):
+ connection.duration
+ connection.id
+ connection.received.bytes
+ connection.received.bytes_total
+ connection.sent.bytes
+ connection.sent.bytes_total
+ context.protocol ("tcp")
+ context.time
+ source.ip
+ source.port
+ source.user
+ destination.ip
+ destination.port
+ destination.uid
+ destination.service
+ request.time
+
+
+
+ ReportResponse
+
+
+ accept()
+
+ connect()
+
+
+ Proxy starts
+
+ PReport(ReportRequest)
+
+ Report(ReportRequest)
+
+
+ Report(ReportRequest)
+
+ Periodical Report(ReportRequest)
+
+ Periodical Report(ReportRequest)
+ periodical report
+
+
+
+ ReportResponse
+
+
+ ReportResponse
+
+
+
\ No newline at end of file
diff --git a/_docs/tasks/telemetry/img/servicegraph-example.png b/_docs/tasks/telemetry/img/servicegraph-example.png
index 26636f399643d..b6de9991730dc 100644
Binary files a/_docs/tasks/telemetry/img/servicegraph-example.png and b/_docs/tasks/telemetry/img/servicegraph-example.png differ
diff --git a/_docs/tasks/telemetry/index.md b/_docs/tasks/telemetry/index.md
index 93d7938e00797..2e7f3f70c5a96 100644
--- a/_docs/tasks/telemetry/index.md
+++ b/_docs/tasks/telemetry/index.md
@@ -1,11 +1,9 @@
---
title: Metrics, Logs, and Traces
-overview: Describes tasks that demonstrate how to collect telemetry information from the service mesh.
+description: Describes tasks that demonstrate how to collect telemetry information from the service mesh.
-order: 30
+weight: 30
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/tasks/telemetry/metrics-logs.md b/_docs/tasks/telemetry/metrics-logs.md
index 83f9ddd48c322..0663422128ef9 100644
--- a/_docs/tasks/telemetry/metrics-logs.md
+++ b/_docs/tasks/telemetry/metrics-logs.md
@@ -1,12 +1,10 @@
---
title: Collecting Metrics and Logs
-overview: This task shows you how to configure Istio to collect metrics and logs.
+description: This task shows you how to configure Istio to collect metrics and logs.
-order: 20
+weight: 20
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -18,24 +16,19 @@ The [Bookinfo]({{home}}/docs/guides/bookinfo.html) sample application is used
as the example application throughout this task.
## Before you begin
+
* [Install Istio]({{home}}/docs/setup/) in your cluster and deploy an
application. This task assumes that Mixer is setup in a default configuration
(`--configDefaultNamespace=istio-system`). If you use a different
value, update the configuration and commands in this task to match the value.
-* Install the Prometheus add-on. Prometheus
- will be used to verify task success.
- ```bash
- kubectl apply -f install/kubernetes/addons/prometheus.yaml
- ```
- See [Prometheus](https://prometheus.io) for details.
-
## Collecting new telemetry data
1. Create a new YAML file to hold configuration for the new metric and log
stream that Istio will generate and collect automatically.
Save the following as `new_telemetry.yaml`:
+
```yaml
# Configuration for metric instances
apiVersion: "config.istio.io/v1alpha2"
@@ -125,12 +118,8 @@ as the example application throughout this task.
1. Push the new configuration.
- ```bash
- istioctl create -f new_telemetry.yaml
- ```
-
- The expected output is similar to:
- ```
+ ```command
+ $ istioctl create -f new_telemetry.yaml
Created config metric/istio-system/doublerequestcount at revision 1973035
Created config prometheus/istio-system/doublehandler at revision 1973036
Created config rule/istio-system/doubleprom at revision 1973037
@@ -144,8 +133,8 @@ as the example application throughout this task.
For the Bookinfo sample, visit `http://$GATEWAY_URL/productpage` in your web
browser or issue the following command:
- ```bash
- curl http://$GATEWAY_URL/productpage
+ ```command
+ $ curl http://$GATEWAY_URL/productpage
```
1. Verify that the new metric values are being generated and collected.
@@ -153,21 +142,21 @@ as the example application throughout this task.
In a Kubernetes environment, setup port-forwarding for Prometheus by
executing the following command:
- ```bash
- kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=prometheus -o jsonpath='{.items[0].metadata.name}') 9090:9090 &
+ ```command
+ $ kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=prometheus -o jsonpath='{.items[0].metadata.name}') 9090:9090 &
```
View values for the new metric via the [Prometheus UI](http://localhost:9090/graph#%5B%7B%22range_input%22%3A%221h%22%2C%22expr%22%3A%22istio_double_request_count%22%2C%22tab%22%3A1%7D%5D).
-
+
The provided link opens the Prometheus UI and executes a query for values of
the `istio_double_request_count` metric. The table displayed in the
**Console** tab includes entries similar to:
- ```
- istio_double_request_count{destination="details.default.svc.cluster.local",instance="istio-mixer.istio-system:42422",job="istio-mesh",message="twice the fun!",source="productpage.default.svc.cluster.local"} 2
- istio_double_request_count{destination="ingress.istio-system.svc.cluster.local",instance="istio-mixer.istio-system:42422",job="istio-mesh",message="twice the fun!",source="unknown"} 2
- istio_double_request_count{destination="productpage.default.svc.cluster.local",instance="istio-mixer.istio-system:42422",job="istio-mesh",message="twice the fun!",source="ingress.istio-system.svc.cluster.local"} 2
- istio_double_request_count{destination="reviews.default.svc.cluster.local",instance="istio-mixer.istio-system:42422",job="istio-mesh",message="twice the fun!",source="productpage.default.svc.cluster.local"} 2
+ ```plain
+ istio_double_request_count{destination="details.default.svc.cluster.local",instance="istio-mixer.istio-system:42422",job="istio-mesh",message="twice the fun!",source="productpage.default.svc.cluster.local"} 2
+ istio_double_request_count{destination="ingress.istio-system.svc.cluster.local",instance="istio-mixer.istio-system:42422",job="istio-mesh",message="twice the fun!",source="unknown"} 2
+ istio_double_request_count{destination="productpage.default.svc.cluster.local",instance="istio-mixer.istio-system:42422",job="istio-mesh",message="twice the fun!",source="ingress.istio-system.svc.cluster.local"} 2
+ istio_double_request_count{destination="reviews.default.svc.cluster.local",instance="istio-mixer.istio-system:42422",job="istio-mesh",message="twice the fun!",source="productpage.default.svc.cluster.local"} 2
```
For more on querying Prometheus for metric values, see the [Querying Istio
@@ -179,13 +168,8 @@ as the example application throughout this task.
In a Kubernetes environment, search through the logs for the Mixer pod as
follows:
- ```bash
- kubectl -n istio-system logs $(kubectl -n istio-system get pods -l istio=mixer -o jsonpath='{.items[0].metadata.name}') mixer | grep \"instance\":\"newlog.logentry.istio-system\"
- ```
-
- The expected output is similar to:
-
- ```json
+ ```command-output-as-json
+ $ kubectl -n istio-system logs $(kubectl -n istio-system get pods -l istio=mixer -o jsonpath='{.items[0].metadata.name}') mixer | grep \"instance\":\"newlog.logentry.istio-system\"
{"level":"warn","ts":"2017-09-21T04:33:31.249Z","instance":"newlog.logentry.istio-system","destination":"details","latency":"6.848ms","responseCode":200,"responseSize":178,"source":"productpage","user":"unknown"}
{"level":"warn","ts":"2017-09-21T04:33:31.291Z","instance":"newlog.logentry.istio-system","destination":"ratings","latency":"6.753ms","responseCode":200,"responseSize":48,"source":"reviews","user":"unknown"}
{"level":"warn","ts":"2017-09-21T04:33:31.263Z","instance":"newlog.logentry.istio-system","destination":"reviews","latency":"39.848ms","responseCode":200,"responseSize":379,"source":"productpage","user":"unknown"}
@@ -200,10 +184,13 @@ automatically generate and report a new metric and a new log stream for all
traffic within the mesh.
The added configuration controlled three pieces of Mixer functionality:
+
1. Generation of *instances* (in this example, metric values and log entries)
from Istio attributes
+
1. Creation of *handlers* (configured Mixer adapters) capable of processing
generated *instances*
+
1. Dispatch of *instances* to *handlers* according to a set of *rules*
### Understanding the metrics configuration
@@ -241,7 +228,7 @@ translates received metric instances into prometheus-formatted values that can
be processed by a Prometheus backend. This configuration specified a new
Prometheus metric named `double_request_count`. The Prometheus adapter prepends
the `istio_` namespace to all metric names, therefore this metric will show up
-in Promethus as `istio_double_request_count`. The metric has three labels
+in Prometheus as `istio_double_request_count`. The metric has three labels
matching the dimensions configured for `doublerequestcount.metric` instances.
For `kind: prometheus` handlers, Mixer instances are matched to Prometheus
@@ -249,10 +236,10 @@ metrics via the `instance_name` parameter. The `instance_name` values must be
the fully-qualified name for Mixer instances (example:
`doublerequestcount.metric.istio-system`).
-The `kind: rule` stanza of config defines a new *rule* named `doubleprom`. The
+The `kind: rule` stanza of config defines a new *rule* named `doubleprom`. The
rule directs Mixer to send all `doublerequestcount.metric` instances to the
-`doublehandler.prometheus` handler. Because there is no `match` clause in the
-rule, and because the rule is in the configured default configuration namespace
+`doublehandler.prometheus` handler. Because there is no `match` clause in the
+rule, and because the rule is in the configured default configuration namespace
(`istio-system`), the rule is executed for all requests in the mesh.
### Understanding the logs configuration
@@ -303,8 +290,8 @@ here to illustrate how to use `match` expressions to control rule execution.
* Remove the new telemetry configuration:
- ```bash
- istioctl delete -f new_telemetry.yaml
+ ```command
+ $ istioctl delete -f new_telemetry.yaml
```
* If you are not planning to explore any follow-on tasks, refer to the
@@ -320,7 +307,4 @@ here to illustrate how to use `match` expressions to control rule execution.
* Discover the full [Attribute
Vocabulary]({{home}}/docs/reference/config/mixer/attribute-vocabulary.html).
-* Read the reference guide to [Writing
- Config]({{home}}/docs/reference/writing-config.html).
-
* Refer to the [In-Depth Telemetry]({{home}}/docs/guides/telemetry.html) guide.
diff --git a/_docs/tasks/telemetry/querying-metrics.md b/_docs/tasks/telemetry/querying-metrics.md
index 7c8339268e34b..f66c1909f4934 100644
--- a/_docs/tasks/telemetry/querying-metrics.md
+++ b/_docs/tasks/telemetry/querying-metrics.md
@@ -1,47 +1,33 @@
---
title: Querying Metrics from Prometheus
-overview: This task shows you how to query for Istio Metrics using Prometheus.
+description: This task shows you how to query for Istio Metrics using Prometheus.
-order: 30
+weight: 30
-layout: docs
-type: markdown
---
{% include home.html %}
This task shows you how to query for Istio Metrics using Prometheus. As part of
-this task, you will install the Prometheus Istio addon and use the web-based
-interface for querying metric values.
+this task, you will use the web-based interface for querying metric values.
The [Bookinfo]({{home}}/docs/guides/bookinfo.html) sample application is used as
the example application throughout this task.
## Before you begin
-* [Install Istio]({{home}}/docs/setup/) in your cluster and deploy an
- application.
-## Querying Istio Metrics
-
-1. To query the metrics provided by Mixer, first install the Prometheus add-on.
-
- In Kubernetes environments, execute the following command:
+[Install Istio]({{home}}/docs/setup/) in your cluster and deploy an
+application.
- ```bash
- kubectl apply -f install/kubernetes/addons/prometheus.yaml
- ```
+## Querying Istio Metrics
-1. Verify that the service is running in your cluster.
+1. Verify that the Prometheus service is running in your cluster (since 0.8 the
+   Prometheus setup is included in istio.yaml and istio-auth.yaml by default).
In Kubernetes environments, execute the following command:
- ```bash
- kubectl -n istio-system get svc prometheus
- ```
-
- The output will be similar to:
-
- ```
+ ```command
+ $ kubectl -n istio-system get svc prometheus
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
prometheus 10.59.241.54 9090/TCP 2m
```
@@ -51,35 +37,32 @@ the example application throughout this task.
For the Bookinfo sample, visit `http://$GATEWAY_URL/productpage` in your web
browser or issue the following command:
- ```bash
- curl http://$GATEWAY_URL/productpage
+ ```command
+ $ curl http://$GATEWAY_URL/productpage
```
- Note: `$GATEWAY_URL` is the value set in the
- [Bookinfo]({{home}}/docs/guides/bookinfo.html) guide.
+ > `$GATEWAY_URL` is the value set in the [Bookinfo]({{home}}/docs/guides/bookinfo.html) guide.
1. Open the Prometheus UI.
In Kubernetes environments, execute the following command:
- ```bash
- kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=prometheus -o jsonpath='{.items[0].metadata.name}') 9090:9090 &
+ ```command
+ $ kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=prometheus -o jsonpath='{.items[0].metadata.name}') 9090:9090 &
```
Visit [http://localhost:9090/graph](http://localhost:9090/graph) in your web browser.
1. Execute a Prometheus query.
- In the "Expression" input box at the top of the web page, enter the text:
- `istio_request_count`. Then, click the **Execute** button.
+ In the "Expression" input box at the top of the web page, enter the text:
+ `istio_request_count`. Then, click the **Execute** button.
- The results will be similar to:
+ The results will be similar to:
-{% include figure.html width='100%' ratio='39.36%'
- img='./img/prometheus_query_result.png'
- alt='Prometheus Query Result'
- title='Prometheus Query Result'
- caption='Prometheus Query Result'
+{% include image.html width="100%" ratio="39.36%"
+ link="./img/prometheus_query_result.png"
+ caption="Prometheus Query Result"
%}
Other queries to try:
@@ -95,7 +78,7 @@ the example application throughout this task.
```
istio_request_count{destination_service="reviews.default.svc.cluster.local", destination_version="v3"}
```
-
+
This query returns the current total count of all requests to the v3 of the reviews service.
- Rate of requests over the past 5 minutes to all `productpage` services:
@@ -104,13 +87,13 @@ the example application throughout this task.
rate(istio_request_count{destination_service=~"productpage.*", response_code="200"}[5m])
```
-### About the Prometheus Add-on
+### About the Prometheus add-on
Mixer comes with a built-in [Prometheus](https://prometheus.io) adapter that
exposes an endpoint serving generated metric values. The Prometheus add-on is a
-Prometheus server that comes pre-configured to scrape Mixer endpoints to collect
+Prometheus server that comes preconfigured to scrape Mixer endpoints to collect
the exposed metrics. It provides a mechanism for persistent storage and querying
-of Istio metrics.
+of Istio metrics.
The configured Prometheus add-on scrapes three endpoints:
1. *istio-mesh* (`istio-mixer.istio-system:42422`): all Mixer-generated mesh
@@ -125,18 +108,11 @@ docs](https://prometheus.io/docs/querying/basics/).
## Cleanup
-* In Kubernetes environments, execute the following command to remove the
- Prometheus add-on:
-
- ```bash
- kubectl delete -f install/kubernetes/addons/prometheus.yaml
- ```
-
* Remove any `kubectl port-forward` processes that may still be running:
- ```bash
- killall kubectl
- ```
+ ```command
+ $ killall kubectl
+ ```
* If you are not planning to explore any follow-on tasks, refer to the
[Bookinfo cleanup]({{home}}/docs/guides/bookinfo.html#cleanup) instructions
diff --git a/_docs/tasks/telemetry/servicegraph.md b/_docs/tasks/telemetry/servicegraph.md
index 86c3329548ac3..fb80b067fefd1 100644
--- a/_docs/tasks/telemetry/servicegraph.md
+++ b/_docs/tasks/telemetry/servicegraph.md
@@ -1,12 +1,10 @@
---
title: Generating a Service Graph
-overview: This task shows you how to generate a graph of services within an Istio mesh.
+description: This task shows you how to generate a graph of services within an Istio mesh.
-order: 50
+weight: 50
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -22,12 +20,6 @@ the example application throughout this task.
* [Install Istio]({{home}}/docs/setup/) in your cluster and deploy an
application.
-* Install the Prometheus add-on. Directions for install of this add-on are
- supplied as part of the [Querying
- Metrics]({{home}}/docs/tasks/telemetry/querying-metrics.html) Task.
-
- Use of the Prometheus add-on is _required_ for the service graph.
-
## Generating a Service Graph
1. To view a graphical representation of your service mesh, install the
@@ -35,21 +27,16 @@ the example application throughout this task.
In Kubernetes environments, execute the following command:
- ```bash
- kubectl apply -f install/kubernetes/addons/servicegraph.yaml
+ ```command
+ $ kubectl apply -f install/kubernetes/addons/servicegraph.yaml
```
1. Verify that the service is running in your cluster.
In Kubernetes environments, execute the following command:
- ```bash
- kubectl -n istio-system get svc servicegraph
- ```
-
- The output will be similar to:
-
- ```
+ ```command
+ $ kubectl -n istio-system get svc servicegraph
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
servicegraph 10.59.253.165 8088/TCP 30s
```
@@ -59,63 +46,84 @@ the example application throughout this task.
For the Bookinfo sample, visit `http://$GATEWAY_URL/productpage` in your web
browser or issue the following command:
- ```bash
- curl http://$GATEWAY_URL/productpage
+ ```command
+ $ curl http://$GATEWAY_URL/productpage
```
Refresh the page a few times (or send the command a few times) to generate a
small amount of traffic.
- Note: `$GATEWAY_URL` is the value set in the
- [Bookinfo]({{home}}/docs/guides/bookinfo.html) guide.
+ > `$GATEWAY_URL` is the value set in the [Bookinfo]({{home}}/docs/guides/bookinfo.html) guide.
1. Open the Servicegraph UI.
In Kubernetes environments, execute the following command:
- ```bash
- kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=servicegraph -o jsonpath='{.items[0].metadata.name}') 8088:8088 &
+ ```command
+ $ kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=servicegraph -o jsonpath='{.items[0].metadata.name}') 8088:8088 &
```
- Visit [http://localhost:8088/dotviz](http://localhost:8088/dotviz) in your web browser.
+ Visit [http://localhost:8088/force/forcegraph.html](http://localhost:8088/force/forcegraph.html)
+ in your web browser. Try clicking on a service to see details on
+ the service. Real time traffic data is shown in a panel below.
The results will look similar to:
- {% include figure.html width='100%' ratio='63.16%'
- img='./img/servicegraph-example.png'
- alt='Example Servicegraph'
- title='Example Servicegraph'
- caption='Example Servicegraph'
+ {% include image.html width="75%" ratio="107.7%"
+ link="./img/servicegraph-example.png"
+ caption="Example Servicegraph"
%}
+1. Experiment with Query Parameters
+
+ Visit
+ [http://localhost:8088/force/forcegraph.html?time_horizon=15s&filter_empty=true](http://localhost:8088/force/forcegraph.html?time_horizon=15s&filter_empty=true)
+ in your web browser. Note the query parameters provided.
+
+ `filter_empty=true` will only show services that are currently receiving traffic within the time horizon.
+
+ `time_horizon=15s` affects the filter above, and also affects the
+ reported traffic information when clicking on a service. The
+ traffic information will be aggregated over the specified time
+ horizon.
+
+ The default behavior is to not filter empty services, and use a
+ time horizon of 5 minutes.
+
### About the Servicegraph Add-on
-The Servicegraph service is an example service that provides endpoints for
-generating and visualizing a graph of services within a mesh. It exposes the
-following endpoints:
+The [Servicegraph](https://github.com/istio/istio/tree/master/addons/servicegraph)
+service provides endpoints for generating and visualizing a graph of
+services within a mesh. It exposes the following endpoints:
+
+* `/force/forcegraph.html` As explored above, this is an interactive
+ [D3.js](https://d3js.org/) visualization.
-- `/graph` which provides a JSON serialization of the servicegraph
-- `/dotgraph` which provides a dot serialization of the servicegraph
-- `/dotviz` which provides a visual representation of the servicegraph
+* `/dotviz` is a static [Graphviz](https://www.graphviz.org/)
+ visualization.
-All endpoints take an optional argument of `time_horizon`, which controls the
-timespan to consider for graph generation.
+* `/dotgraph` provides a
+ [DOT](https://en.wikipedia.org/wiki/DOT_(graph_description_language))
+ serialization.
-All endpoints also take an optional argument of `filter_empty=true`, which will
-restrict the nodes and edges shown to only those that reflect non-zero traffic
-levels during the specified `time_horizon`.
+* `/d3graph` provides a JSON serialization for D3 visualization.
-The Servicegraph example is built on top of Prometheus queries.
+* `/graph` provides a generic JSON serialization.
+
+All endpoints take the query parameters explored above.
+
+The Servicegraph example is built on top of Prometheus queries and
+depends on the standard Istio metric configuration.
## Cleanup
* In Kubernetes environments, execute the following command to remove the
- Servicegraph add-on:
+Servicegraph add-on:
- ```bash
- kubectl delete -f install/kubernetes/addons/servicegraph.yaml
- ```
+ ```command
+ $ kubectl delete -f install/kubernetes/addons/servicegraph.yaml
+ ```
* If you are not planning to explore any follow-on tasks, refer to the
- [Bookinfo cleanup]({{home}}/docs/guides/bookinfo.html#cleanup) instructions
- to shutdown the application.
+[Bookinfo cleanup]({{home}}/docs/guides/bookinfo.html#cleanup) instructions
+to shut down the application.
diff --git a/_docs/tasks/telemetry/tcp-metrics.md b/_docs/tasks/telemetry/tcp-metrics.md
index e3adf5ee9ae9e..3c2a5cdc2c044 100644
--- a/_docs/tasks/telemetry/tcp-metrics.md
+++ b/_docs/tasks/telemetry/tcp-metrics.md
@@ -1,12 +1,10 @@
---
title: Collecting Metrics for TCP services
-overview: This task shows you how to configure Istio to collect metrics for TCP services.
+description: This task shows you how to configure Istio to collect metrics for TCP services.
-order: 25
+weight: 25
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -19,24 +17,18 @@ The [Bookinfo]({{home}}/docs/guides/bookinfo.html) sample application is used
as the example application throughout this task.
## Before you begin
+
* [Install Istio]({{home}}/docs/setup/) in your cluster and deploy an
- application.
+application.
* This task assumes that the Bookinfo sample will be deployed in the `default`
- namespace. If you use a different namespace, you will need to update the
- example configuration and commands.
-
-* Install the Prometheus add-on. Prometheus
- will be used to verify task success.
- ```bash
- kubectl apply -f install/kubernetes/addons/prometheus.yaml
- ```
- See [Prometheus](https://prometheus.io) for details.
+namespace. If you use a different namespace, you will need to update the
+example configuration and commands.
## Collecting new telemetry data
1. Create a new YAML file to hold configuration for the new metrics that Istio
- will generate and collect automatically.
+will generate and collect automatically.
Save the following as `tcp_telemetry.yaml`:
@@ -112,12 +104,8 @@ as the example application throughout this task.
1. Push the new configuration.
- ```bash
- istioctl create -f tcp_telemetry.yaml
- ```
-
- The expected output is similar to:
- ```
+ ```command
+ $ istioctl create -f tcp_telemetry.yaml
Created config metric/default/mongosentbytes at revision 3852843
Created config metric/default/mongoreceivedbytes at revision 3852844
Created config prometheus/default/mongohandler at revision 3852845
@@ -131,19 +119,14 @@ as the example application throughout this task.
If you are using a cluster with automatic sidecar injection enabled,
simply deploy the services using `kubectl`:
- ```
- kubectl apply -f samples/bookinfo/kube/bookinfo-ratings-v2.yaml
+ ```command
+ $ kubectl apply -f samples/bookinfo/kube/bookinfo-ratings-v2.yaml
```
If you are using manual sidecar injection, use the following command instead:
- ```
- kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo-ratings-v2.yaml)
- ```
-
- Expected output:
-
- ```
+ ```command
+ $ kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo-ratings-v2.yaml)
deployment "ratings-v2" configured
```
@@ -152,32 +135,22 @@ as the example application throughout this task.
If you are using a cluster with automatic sidecar injection enabled,
simply deploy the services using `kubectl`:
- ```
- kubectl apply -f samples/bookinfo/kube/bookinfo-db.yaml
+ ```command
+ $ kubectl apply -f samples/bookinfo/kube/bookinfo-db.yaml
```
If you are using manual sidecar injection, use the following command instead:
- ```
- kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo-db.yaml)
- ```
-
- Expected output:
-
- ```
+ ```command
+ $ kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo-db.yaml)
service "mongodb" configured
deployment "mongodb-v1" configured
```
1. Add routing rules to send traffic to `v2` of the `ratings` service:
- ```
- istioctl create -f samples/bookinfo/kube/route-rule-ratings-db.yaml
- ```
-
- Expected output:
-
- ```
+ ```command
+ $ istioctl create -f samples/bookinfo/kube/route-rule-ratings-db.yaml
Created config route-rule//ratings-test-v2 at revision 7216403
Created config route-rule//reviews-test-ratings-v2 at revision 7216404
```
@@ -187,8 +160,8 @@ as the example application throughout this task.
For the Bookinfo sample, visit `http://$GATEWAY_URL/productpage` in your web
browser or issue the following command:
- ```bash
- curl http://$GATEWAY_URL/productpage
+ ```command
+ $ curl http://$GATEWAY_URL/productpage
```
1. Verify that the new metric values are being generated and collected.
@@ -196,26 +169,26 @@ as the example application throughout this task.
In a Kubernetes environment, setup port-forwarding for Prometheus by
executing the following command:
- ```bash
- kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=prometheus -o jsonpath='{.items[0].metadata.name}') 9090:9090 &
+ ```command
+ $ kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=prometheus -o jsonpath='{.items[0].metadata.name}') 9090:9090 &
```
View values for the new metric via the [Prometheus UI](http://localhost:9090/graph#%5B%7B%22range_input%22%3A%221h%22%2C%22expr%22%3A%22istio_mongo_received_bytes%22%2C%22tab%22%3A1%7D%5D).
-
+
The provided link opens the Prometheus UI and executes a query for values of
the `istio_mongo_received_bytes` metric. The table displayed in the
**Console** tab includes entries similar to:
- ```
- istio_mongo_received_bytes{destination_version="v1",instance="istio-mixer.istio-system:42422",job="istio-mesh",source_service="ratings.default.svc.cluster.local",source_version="v2"} 2317
+ ```plain
+ istio_mongo_received_bytes{destination_version="v1",instance="istio-mixer.istio-system:42422",job="istio-mesh",source_service="ratings.default.svc.cluster.local",source_version="v2"} 2317
```
- NOTE: Istio also collects protocol-specific statistics for MongoDB. For
- example, the value of total OP_QUERY messages sent from the `ratings` service
- is collected in the following metric:
- `envoy_mongo_mongo_collection_ratings_query_total` (click
- [here](http://localhost:9090/graph#%5B%7B%22range_input%22%3A%221h%22%2C%22expr%22%3A%22envoy_mongo_mongo_collection_ratings_query_total%22%2C%22tab%22%3A1%7D%5D)
- to execute the query).
+    > Istio also collects protocol-specific statistics for MongoDB. For
+    > example, the value of total OP_QUERY messages sent from the `ratings` service
+    > is collected in the following metric:
+    > `envoy_mongo_mongo_collection_ratings_query_total` (click
+    > [here](http://localhost:9090/graph#%5B%7B%22range_input%22%3A%221h%22%2C%22expr%22%3A%22envoy_mongo_mongo_collection_ratings_query_total%22%2C%22tab%22%3A1%7D%5D)
+    > to execute the query).
## Understanding TCP telemetry collection
@@ -229,36 +202,33 @@ configuration consisted of _instances_, a _handler_, and a _rule_. Please see
that Task for a complete description of the components of metric collection.
Metrics collection for TCP services differs only in the limited set of
-attributes that are available for use in _instances_.
+attributes that are available for use in _instances_.
### TCP Attributes
Several TCP-specific attributes enable TCP policy and control within Istio.
-These attributes are generated by server-side Envoy proxies and forwarded to
-Mixer at both connection establishment and connection close. Additionally,
-context attributes provide the ability to distinguish between `http` and `tcp`
+These attributes are generated by server-side Envoy proxies. They are forwarded to Mixer at connection establishment, forwarded periodically while the connection is alive (periodic reports), and forwarded at connection close (final report). The default interval for periodic reports is 10 seconds, and it should be at least 1 second. Additionally, context attributes provide the ability to distinguish between `http` and `tcp`
protocols within policies.
-{% include figure.html width='100%' ratio='192.50%'
- img='./img/istio-tcp-attribute-flow.svg'
- alt='Attribute Generation Flow for TCP Services in an Istio Mesh.'
- title='TCP Attribute Flow'
- caption='TCP Attribute Flow'
+{% include image.html width="100%" ratio="192.50%"
+ link="./img/istio-tcp-attribute-flow.svg"
+ alt="Attribute Generation Flow for TCP Services in an Istio Mesh."
+ caption="TCP Attribute Flow"
%}
## Cleanup
* Remove the new telemetry configuration:
- ```bash
- istioctl delete -f tcp_telemetry.yaml
- ```
+ ```command
+ $ istioctl delete -f tcp_telemetry.yaml
+ ```
* Remove the `port-forward` process:
- ```bash
- killall kubectl
- ```
+ ```command
+ $ killall kubectl
+ ```
* If you are not planning to explore any follow-on tasks, refer to the
[Bookinfo cleanup]({{home}}/docs/guides/bookinfo.html#cleanup) instructions
@@ -267,20 +237,15 @@ protocols within policies.
## What's next
* Learn more about [Mixer]({{home}}/docs/concepts/policy-and-control/mixer.html)
- and [Mixer
- Config]({{home}}/docs/concepts/policy-and-control/mixer-config.html).
+and [Mixer Config]({{home}}/docs/concepts/policy-and-control/mixer-config.html).
* Discover the full [Attribute
- Vocabulary]({{home}}/docs/reference/config/mixer/attribute-vocabulary.html).
-
-* Read the reference guide to [Writing
- Config]({{home}}/docs/reference/writing-config.html).
+Vocabulary]({{home}}/docs/reference/config/mixer/attribute-vocabulary.html).
* Refer to the [In-Depth Telemetry]({{home}}/docs/guides/telemetry.html) guide.
* Learn more about [Querying Istio
- Metrics]({{home}}/docs/tasks/telemetry/querying-metrics.html).
+Metrics]({{home}}/docs/tasks/telemetry/querying-metrics.html).
* Learn more about the [MongoDB-specific statistics generated by
- Envoy](https://www.envoyproxy.io/docs/envoy/latest/configuration/network_filters/mongo_proxy_filter#statistics).
-
+Envoy](https://www.envoyproxy.io/docs/envoy/latest/configuration/network_filters/mongo_proxy_filter#statistics).
diff --git a/_docs/tasks/telemetry/using-istio-dashboard.md b/_docs/tasks/telemetry/using-istio-dashboard.md
index 1a1f322762c00..7dbbaecb70e5c 100644
--- a/_docs/tasks/telemetry/using-istio-dashboard.md
+++ b/_docs/tasks/telemetry/using-istio-dashboard.md
@@ -1,12 +1,10 @@
---
title: Visualizing Metrics with Grafana
-overview: This task shows you how to setup and use the Istio Dashboard to monitor mesh traffic.
+description: This task shows you how to setup and use the Istio Dashboard to monitor mesh traffic.
-order: 40
+weight: 40
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -18,38 +16,26 @@ The [Bookinfo]({{home}}/docs/guides/bookinfo.html) sample application is used as
the example application throughout this task.
## Before you begin
+
* [Install Istio]({{home}}/docs/setup/) in your cluster and deploy an
application.
-* Install the Prometheus add-on.
-
- ```bash
- kubectl apply -f install/kubernetes/addons/prometheus.yaml
- ```
-
- Use of the Prometheus add-on is _required_ for the Istio Dashboard.
-
## Viewing the Istio Dashboard
-1. To view Istio metrics in a graphical dashboard install the Grafana add-on.
+1. To view Istio metrics in a graphical dashboard install the Grafana add-on.
In Kubernetes environments, execute the following command:
- ```bash
- kubectl apply -f install/kubernetes/addons/grafana.yaml
+ ```command
+ $ kubectl apply -f install/kubernetes/addons/grafana.yaml
```
1. Verify that the service is running in your cluster.
In Kubernetes environments, execute the following command:
- ```bash
- kubectl -n istio-system get svc grafana
- ```
-
- The output will be similar to:
-
- ```
+ ```command
+ $ kubectl -n istio-system get svc grafana
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
grafana 10.59.247.103 3000/TCP 2m
```
@@ -58,19 +44,17 @@ the example application throughout this task.
In Kubernetes environments, execute the following command:
- ```bash
- kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=grafana -o jsonpath='{.items[0].metadata.name}') 3000:3000 &
+ ```command
+ $ kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=grafana -o jsonpath='{.items[0].metadata.name}') 3000:3000 &
```
Visit [http://localhost:3000/dashboard/db/istio-dashboard](http://localhost:3000/dashboard/db/istio-dashboard) in your web browser.
The Istio Dashboard will look similar to:
- {% include figure.html width='100%' ratio='56.57%'
- img='./img/grafana-istio-dashboard.png'
- alt='Istio Dashboard'
- title='Istio Dashboard'
- caption='Istio Dashboard'
+ {% include image.html width="100%" ratio="56.57%"
+ link="./img/grafana-istio-dashboard.png"
+ caption="Istio Dashboard"
%}
1. Send traffic to the mesh.
@@ -78,8 +62,8 @@ the example application throughout this task.
For the Bookinfo sample, visit `http://$GATEWAY_URL/productpage` in your web
browser or issue the following command:
- ```bash
- curl http://$GATEWAY_URL/productpage
+ ```command
+ $ curl http://$GATEWAY_URL/productpage
```
Refresh the page a few times (or send the command a few times) to generate a
@@ -88,19 +72,16 @@ the example application throughout this task.
Look at the Istio Dashboard again. It should reflect the traffic that was
generated. It will look similar to:
- {% include figure.html width='100%' ratio='56.57%'
- img='./img/dashboard-with-traffic.png'
- alt='Istio Dashboard With Traffic'
- title='Istio Dashboard With Traffic'
- caption='Istio Dashboard With Traffic'
+ {% include image.html width="100%" ratio="56.57%"
+ link="./img/dashboard-with-traffic.png"
+ caption="Istio Dashboard With Traffic"
%}
- Note: `$GATEWAY_URL` is the value set in the
- [Bookinfo]({{home}}/docs/guides/bookinfo.html) guide.
+ > `$GATEWAY_URL` is the value set in the [Bookinfo]({{home}}/docs/guides/bookinfo.html) guide.
### About the Grafana add-on
-The Grafana add-on is a pre-configured instance of Grafana. The base image
+The Grafana add-on is a preconfigured instance of Grafana. The base image
([`grafana/grafana:4.1.2`](https://hub.docker.com/r/grafana/grafana/)) has been
modified to start with both a Prometheus data source and the Istio Dashboard
installed. The base install files for Istio, and Mixer in particular, ship with
@@ -117,23 +98,23 @@ The Istio Dashboard consists of three main sections:
responses for each individual service within the mesh (HTTP and TCP).
For more on how to create, configure, and edit dashboards, please see the
-[Grafana documentation](http://docs.grafana.org/).
+[Grafana documentation](https://docs.grafana.org/).
## Cleanup
* In Kubernetes environments, execute the following command to remove the Grafana
- add-on:
+add-on:
- ```bash
- kubectl delete -f install/kubernetes/addons/grafana.yaml
- ```
+ ```command
+ $ kubectl delete -f install/kubernetes/addons/grafana.yaml
+ ```
* Remove any `kubectl port-forward` processes that may be running:
- ```bash
- killall kubectl
- ```
+ ```command
+ $ killall kubectl
+ ```
* If you are not planning to explore any follow-on tasks, refer to the
- [Bookinfo cleanup]({{home}}/docs/guides/bookinfo.html#cleanup) instructions
- to shutdown the application.
+[Bookinfo cleanup]({{home}}/docs/guides/bookinfo.html#cleanup) instructions
+to shut down the application.
diff --git a/_docs/tasks/traffic-management-v1alpha3/circuit-breaking.md b/_docs/tasks/traffic-management-v1alpha3/circuit-breaking.md
new file mode 100644
index 0000000000000..04a9a80ecddb2
--- /dev/null
+++ b/_docs/tasks/traffic-management-v1alpha3/circuit-breaking.md
@@ -0,0 +1,240 @@
+---
+title: Circuit Breaking
+description: This task demonstrates the circuit-breaking capability for resilient applications
+
+weight: 50
+
+---
+{% include home.html %}
+
+This task demonstrates the circuit-breaking capability for resilient applications. Circuit breaking allows developers to write applications that limit the impact of failures, latency spikes, and other undesirable effects of network peculiarities. This task will show how to configure circuit breaking for connections, requests, and outlier detection.
+
+## Before you begin
+
+* Setup Istio by following the instructions in the
+ [Installation guide]({{home}}/docs/setup/).
+
+* Start the [httpbin](https://github.com/istio/istio/tree/master/samples/httpbin) sample
+ which will be used as the backend service for our task
+
+ ```command
+ $ kubectl apply -f <(istioctl kube-inject --debug -f samples/httpbin/httpbin.yaml)
+ ```
+
+## Circuit breaker
+
+Let's set up a scenario to demonstrate the circuit-breaking capabilities of Istio. We should have the `httpbin` service running from the previous section.
+
+1. Create a [destination rule]({{home}}/docs/reference/config/istio.networking.v1alpha3.html#DestinationRule) to specify our circuit breaking settings when calling the `httpbin` service:
+
+ ```bash
+ cat <2 procs, for 5s: http://httpbin:8000/get
+Starting at max qps with 2 thread(s) [gomax 2] for exactly 20 calls (10 per thread + 0)
+23:51:10 W http.go:617> Parsed non ok code 503 (HTTP/1.1 503)
+Ended after 106.474079ms : 20 calls. qps=187.84
+Aggregated Function Time : count 20 avg 0.010215375 +/- 0.003604 min 0.005172024 max 0.019434859 sum 0.204307492
+# range, mid point, percentile, count
+>= 0.00517202 <= 0.006 , 0.00558601 , 5.00, 1
+> 0.006 <= 0.007 , 0.0065 , 20.00, 3
+> 0.007 <= 0.008 , 0.0075 , 30.00, 2
+> 0.008 <= 0.009 , 0.0085 , 40.00, 2
+> 0.009 <= 0.01 , 0.0095 , 60.00, 4
+> 0.01 <= 0.011 , 0.0105 , 70.00, 2
+> 0.011 <= 0.012 , 0.0115 , 75.00, 1
+> 0.012 <= 0.014 , 0.013 , 90.00, 3
+> 0.016 <= 0.018 , 0.017 , 95.00, 1
+> 0.018 <= 0.0194349 , 0.0187174 , 100.00, 1
+# target 50% 0.0095
+# target 75% 0.012
+# target 99% 0.0191479
+# target 99.9% 0.0194062
+Code 200 : 19 (95.0 %)
+Code 503 : 1 (5.0 %)
+Response Header Sizes : count 20 avg 218.85 +/- 50.21 min 0 max 231 sum 4377
+Response Body/Total Sizes : count 20 avg 652.45 +/- 99.9 min 217 max 676 sum 13049
+All done 20 calls (plus 0 warmup) 10.215 ms avg, 187.8 qps
+```
+
+We see almost all requests made it through!
+
+```plain
+Code 200 : 19 (95.0 %)
+Code 503 : 1 (5.0 %)
+```
+
+The istio-proxy does allow for some leeway. Let's bring the number of concurrent connections up to 3:
+
+```command
+$ kubectl exec -it $FORTIO_POD -c fortio /usr/local/bin/fortio -- load -c 3 -qps 0 -n 20 -loglevel Warning http://httpbin:8000/get
+Fortio 0.6.2 running at 0 queries per second, 2->2 procs, for 5s: http://httpbin:8000/get
+Starting at max qps with 3 thread(s) [gomax 2] for exactly 30 calls (10 per thread + 0)
+23:51:51 W http.go:617> Parsed non ok code 503 (HTTP/1.1 503)
+23:51:51 W http.go:617> Parsed non ok code 503 (HTTP/1.1 503)
+23:51:51 W http.go:617> Parsed non ok code 503 (HTTP/1.1 503)
+23:51:51 W http.go:617> Parsed non ok code 503 (HTTP/1.1 503)
+23:51:51 W http.go:617> Parsed non ok code 503 (HTTP/1.1 503)
+23:51:51 W http.go:617> Parsed non ok code 503 (HTTP/1.1 503)
+23:51:51 W http.go:617> Parsed non ok code 503 (HTTP/1.1 503)
+23:51:51 W http.go:617> Parsed non ok code 503 (HTTP/1.1 503)
+23:51:51 W http.go:617> Parsed non ok code 503 (HTTP/1.1 503)
+23:51:51 W http.go:617> Parsed non ok code 503 (HTTP/1.1 503)
+23:51:51 W http.go:617> Parsed non ok code 503 (HTTP/1.1 503)
+Ended after 71.05365ms : 30 calls. qps=422.22
+Aggregated Function Time : count 30 avg 0.0053360199 +/- 0.004219 min 0.000487853 max 0.018906468 sum 0.160080597
+# range, mid point, percentile, count
+>= 0.000487853 <= 0.001 , 0.000743926 , 10.00, 3
+> 0.001 <= 0.002 , 0.0015 , 30.00, 6
+> 0.002 <= 0.003 , 0.0025 , 33.33, 1
+> 0.003 <= 0.004 , 0.0035 , 40.00, 2
+> 0.004 <= 0.005 , 0.0045 , 46.67, 2
+> 0.005 <= 0.006 , 0.0055 , 60.00, 4
+> 0.006 <= 0.007 , 0.0065 , 73.33, 4
+> 0.007 <= 0.008 , 0.0075 , 80.00, 2
+> 0.008 <= 0.009 , 0.0085 , 86.67, 2
+> 0.009 <= 0.01 , 0.0095 , 93.33, 2
+> 0.014 <= 0.016 , 0.015 , 96.67, 1
+> 0.018 <= 0.0189065 , 0.0184532 , 100.00, 1
+# target 50% 0.00525
+# target 75% 0.00725
+# target 99% 0.0186345
+# target 99.9% 0.0188793
+Code 200 : 19 (63.3 %)
+Code 503 : 11 (36.7 %)
+Response Header Sizes : count 30 avg 145.73333 +/- 110.9 min 0 max 231 sum 4372
+Response Body/Total Sizes : count 30 avg 507.13333 +/- 220.8 min 217 max 676 sum 15214
+All done 30 calls (plus 0 warmup) 5.336 ms avg, 422.2 qps
+```
+
+Now we start to see the circuit breaking behavior we expect.
+
+```plain
+Code 200 : 19 (63.3 %)
+Code 503 : 11 (36.7 %)
+```
+
+Only 63.3% of the requests made it through and the rest were trapped by circuit breaking. We can query the istio-proxy stats to see more:
+
+```command
+$ kubectl exec -it $FORTIO_POD -c istio-proxy -- sh -c 'curl localhost:15000/stats' | grep httpbin | grep pending
+cluster.out.httpbin.springistio.svc.cluster.local|http|version=v1.upstream_rq_pending_active: 0
+cluster.out.httpbin.springistio.svc.cluster.local|http|version=v1.upstream_rq_pending_failure_eject: 0
+cluster.out.httpbin.springistio.svc.cluster.local|http|version=v1.upstream_rq_pending_overflow: 12
+cluster.out.httpbin.springistio.svc.cluster.local|http|version=v1.upstream_rq_pending_total: 39
+```
+
+We see `12` for the `upstream_rq_pending_overflow` value which means `12` calls so far have been flagged for circuit breaking.
+
+## Cleaning up
+
+1. Remove the rules.
+
+ ```command
+ $ istioctl delete destinationrule httpbin
+ ```
+
+1. Shutdown the [httpbin](https://github.com/istio/istio/tree/master/samples/httpbin) service and client.
+
+ ```command
+ $ kubectl delete deploy httpbin fortio-deploy
+ $ kubectl delete svc httpbin
+ ```
+
+## What's next
+
+Check out the [destination rule]({{home}}/docs/reference/config/istio.networking.v1alpha3.html#DestinationRule) reference section for more circuit breaker settings.
diff --git a/_docs/tasks/traffic-management-v1alpha3/egress-tcp.md b/_docs/tasks/traffic-management-v1alpha3/egress-tcp.md
new file mode 100644
index 0000000000000..ae27c6c090585
--- /dev/null
+++ b/_docs/tasks/traffic-management-v1alpha3/egress-tcp.md
@@ -0,0 +1,108 @@
+---
+title: Control Egress TCP Traffic
+description: Describes how to configure Istio to route TCP traffic from services in the mesh to external services.
+
+weight: 41
+
+---
+{% include home.html %}
+
+The [Control Egress Traffic]({{home}}/docs/tasks/traffic-management-v1alpha3/egress.html) task demonstrated how external (outside the Kubernetes cluster) HTTP and HTTPS services can be accessed from applications inside the mesh. A quick reminder: by default, Istio-enabled applications are unable to access URLs outside the cluster. To enable such access, an [external service]({{home}}/docs/reference/config/istio.networking.v1alpha3.html#ServiceEntry) must be defined, or, alternatively, [direct access to external services]({{home}}/docs/tasks/traffic-management-v1alpha3/egress.html#calling-external-services-directly) must be configured.
+
+This task describes how to configure Istio to expose external TCP services to applications inside the Istio service mesh.
+
+## Before you begin
+
+* Setup Istio by following the instructions in the
+ [Installation guide]({{home}}/docs/setup/).
+
+* Start the [sleep](https://github.com/istio/istio/tree/master/samples/sleep) sample application which will be used as a test source for external calls.
+
+ ```command
+ $ kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml)
+ ```
+
+ **Note**: any pod that you can execute `curl` from is good enough.
+
+## Using Istio external services for external TCP traffic
+
+In this task we access `wikipedia.org` by HTTPS originated by the application. This task demonstrates the use case where an application cannot use HTTP with TLS origination by the sidecar proxy. Using HTTP with TLS origination by the sidecar proxy is described in the [Control Egress Traffic]({{home}}/docs/tasks/traffic-management-v1alpha3/egress.html) task. In that task, `https://google.com` was accessed by issuing HTTP requests to `http://www.google.com:443`.
+
+The HTTPS traffic originated by the application will be treated by Istio as _opaque_ TCP. To enable such traffic, we define a TCP external service on port 443. In TCP external services, as opposed to HTTP-based external services, the destinations are specified by IPs or by blocks of IPs in [CIDR notation](https://tools.ietf.org/html/rfc2317).
+
+Let's assume for the sake of this example that we want to access `wikipedia.org` by the domain name. This means that we have to specify all the IPs of `wikipedia.org` in our TCP external service. Fortunately, the IPs of `wikipedia.org` are published [here](https://www.mediawiki.org/wiki/Wikipedia_Zero/IP_Addresses). It is a list of IP blocks in [CIDR notation](https://tools.ietf.org/html/rfc2317): `91.198.174.192/27`, `103.102.166.224/27`, and more.
+
+## Creating an external service
+
+Let's create an external service to enable TCP access to `wikipedia.org`:
+
+```bash
+cat <5,563,121 articles in English
+ ```
+
+ This means there were 5,563,121 articles in Wikipedia in English when this task was written.
+
+## Cleanup
+
+1. Remove the external service we created.
+
+ ```command
+ $ istioctl delete serviceentry wikipedia-ext
+ ```
+
+1. Shutdown the [sleep](https://github.com/istio/istio/tree/master/samples/sleep) application.
+
+ ```command
+ $ kubectl delete -f samples/sleep/sleep.yaml
+ ```
+
+## What's next
+
+* The [ServiceEntry]({{home}}/docs/reference/config/istio.networking.v1alpha3.html#ServiceEntry) reference.
+
+* The [Control Egress Traffic]({{home}}/docs/tasks/traffic-management-v1alpha3/egress.html) task, for HTTP and HTTPS.
diff --git a/_docs/tasks/traffic-management-v1alpha3/egress.md b/_docs/tasks/traffic-management-v1alpha3/egress.md
new file mode 100644
index 0000000000000..49f800627c41a
--- /dev/null
+++ b/_docs/tasks/traffic-management-v1alpha3/egress.md
@@ -0,0 +1,284 @@
+---
+title: Control Egress Traffic
+description: Describes how to configure Istio to route traffic from services in the mesh to external services.
+
+weight: 40
+
+---
+{% include home.html %}
+
+By default, Istio-enabled services are unable to access URLs outside of the cluster because
+iptables is used in the pod to transparently redirect all outbound traffic to the sidecar proxy,
+which only handles intra-cluster destinations.
+
+This task describes how to configure Istio to expose external services to Istio-enabled clients.
+You'll learn how to enable access to external services by defining `ServiceEntry` configurations,
+or alternatively, to simply bypass the Istio proxy for a specific range of IPs.
+
+## Before you begin
+
+* Setup Istio by following the instructions in the
+ [Installation guide]({{home}}/docs/setup/).
+
+* Start the [sleep](https://github.com/istio/istio/tree/master/samples/sleep) sample
+ which will be used as a test source for external calls.
+
+ ```command
+ $ kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml)
+ ```
+
+ Note that any pod that you can `exec` and `curl` from would do.
+
+## Configuring Istio external services
+
+Using Istio `ServiceEntry` configurations, you can access any publicly accessible service
+from within your Istio cluster. In this task we will use
+[httpbin.org](http://httpbin.org) and [www.google.com](http://www.google.com) as examples.
+
+### Configuring the external services
+
+1. Create a `ServiceEntry` to allow access to an external HTTP service:
+
+ ```bash
+ cat < Notice that we are restricting the failure impact to user "jason" only. If you login
+> as any other user, you would not experience any delays.
+
+**Fixing the bug:** At this point we would normally fix the problem by either increasing the
+productpage timeout or decreasing the reviews to ratings service timeout,
+terminate and restart the fixed microservice, and then confirm that the `productpage`
+returns its response without any errors.
+
+However, we already have this fix running in v3 of the reviews service, so we can simply
+fix the problem by migrating all
+traffic to `reviews:v3` as described in the
+[traffic shifting]({{home}}/docs/tasks/traffic-management/traffic-shifting.html) task.
+
+(Left as an exercise for the reader - change the delay rule to
+use a 2.8 second delay and then run it against the v3 version of reviews.)
+
+## Fault injection using HTTP Abort
+
+As another test of resiliency, we will introduce an HTTP abort to the ratings microservices for the user "jason".
+We expect the page to load immediately unlike the delay example and display the "product ratings not available"
+message.
+
+1. Create a fault injection rule to send an HTTP abort for user "jason"
+
+ ```command
+ $ istioctl replace -f samples/bookinfo/routing/route-rule-ratings-test-abort.yaml
+ ```
+
+ Confirm the rule is created
+
+ ```command-output-as-yaml
+ $ istioctl get virtualservice ratings -o yaml
+ apiVersion: networking.istio.io/v1alpha3
+ kind: VirtualService
+ metadata:
+ name: ratings
+ ...
+ spec:
+ hosts:
+ - ratings
+ http:
+ - fault:
+ abort:
+ httpStatus: 500
+ percent: 100
+ match:
+ - headers:
+ cookie:
+ regex: ^(.*?;)?(user=jason)(;.*)?$
+ route:
+ - destination:
+ name: ratings
+ subset: v1
+ - route:
+ - destination:
+ name: ratings
+ subset: v1
+ ```
+
+1. Observe application behavior
+
+ Login as user "jason". If the rule propagated successfully to all pods, you should see the page load
+ immediately with the "product ratings not available" message. Logout from user "jason" and you should
+ see reviews with rating stars show up successfully on the productpage web page.
+
+## Cleanup
+
+* Remove the application routing rules:
+
+ ```command
+ $ istioctl delete -f samples/bookinfo/routing/route-rule-all-v1.yaml
+ ```
+
+* If you are not planning to explore any follow-on tasks, refer to the
+ [Bookinfo cleanup]({{home}}/docs/guides/bookinfo.html#cleanup) instructions
+ to shutdown the application.
+
+## What's next
+
+* Learn more about [fault injection]({{home}}/docs/concepts/traffic-management/fault-injection.html).
diff --git a/_docs/tasks/traffic-management-v1alpha3/index.md b/_docs/tasks/traffic-management-v1alpha3/index.md
new file mode 100644
index 0000000000000..13b4dec06b8c1
--- /dev/null
+++ b/_docs/tasks/traffic-management-v1alpha3/index.md
@@ -0,0 +1,10 @@
+---
+title: Traffic Management (v1alpha3)
+description: WIP - Describes tasks that demonstrate traffic routing features of Istio service mesh.
+
+weight: 15
+
+toc: false
+---
+
+{% include section-index.html docs=site.docs %}
diff --git a/_docs/tasks/traffic-management-v1alpha3/ingress.md b/_docs/tasks/traffic-management-v1alpha3/ingress.md
new file mode 100644
index 0000000000000..ea445fcd894bb
--- /dev/null
+++ b/_docs/tasks/traffic-management-v1alpha3/ingress.md
@@ -0,0 +1,472 @@
+---
+title: Control Ingress Traffic
+description: Describes how to configure Istio to expose a service outside of the service mesh.
+
+weight: 30
+
+redirect_from: /docs/tasks/ingress.html
+---
+{% include home.html %}
+
+In a Kubernetes environment, the [Kubernetes Ingress Resource](https://kubernetes.io/docs/concepts/services-networking/ingress/)
+allows users to specify services that should be exposed outside the cluster.
+For traffic entering an Istio service mesh, however, an Istio-aware [ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-controllers)
+is needed to allow Istio features, for example, monitoring and route rules, to be applied to traffic entering the cluster.
+
+Istio provides an envoy-based ingress controller that implements very limited support for standard Kubernetes `Ingress` resources
+as well as full support for an alternative specification,
+[Istio Gateway]({{home}}/docs/reference/config/istio.networking.v1alpha3.html#Gateway).
+Using a `Gateway` is the recommended approach for configuring ingress traffic for Istio services.
+It is significantly more functional, not to mention the only option for non-Kubernetes environments.
+
+This task describes how to configure Istio to expose a service outside of the service mesh using either specification.
+
+## Before you begin
+
+* Setup Istio by following the instructions in the
+ [Installation guide]({{home}}/docs/setup/).
+
+* Make sure your current directory is the `istio` directory.
+
+* Start the [httpbin](https://github.com/istio/istio/tree/master/samples/httpbin) sample,
+ which will be used as the destination service to be exposed externally.
+
+ If you installed the [Istio-Initializer]({{home}}/docs/setup/kubernetes/sidecar-injection.html#automatic-sidecar-injection), do
+
+ ```command
+ $ kubectl apply -f samples/httpbin/httpbin.yaml
+ ```
+
+ Without the Istio-Initializer:
+
+ ```command
+ $ kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml)
+ ```
+
+* Generate a certificate and key that will be used to demonstrate a TLS-secured gateway
+
+ A private key and certificate can be created for testing using [OpenSSL](https://www.openssl.org/).
+
+ ```command
+ $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls.key -out /tmp/tls.crt -subj "/CN=foo.bar.com"
+ ```
+
+## Configuring ingress using an Istio Gateway resource (recommended)
+
+An [Istio Gateway]({{home}}/docs/reference/config/istio.networking.v1alpha3.html#Gateway) is the preferred
+model for configuring ingress traffic in Istio.
+An ingress `Gateway` describes a load balancer operating at the edge of the mesh receiving incoming
+HTTP/TCP connections.
+It configures exposed ports, protocols, etc.,
+but, unlike [Kubernetes Ingress Resources](https://kubernetes.io/docs/concepts/services-networking/ingress/),
+does not include any traffic routing configuration. Traffic routing for ingress traffic is instead configured
+using Istio routing rules, exactly the same way as for internal service requests.
+
+### Configuring a Gateway
+
+1. Create an Istio `Gateway`
+
+ ```bash
+ cat < 80:31486/TCP,443:32254/TCP 32m
+ ```
+
+ ```command
+ $ export INGRESS_HOST=169.47.243.100:31486
+ $ export SECURE_INGRESS_HOST=169.47.243.100:32254
+ ```
+
+1. Access the httpbin service with either HTTP or HTTPS using _curl_:
+
+ ```command
+ $ curl -I http://$INGRESS_HOST/status/200
+ HTTP/1.1 200 OK
+ server: envoy
+ date: Mon, 29 Jan 2018 04:45:49 GMT
+ content-type: text/html; charset=utf-8
+ access-control-allow-origin: *
+ access-control-allow-credentials: true
+ content-length: 0
+ x-envoy-upstream-service-time: 48
+ ```
+
+ ```command
+ $ curl -I -k https://$SECURE_INGRESS_HOST/status/200
+ HTTP/1.1 200 OK
+ server: envoy
+ date: Mon, 29 Jan 2018 04:45:49 GMT
+ content-type: text/html; charset=utf-8
+ access-control-allow-origin: *
+ access-control-allow-credentials: true
+ content-length: 0
+ x-envoy-upstream-service-time: 96
+ ```
+
+1. Access any other URL that has not been explicitly exposed. You should
+   see an HTTP 404 error:
+
+ ```command
+ $ curl -I http://$INGRESS_HOST/headers
+ HTTP/1.1 404 Not Found
+ date: Mon, 29 Jan 2018 04:45:49 GMT
+ server: envoy
+ content-length: 0
+ ```
+
+ ```command
+ $ curl -I https://$SECURE_INGRESS_HOST/headers
+ HTTP/1.1 404 Not Found
+ date: Mon, 29 Jan 2018 04:45:49 GMT
+ server: envoy
+ content-length: 0
+ ```
+
+## Configuring ingress using a Kubernetes Ingress resource
+
+An Istio `Ingress` specification is based on the standard [Kubernetes Ingress Resource](https://kubernetes.io/docs/concepts/services-networking/ingress/)
+specification, with the following differences:
+
+1. Istio `Ingress` specification contains a `kubernetes.io/ingress.class: istio` annotation.
+
+1. All other annotations are ignored.
+
+1. Path syntax is [c++11 regex format](http://en.cppreference.com/w/cpp/regex/ecmascript)
+
+Note that `Ingress` traffic is not affected by routing rules configured for a backend
+(i.e., an Istio `VirtualService` cannot be combined with an `Ingress` specification).
+Traffic splitting, fault injection, mirroring, header match, etc., will not work for ingress traffic.
+A `DestinationRule` associated with the backend service will, however, work as expected.
+
+The `servicePort` field in the `Ingress` specification can take a port number
+(integer) or a name. The port name must follow the Istio port naming
+conventions (e.g., `grpc-*`, `http2-*`, `http-*`, etc.) in order to
+function properly. The name used must match the port name in the backend
+service declaration.
+
+### Configuring simple Ingress
+
+1. Create a basic `Ingress` specification for the httpbin service
+
+ ```bash
+ cat < 80:31486/TCP,443:32254/TCP 32m
+ ```
+
+ ```command
+ $ export INGRESS_HOST=169.47.243.100:31486
+ ```
+
+1. Access the httpbin service using _curl_:
+
+ ```command
+ $ curl -I http://$INGRESS_HOST/status/200
+ HTTP/1.1 200 OK
+ server: envoy
+ date: Mon, 29 Jan 2018 04:45:49 GMT
+ content-type: text/html; charset=utf-8
+ access-control-allow-origin: *
+ access-control-allow-credentials: true
+ content-length: 0
+ x-envoy-upstream-service-time: 48
+ ```
+
+1. Access any other URL that has not been explicitly exposed. You should
+   see an HTTP 404 error
+
+ ```command
+ $ curl -I http://$INGRESS_HOST/headers
+ HTTP/1.1 404 Not Found
+ date: Mon, 29 Jan 2018 04:45:49 GMT
+ server: envoy
+ content-length: 0
+ ```
+
+### Configuring secure Ingress (HTTPS)
+
+1. Create a Kubernetes `Secret` to hold the key/cert
+
+ Create the secret `istio-ingress-certs` in namespace `istio-system` using `kubectl`. The Istio ingress controller
+ will automatically load the secret.
+
+ > The secret MUST be called `istio-ingress-certs` in the `istio-system` namespace, or it will not
+ be mounted and available to the Istio ingress controller.
+
+ ```command
+ $ kubectl create -n istio-system secret tls istio-ingress-certs --key /tmp/tls.key --cert /tmp/tls.crt
+ ```
+
+ Note that by default all service accounts in the `istio-system` namespace can access this ingress key/cert,
+ which risks leaking the key/cert. You can change the Role-Based Access Control (RBAC) rules to protect them.
+ See (Link TBD) for details.
+
+1. Create the `Ingress` specification for the httpbin service
+
+ ```bash
+ cat < Because SNI is not yet supported, Envoy currently only allows a single TLS secret in the ingress.
+ > That means the secretName field in ingress resource is not used.
+
+### Verifying secure Ingress
+
+1. Determine the ingress URL:
+
+ * If your cluster is running in an environment that supports external load balancers,
+ use the ingress' external address:
+
+ ```command
+ $ kubectl get ingress secure-ingress -o wide
+ NAME HOSTS ADDRESS PORTS AGE
+ secure-ingress * 130.211.10.121 80 1d
+ ```
+
+ ```command
+ $ export SECURE_INGRESS_HOST=130.211.10.121
+ ```
+
+ * If load balancers are not supported, use the ingress controller pod's hostIP:
+
+ ```command
+ $ kubectl -n istio-system get po -l istio=ingress -o jsonpath='{.items[0].status.hostIP}'
+ 169.47.243.100
+ ```
+
+ along with the istio-ingress service's nodePort for port 443:
+
+ ```command
+ $ kubectl -n istio-system get svc istio-ingress
+ NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ istio-ingress 10.10.10.155 80:31486/TCP,443:32254/TCP 32m
+ ```
+
+ ```command
+ $ export SECURE_INGRESS_HOST=169.47.243.100:32254
+ ```
+
+1. Access the httpbin service using _curl_:
+
+ ```command
+ $ curl -I -k https://$SECURE_INGRESS_HOST/status/200
+ HTTP/1.1 200 OK
+ server: envoy
+ date: Mon, 29 Jan 2018 04:45:49 GMT
+ content-type: text/html; charset=utf-8
+ access-control-allow-origin: *
+ access-control-allow-credentials: true
+ content-length: 0
+ x-envoy-upstream-service-time: 96
+ ```
+
+1. Access any other URL that has not been explicitly exposed. You should
+   see an HTTP 404 error
+
+ ```command
+ $ curl -I -k https://$SECURE_INGRESS_HOST/headers
+ HTTP/1.1 404 Not Found
+ date: Mon, 29 Jan 2018 04:45:49 GMT
+ server: envoy
+ content-length: 0
+ ```
+
+## Understanding what happened
+
+`Gateway` or `Ingress` configuration resources allow external traffic to enter the
+Istio service mesh and make the traffic management and policy features of Istio
+available for edge services.
+
+In the preceding steps we created a service inside the Istio service mesh
+and showed how to expose both HTTP and HTTPS endpoints of the service to
+external traffic. Using an Istio `Gateway` provides significantly more functionality
+and is recommended. Using a Kubernetes `Ingress`, however, is also supported
+and may be especially useful when moving existing Kubernetes applications to Istio.
+
+## Cleanup
+
+1. Remove the `Gateway` configuration.
+
+ ```command
+ $ kubectl delete gateway httpbin-gateway
+ ```
+
+1. Remove the `Ingress` configuration.
+
+ ```command
+ $ kubectl delete ingress simple-ingress secure-ingress
+ ```
+
+1. Remove the routing rule and secret.
+
+ ```command
+ $ istioctl delete virtualservice httpbin
+ $ kubectl delete -n istio-system secret istio-ingress-certs
+ ```
+
+1. Shutdown the [httpbin](https://github.com/istio/istio/tree/master/samples/httpbin) service.
+
+ ```command
+ $ kubectl delete -f samples/httpbin/httpbin.yaml
+ ```
+
+## What's next
+
+* Learn more about [Ingress Control](https://kubernetes.io/docs/concepts/services-networking/ingress/).
+
+* Learn more about [Traffic Routing]({{home}}/docs/reference/config/istio.networking.v1alpha3.html).
diff --git a/_docs/tasks/traffic-management-v1alpha3/mirroring.md b/_docs/tasks/traffic-management-v1alpha3/mirroring.md
new file mode 100644
index 0000000000000..1b662f5b969e1
--- /dev/null
+++ b/_docs/tasks/traffic-management-v1alpha3/mirroring.md
@@ -0,0 +1,249 @@
+---
+title: Mirroring
+description: This task demonstrates the traffic shadowing/mirroring capabilities of Istio
+
+weight: 60
+
+---
+{% include home.html %}
+
+This task demonstrates the traffic shadowing/mirroring capabilities of Istio. Traffic mirroring is a powerful concept that allows feature teams to bring changes to production with as little risk as possible. Mirroring brings a copy of live traffic to a mirrored service and happens out of band of the critical request path for the primary service.
+
+## Before you begin
+
+* Setup Istio by following the instructions in the
+ [Installation guide]({{home}}/docs/setup/).
+
+* Start two versions of the `httpbin` service that have access logging enabled
+
+httpbin-v1:
+
+```bash
+cat <
+```
+
+1. Change the route rule to mirror traffic to v2
+
+```bash
+cat < This task assumes you don't have any routes set yet. If you've already created conflicting route rules for the sample,
+you'll need to use `replace` rather than `create` in the following command.
+
+1. Set the default version for all microservices to v1.
+
+ ```command
+ $ istioctl create -f samples/bookinfo/routing/route-rule-all-v1.yaml
+ ```
+
+ > In a Kubernetes deployment of Istio, you can replace `istioctl`
+ > with `kubectl` in the above, and for all other CLI commands.
+ > Note, however, that `kubectl` currently does not provide input validation.
+
+ You can display the routes that are defined with the following command:
+
+ ```command-output-as-yaml
+ $ istioctl get virtualservices -o yaml
+ apiVersion: networking.istio.io/v1alpha3
+ kind: VirtualService
+ metadata:
+ name: details
+ ...
+ spec:
+ hosts:
+ - details
+ http:
+ - route:
+ - destination:
+ name: details
+ subset: v1
+ ---
+ apiVersion: networking.istio.io/v1alpha3
+ kind: VirtualService
+ metadata:
+ name: productpage
+ ...
+ spec:
+ gateways:
+ - bookinfo-gateway
+ - mesh
+ hosts:
+ - productpage
+ http:
+ - route:
+ - destination:
+ name: productpage
+ subset: v1
+ ---
+ apiVersion: networking.istio.io/v1alpha3
+ kind: VirtualService
+ metadata:
+ name: ratings
+ ...
+ spec:
+ hosts:
+ - ratings
+ http:
+ - route:
+ - destination:
+ name: ratings
+ subset: v1
+ ---
+ apiVersion: networking.istio.io/v1alpha3
+ kind: VirtualService
+ metadata:
+ name: reviews
+ ...
+ spec:
+ hosts:
+ - reviews
+ http:
+ - route:
+ - destination:
+ name: reviews
+ subset: v1
+ ---
+ ```
+
+ > The corresponding `subset` definitions can be displayed using `istioctl get destinationrules -o yaml`.
+
+ Since rule propagation to the proxies is asynchronous, you should wait a few seconds for the rules
+ to propagate to all pods before attempting to access the application.
+
+1. Open the Bookinfo URL (http://$GATEWAY_URL/productpage) in your browser
+
+ You should see the Bookinfo application productpage displayed.
+ Notice that the `productpage` is displayed with no rating stars since `reviews:v1` does not access the ratings service.
+
+1. Route a specific user to `reviews:v2`
+
   Let's enable the ratings service for test user "jason" by routing productpage traffic to
   `reviews:v2` instances.
+
+ ```command
+ $ istioctl replace -f samples/bookinfo/routing/route-rule-reviews-test-v2.yaml
+ ```
+
+ Confirm the rule is created:
+
+ ```command-output-as-yaml
+ $ istioctl get virtualservice reviews -o yaml
+ apiVersion: networking.istio.io/v1alpha3
+ kind: VirtualService
+ metadata:
+ name: reviews
+ ...
+ spec:
+ hosts:
+ - reviews
+ http:
+ - match:
+ - headers:
+ cookie:
+ regex: ^(.*?;)?(user=jason)(;.*)?$
+ route:
+ - destination:
+ name: reviews
+ subset: v2
+ - route:
+ - destination:
+ name: reviews
+ subset: v1
+ ```
+
+1. Log in as user "jason" at the `productpage` web page.
+
+ You should now see ratings (1-5 stars) next to each review. Notice that if you log in as
+ any other user, you will continue to see `reviews:v1`.
+
+## Understanding what happened
+
+In this task, you used Istio to send 100% of the traffic to the v1 version of each of the Bookinfo
+services. You then set a rule to selectively send traffic to version v2 of the reviews service based
+on a header (i.e., a user cookie) in a request.
+
+Once the v2 version has been tested to our satisfaction, we could use Istio to send traffic from
+all users to v2, optionally in a gradual fashion. We'll explore this in a separate task.
+
+## Cleanup
+
+* Remove the application routing rules.
+
+ ```command
+ $ istioctl delete -f samples/bookinfo/routing/route-rule-all-v1.yaml
+ ```
+
+* If you are not planning to explore any follow-on tasks, refer to the
+ [Bookinfo cleanup]({{home}}/docs/guides/bookinfo.html#cleanup) instructions
+ to shutdown the application.
+
+## What's next
+
+* Learn more about [request routing]({{home}}/docs/concepts/traffic-management/rules-configuration.html).
diff --git a/_docs/tasks/traffic-management-v1alpha3/request-timeouts.md b/_docs/tasks/traffic-management-v1alpha3/request-timeouts.md
new file mode 100644
index 0000000000000..b5c38dacca217
--- /dev/null
+++ b/_docs/tasks/traffic-management-v1alpha3/request-timeouts.md
@@ -0,0 +1,146 @@
+---
+title: Setting Request Timeouts
+description: This task shows you how to setup request timeouts in Envoy using Istio.
+
+weight: 28
+
+---
+{% include home.html %}
+
+This task shows you how to setup request timeouts in Envoy using Istio.
+
+## Before you begin
+
+* Setup Istio by following the instructions in the
+ [Installation guide]({{home}}/docs/setup/).
+
+* Deploy the [Bookinfo]({{home}}/docs/guides/bookinfo.html) sample application.
+
+* Initialize the application version routing by running the following command:
+
+ ```command
+ $ istioctl create -f samples/bookinfo/routing/route-rule-all-v1.yaml
+ ```
+
+## Request timeouts
+
+A timeout for http requests can be specified using the *httpReqTimeout* field of a routing rule.
+By default, the timeout is 15 seconds, but in this task we'll override the `reviews` service
+timeout to 1 second.
+To see its effect, however, we'll also introduce an artificial 2 second delay in calls
+to the `ratings` service.
+
+1. Route requests to v2 of the `reviews` service, i.e., a version that calls the `ratings` service
+
+ ```bash
+ cat < With the current Envoy sidecar implementation, you may need to refresh the `productpage` very many times
+ > to see the proper distribution. It may require 15 refreshes or more before you see any change. You can modify the rules to route 90% of the traffic to v3 to see red stars more often.
+
+1. When version v3 of the `reviews` microservice is considered stable, we can route 100% of the traffic to `reviews:v3`:
+
+ ```command
+ $ istioctl replace -f samples/bookinfo/routing/route-rule-reviews-v3.yaml
+ ```
+
+ You can now log into the `productpage` as any user and you should always see book reviews
+ with *red* colored star ratings for each review.
+
+## Understanding what happened
+
+In this task we migrated traffic from an old to new version of the `reviews` service using Istio's
+weighted routing feature. Note that this is very different than version migration using deployment features
+of container orchestration platforms, which use instance scaling to manage the traffic.
+With Istio, we can allow the two versions of the `reviews` service to scale up and down independently,
+without affecting the traffic distribution between them.
+For more about version routing with autoscaling, check out [Canary Deployments using Istio]({{home}}/blog/canary-deployments-using-istio.html).
+
+## Cleanup
+
+* Remove the application routing rules.
+
+ ```command
+ $ istioctl delete -f samples/bookinfo/routing/route-rule-all-v1.yaml
+ ```
+
+* If you are not planning to explore any follow-on tasks, refer to the
+ [Bookinfo cleanup]({{home}}/docs/guides/bookinfo.html#cleanup) instructions
+ to shutdown the application.
+
+## What's next
+
+* Learn more about [request routing]({{home}}/docs/concepts/traffic-management/request-routing.html).
diff --git a/_docs/tasks/traffic-management/circuit-breaking.md b/_docs/tasks/traffic-management/circuit-breaking.md
index 7a8f949b39ab7..1da7e6a7a4dd7 100644
--- a/_docs/tasks/traffic-management/circuit-breaking.md
+++ b/_docs/tasks/traffic-management/circuit-breaking.md
@@ -1,12 +1,11 @@
---
title: Circuit Breaking
-overview: This task demonstrates the circuit-breaking capability for resilient applications
+description: This task demonstrates the circuit-breaking capability for resilient applications
-order: 50
+weight: 50
-layout: docs
-type: markdown
---
+{% include home.html %}
This task demonstrates the circuit-breaking capability for resilient applications. Circuit breaking allows developers to write applications that limit the impact of failures, latency spikes, and other undesirable effects of network peculiarities. This task will show how to configure circuit breaking for connections, requests, and outlier detection.
@@ -14,13 +13,13 @@ This task demonstrates the circuit-breaking capability for resilient application
* Setup Istio by following the instructions in the
[Installation guide]({{home}}/docs/setup/).
-
+
* Start the [httpbin](https://github.com/istio/istio/tree/master/samples/httpbin) sample
which will be used as the backend service for our task
-
- ```bash
- kubectl apply -f <(istioctl kube-inject --debug -f samples/httpbin/httpbin.yaml)
- ```
+
+ ```command
+ $ kubectl apply -f <(istioctl kube-inject --debug -f samples/httpbin/httpbin.yaml)
+ ```
## Circuit breaker
@@ -31,60 +30,38 @@ Let's set up a scenario to demonstrate the circuit-breaking capabilities of Isti
1. Create a default route rule to route all traffic to `v1` of our `httpbin` service:
- ```bash
- istioctl create -f samples/httpbin/routerules/httpbin-v1.yaml
+ ```command
+ $ istioctl create -f samples/httpbin/routerules/httpbin-v1.yaml
```
-2. Create a [destination policy]({{home}}/docs/reference/config/istio.routing.v1alpha1.html#CircuitBreaker) to specify our circuit breaking settings when calling `httpbin` service:
-
- ```bash
- cat <2 procs, for 5s: http://httpbin:8000/get
Starting at max qps with 2 thread(s) [gomax 2] for exactly 20 calls (10 per thread + 0)
23:51:10 W http.go:617> Parsed non ok code 503 (HTTP/1.1 503)
@@ -147,20 +122,18 @@ Response Header Sizes : count 20 avg 218.85 +/- 50.21 min 0 max 231 sum 4377
Response Body/Total Sizes : count 20 avg 652.45 +/- 99.9 min 217 max 676 sum 13049
All done 20 calls (plus 0 warmup) 10.215 ms avg, 187.8 qps
```
-
-We see almost all requests made it through!
-```
+We see almost all requests made it through!
+
+```plain
Code 200 : 19 (95.0 %)
Code 503 : 1 (5.0 %)
```
The istio-proxy does allow for some leeway. Let's bring the number of concurrent connections up to 3:
-```bash
-kubectl exec -it $FORTIO_POD -c fortio /usr/local/bin/fortio -- load -c 3 -qps 0 -n 20 -loglevel Warning http://httpbin:8000/get
-```
-```
+```command
+$ kubectl exec -it $FORTIO_POD -c fortio /usr/local/bin/fortio -- load -c 3 -qps 0 -n 20 -loglevel Warning http://httpbin:8000/get
Fortio 0.6.2 running at 0 queries per second, 2->2 procs, for 5s: http://httpbin:8000/get
Starting at max qps with 3 thread(s) [gomax 2] for exactly 30 calls (10 per thread + 0)
23:51:51 W http.go:617> Parsed non ok code 503 (HTTP/1.1 503)
@@ -202,39 +175,37 @@ All done 30 calls (plus 0 warmup) 5.336 ms avg, 422.2 qps
Now we start to see the circuit breaking behavior we expect.
-```
+```plain
Code 200 : 19 (63.3 %)
Code 503 : 11 (36.7 %)
```
Only 63.3% of the requests made it through and the rest were trapped by circuit breaking. We can query the istio-proxy stats to see more:
-```bash
-kubectl exec -it $FORTIO_POD -c istio-proxy -- sh -c 'curl localhost:15000/stats' | grep httpbin | grep pending
-```
-```
+```command
+$ kubectl exec -it $FORTIO_POD -c istio-proxy -- sh -c 'curl localhost:15000/stats' | grep httpbin | grep pending
cluster.out.httpbin.springistio.svc.cluster.local|http|version=v1.upstream_rq_pending_active: 0
cluster.out.httpbin.springistio.svc.cluster.local|http|version=v1.upstream_rq_pending_failure_eject: 0
cluster.out.httpbin.springistio.svc.cluster.local|http|version=v1.upstream_rq_pending_overflow: 12
cluster.out.httpbin.springistio.svc.cluster.local|http|version=v1.upstream_rq_pending_total: 39
-```
-
-We see `12` for the `upstream_rq_pending_overflow` value which means `12` calls so far have been flagged for circuit breaking.
+```
+
+We see `12` for the `upstream_rq_pending_overflow` value which means `12` calls so far have been flagged for circuit breaking.
## Cleaning up
1. Remove the rules.
-
- ```bash
- istioctl delete routerule httpbin-default-v1
- istioctl delete destinationpolicy httpbin-circuit-breaker
+
+ ```command
+ $ istioctl delete routerule httpbin-default-v1
+ $ istioctl delete destinationpolicy httpbin-circuit-breaker
```
1. Shutdown the [httpbin](https://github.com/istio/istio/tree/master/samples/httpbin) service and client.
- ```bash
- kubectl delete deploy httpbin fortio-deploy
- kubectl delete svc httpbin
+ ```command
+ $ kubectl delete deploy httpbin fortio-deploy
+ $ kubectl delete svc httpbin
```
## What's next
diff --git a/_docs/tasks/traffic-management/egress-tcp.md b/_docs/tasks/traffic-management/egress-tcp.md
index a9f3982a11508..dbf38b09d059a 100644
--- a/_docs/tasks/traffic-management/egress-tcp.md
+++ b/_docs/tasks/traffic-management/egress-tcp.md
@@ -1,11 +1,9 @@
---
title: Control Egress TCP Traffic
-overview: Describes how to configure Istio to route TCP traffic from services in the mesh to external services.
+description: Describes how to configure Istio to route TCP traffic from services in the mesh to external services.
-order: 41
+weight: 41
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -20,13 +18,14 @@ This task describes how to configure Istio to expose external TCP services to ap
* Start the [sleep](https://github.com/istio/istio/tree/master/samples/sleep) sample application which will be used as a test source for external calls.
- ```bash
- kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml)
+ ```command
+ $ kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml)
```
- **Note**: any pod that you can execute `curl` from is good enough.
+ > Any pod that you can execute `curl` from is good enough.
## Using Istio egress rules for external TCP traffic
+
In this task we access `wikipedia.org` by HTTPS originated by the application. This task demonstrates the use case when the application cannot use HTTP with TLS origination by the sidecar proxy. Using HTTP with TLS origination by the sidecar proxy is described in the [Control Egress Traffic]({{home}}/docs/tasks/traffic-management/egress.html) task. In that task, `https://google.com` was accessed by issuing HTTP requests to `http://www.google.com:443`.
The HTTPS traffic originated by the application will be treated by Istio as _opaque_ TCP. To enable such traffic, we define a TCP egress rule on port 443.
@@ -38,7 +37,9 @@ Let's assume for the sake of the example that we want to access `wikipedia.org`
Alternatively, if we want to access `wikipedia.org` by an IP, just a single egress rule for that IP must be defined.
## Creating egress rules
+
Let's create egress rules to enable TCP access to `wikipedia.org`:
+
```bash
cat <5,563,121 articles in English
```
@@ -131,14 +127,14 @@ This command will create five egress rules, a rule per different block of IPs of
1. Remove the egress rules we created.
- ```bash
- istioctl delete egressrule wikipedia-range1 wikipedia-range2 wikipedia-range3 wikipedia-range4 wikipedia-range5 -n default
+ ```command
+ $ istioctl delete egressrule wikipedia-range1 wikipedia-range2 wikipedia-range3 wikipedia-range4 wikipedia-range5 -n default
```
1. Shutdown the [sleep](https://github.com/istio/istio/tree/master/samples/sleep) application.
- ```bash
- kubectl delete -f samples/sleep/sleep.yaml
+ ```command
+ $ kubectl delete -f samples/sleep/sleep.yaml
```
## What's next
diff --git a/_docs/tasks/traffic-management/egress.md b/_docs/tasks/traffic-management/egress.md
index 8e4973abc6e9d..22bae2f86b717 100644
--- a/_docs/tasks/traffic-management/egress.md
+++ b/_docs/tasks/traffic-management/egress.md
@@ -1,18 +1,17 @@
---
title: Control Egress Traffic
-overview: Describes how to configure Istio to route traffic from services in the mesh to external services.
+description: Describes how to configure Istio to route traffic from services in the mesh to external services.
-order: 40
+weight: 40
-layout: docs
-type: markdown
+redirect_from: /docs/tasks/egress.html
---
{% include home.html %}
By default, Istio-enabled services are unable to access URLs outside of the cluster because
iptables is used in the pod to transparently redirect all outbound traffic to the sidecar proxy,
which only handles intra-cluster destinations.
-
+
This task describes how to configure Istio to expose external services to Istio-enabled clients.
You'll learn how to enable access to external services using egress rules,
or alternatively, to simply bypass the Istio proxy for a specific range of IPs.
@@ -24,9 +23,9 @@ or alternatively, to simply bypass the Istio proxy for a specific range of IPs.
* Start the [sleep](https://github.com/istio/istio/tree/master/samples/sleep) sample
which will be used as a test source for external calls.
-
- ```bash
- kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml)
+
+ ```command
+ $ kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml)
```
Note that any pod that you can `exec` and `curl` from would do.
@@ -34,8 +33,8 @@ or alternatively, to simply bypass the Istio proxy for a specific range of IPs.
## Using Istio egress rules
Using Istio egress rules, you can access any publicly accessible service
-from within your Istio cluster. In this task we will use
-[httpbin.org](http://httpbin.org) and [www.google.com](http://www.google.com) as examples.
+from within your Istio cluster. In this task we will use
+[httpbin.org](https://httpbin.org) and [www.google.com](https://www.google.com) as examples.
### Configuring the external services
@@ -56,7 +55,7 @@ from within your Istio cluster. In this task we will use
EOF
```
-2. Create an egress rule to allow access to an external HTTPS service:
+1. Create an egress rule to allow access to an external HTTPS service:
```bash
cat < --zone | grep -e clusterIpv4Cidr -e servicesIpv4Cidr
clusterIpv4Cidr: 10.4.0.0/14
servicesIpv4Cidr: 10.7.240.0/20
```
-```bash
-kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml --includeIPRanges=10.4.0.0/14,10.7.240.0/20)
+
+```command
+$ kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml --includeIPRanges=10.4.0.0/14,10.7.240.0/20)
```
On Azure Container Service(ACS), use:
-```bash
-kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml --includeIPRanges=10.244.0.0/16,10.240.0.0/16)
+```command
+$ kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml --includeIPRanges=10.244.0.0/16,10.240.0.0/16)
```
-
After starting your service this way, the Istio sidecar will only intercept and manage internal requests
within the cluster. Any external request will simply bypass the sidecar and go straight to its intended
destination.
-```bash
-export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
-kubectl exec -it $SOURCE_POD -c sleep curl http://httpbin.org/headers
+```command
+$ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
+$ kubectl exec -it $SOURCE_POD -c sleep curl http://httpbin.org/headers
```
-
## Understanding what happened
In this task we looked at two ways to call external services from within an Istio cluster:
1. Using an egress rule (recommended)
-2. Configuring the Istio sidecar to exclude external IPs from its remapped IP table
+1. Configuring the Istio sidecar to exclude external IPs from its remapped IP table
The first approach (egress rule) currently only supports HTTP(S) requests, but allows
-you to use all of the same Istio service mesh features for calls to services within or outside
+you to use all of the same Istio service mesh features for calls to services within or outside
of the cluster. We demonstrated this by setting a timeout rule for calls to an external service.
The second approach bypasses the Istio sidecar proxy, giving your services direct access to any
external URL. However, configuring the proxy this way does require
cloud provider specific knowledge and configuration.
-
## Cleanup
1. Remove the rules.
-
- ```bash
- istioctl delete egressrule httpbin-egress-rule google-egress-rule
- istioctl delete routerule httpbin-timeout-rule
+
+ ```command
+ $ istioctl delete egressrule httpbin-egress-rule google-egress-rule
+ $ istioctl delete routerule httpbin-timeout-rule
```
1. Shutdown the [sleep](https://github.com/istio/istio/tree/master/samples/sleep) service.
- ```bash
- kubectl delete -f samples/sleep/sleep.yaml
+ ```command
+ $ kubectl delete -f samples/sleep/sleep.yaml
```
## Egress Rules and Access Control
+
Note that Istio Egress Rules are **not a security feature**. They enable access to external (out of the service mesh) services. It is up to the user to deploy appropriate security mechanisms such as firewalls to prevent unauthorized access to the external services. We are working on adding access control support for the external services.
## What's next
diff --git a/_docs/tasks/traffic-management/fault-injection.md b/_docs/tasks/traffic-management/fault-injection.md
index 43e2dd0643369..545449f972d12 100644
--- a/_docs/tasks/traffic-management/fault-injection.md
+++ b/_docs/tasks/traffic-management/fault-injection.md
@@ -1,11 +1,9 @@
---
title: Fault Injection
-overview: This task shows how to inject delays and test the resiliency of your application.
+description: This task shows how to inject delays and test the resiliency of your application.
-order: 20
+weight: 20
-layout: docs
-type: markdown
---
{% include home.html %}
@@ -21,24 +19,25 @@ This task shows how to inject delays and test the resiliency of your application
* Initialize the application version routing by either first doing the
[request routing](./request-routing.html) task or by running following
commands:
-
- > Note: This assumes you don't have any routes set yet. If you've already created conflicting route rules for the sample, you'll need to use `replace` rather than `create` in one or both of the following commands.
- ```bash
- istioctl create -f samples/bookinfo/kube/route-rule-all-v1.yaml
- istioctl create -f samples/bookinfo/kube/route-rule-reviews-test-v2.yaml
+ > This assumes you don't have any routes set yet. If you've already created conflicting route rules for the sample, you'll need to use `replace` rather than `create` in one or both of the following commands.
+
+ ```command
+ $ istioctl create -f samples/bookinfo/kube/route-rule-all-v1.yaml
+ $ istioctl create -f samples/bookinfo/kube/route-rule-reviews-test-v2.yaml
```
-> Note: This task assumes you are deploying the application on Kubernetes.
- All of the example commands are using the Kubernetes version of the rule yaml files
- (e.g., `samples/bookinfo/kube/route-rule-all-v1.yaml`). If you are running this
- task in a different environment, change `kube` to the directory that corresponds
- to your runtime (e.g., `samples/bookinfo/consul/route-rule-all-v1.yaml` for
- the Consul-based runtime).
+> This task assumes you are deploying the application on Kubernetes.
+All of the example commands are using the Kubernetes version of the rule yaml files
+(e.g., `samples/bookinfo/kube/route-rule-all-v1.yaml`). If you are running this
+task in a different environment, change `kube` to the directory that corresponds
+to your runtime (e.g., `samples/bookinfo/consul/route-rule-all-v1.yaml` for
+the Consul-based runtime).
# Fault injection
## Fault injection using HTTP delay
+
To test our Bookinfo application microservices for resiliency, we will _inject a 7s delay_
between the reviews:v2 and ratings microservices, for user "jason". Since the _reviews:v2_ service has a
10s timeout for its calls to the ratings service, we expect the end-to-end flow to
@@ -46,16 +45,14 @@ continue without any errors.
1. Create a fault injection rule to delay traffic coming from user "jason" (our test user)
- ```bash
- istioctl create -f samples/bookinfo/kube/route-rule-ratings-test-delay.yaml
+ ```command
+ $ istioctl create -f samples/bookinfo/kube/route-rule-ratings-test-delay.yaml
```
Confirm the rule is created:
- ```bash
- istioctl get routerule ratings-test-delay -o yaml
- ```
- ```yaml
+ ```command-output-as-yaml
+ $ istioctl get routerule ratings-test-delay -o yaml
apiVersion: config.istio.io/v1alpha2
kind: RouteRule
metadata:
@@ -118,28 +115,27 @@ continue without any errors.
use a 2.8 second delay and then run it against the v3 version of reviews.)
## Fault injection using HTTP Abort
+
As another test of resiliency, we will introduce an HTTP abort to the ratings microservices for the user "jason".
We expect the page to load immediately unlike the delay example and display the "product ratings not available"
message.
1. Remove the fault delay injection rule before attempting the fault abort rule
- ```bash
- istioctl delete -f samples/bookinfo/kube/route-rule-ratings-test-delay.yaml
+ ```command
+ $ istioctl delete -f samples/bookinfo/kube/route-rule-ratings-test-delay.yaml
```
1. Create the fault injection rule to send an HTTP abort for user "jason"
- ```bash
- istioctl create -f samples/bookinfo/kube/route-rule-ratings-test-abort.yaml
+ ```command
+ $ istioctl create -f samples/bookinfo/kube/route-rule-ratings-test-abort.yaml
```
Confirm the rule is created
- ```bash
- istioctl get routerules ratings-test-abort -o yaml
- ```
- ```yaml
+ ```command-output-as-yaml
+ $ istioctl get routerules ratings-test-abort -o yaml
apiVersion: config.istio.io/v1alpha2
kind: RouteRule
metadata:
@@ -174,11 +170,11 @@ message.
* Remove the application routing rules:
- ```bash
- istioctl delete -f samples/bookinfo/kube/route-rule-all-v1.yaml
- istioctl delete -f samples/bookinfo/kube/route-rule-reviews-test-v2.yaml
- istioctl delete -f samples/bookinfo/kube/route-rule-ratings-test-delay.yaml
- istioctl delete -f samples/bookinfo/kube/route-rule-ratings-test-abort.yaml
+ ```command
+ $ istioctl delete -f samples/bookinfo/kube/route-rule-all-v1.yaml
+ $ istioctl delete -f samples/bookinfo/kube/route-rule-reviews-test-v2.yaml
+ $ istioctl delete -f samples/bookinfo/kube/route-rule-ratings-test-delay.yaml
+ $ istioctl delete -f samples/bookinfo/kube/route-rule-ratings-test-abort.yaml
```
* If you are not planning to explore any follow-on tasks, refer to the
diff --git a/_docs/tasks/traffic-management/index.md b/_docs/tasks/traffic-management/index.md
index f027de1ddfa96..ec908bbf6dcca 100644
--- a/_docs/tasks/traffic-management/index.md
+++ b/_docs/tasks/traffic-management/index.md
@@ -1,11 +1,9 @@
---
title: Traffic Management
-overview: Describes tasks that demonstrate traffic routing features of Istio service mesh.
+description: Describes tasks that demonstrate traffic routing features of Istio service mesh.
-order: 10
+weight: 10
-layout: docs
-type: markdown
toc: false
---
diff --git a/_docs/tasks/traffic-management/ingress.md b/_docs/tasks/traffic-management/ingress.md
index 761a43224ef2d..f96da33322356 100644
--- a/_docs/tasks/traffic-management/ingress.md
+++ b/_docs/tasks/traffic-management/ingress.md
@@ -1,51 +1,51 @@
---
title: Istio Ingress
-overview: Describes how to configure Istio Ingress on Kubernetes.
+description: Describes how to configure Istio Ingress on Kubernetes.
-order: 30
+weight: 30
-layout: docs
-type: markdown
+redirect_from: /docs/tasks/ingress.html
---
+{% include home.html %}
This task describes how to configure Istio to expose a service outside of the service mesh cluster.
In a Kubernetes environment, the [Kubernetes Ingress Resource](https://kubernetes.io/docs/concepts/services-networking/ingress/)
allows users to specify services that should be exposed outside the
cluster. It allows one to define a backend service per virtual host and path.
-Once the Istio Ingress specification is defined, the traffic entering the cluster is directed through the `istio-ingress` service. As a result, Istio features, for example, monitoring and route rules, can be applied to the traffic entering the cluster.
+Once the Istio Ingress specification is defined, traffic entering the cluster is directed through the `istio-ingress` service. As a result, Istio features, for example, monitoring and route rules, can be applied to the traffic entering the cluster.
The Istio Ingress specification is based on the standard [Kubernetes Ingress Resource](https://kubernetes.io/docs/concepts/services-networking/ingress/) specification, with the following differences:
1. Istio Ingress specification contains `kubernetes.io/ingress.class: istio` annotation.
-2. All other annotations are ignored.
+1. All other annotations are ignored.
The following are known limitations of Istio Ingress:
1. Regular expressions in paths are not supported.
-2. Fault injection at the Ingress is not supported.
+1. Fault injection at the Ingress is not supported.
## Before you begin
* Setup Istio by following the instructions in the
[Installation guide]({{home}}/docs/setup/).
-
+
* Make sure your current directory is the `istio` directory.
-
+
* Start the [httpbin](https://github.com/istio/istio/tree/master/samples/httpbin) sample,
which will be used as the destination service to be exposed externally.
If you installed the [Istio-Initializer]({{home}}/docs/setup/kubernetes/sidecar-injection.html#automatic-sidecar-injection), do
- ```bash
- kubectl apply -f samples/httpbin/httpbin.yaml
+ ```command
+ $ kubectl apply -f samples/httpbin/httpbin.yaml
```
Without the Istio-Initializer:
- ```bash
- kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml)
+ ```command
+ $ kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml)
```
## Configuring ingress (HTTP)
@@ -74,64 +74,51 @@ The following are known limitations of Istio Ingress:
servicePort: 8000
EOF
```
-
+
`/.*` is a special Istio notation that is used to indicate a prefix
match, specifically a
[rule match configuration]({{home}}/docs/reference/config/istio.routing.v1alpha1.html#matchcondition)
of the form (`prefix: /`).
-
-### Verifying ingress
+
+### Verifying HTTP ingress
1. Determine the ingress URL:
- * If your cluster is running in an environment that supports external load balancers,
- use the ingress' external address:
+ * If your cluster is running in an environment that supports external load balancers, use the ingress' external address:
- ```bash
- kubectl get ingress simple-ingress -o wide
- ```
-
- ```bash
- NAME HOSTS ADDRESS PORTS AGE
- simple-ingress * 130.211.10.121 80 1d
- ```
+ ```command
+ $ kubectl get ingress simple-ingress -o wide
+ NAME HOSTS ADDRESS PORTS AGE
+ simple-ingress * 130.211.10.121 80 1d
+ ```
- ```bash
- export INGRESS_HOST=130.211.10.121
- ```
+ ```command
+ $ export INGRESS_HOST=130.211.10.121
+ ```
* If load balancers are not supported, use the ingress controller pod's hostIP:
-
- ```bash
- kubectl -n istio-system get po -l istio=ingress -o jsonpath='{.items[0].status.hostIP}'
- ```
- ```bash
- 169.47.243.100
- ```
+ ```command
+ $ kubectl -n istio-system get po -l istio=ingress -o jsonpath='{.items[0].status.hostIP}'
+ 169.47.243.100
+ ```
- along with the istio-ingress service's nodePort for port 80:
-
- ```bash
- kubectl -n istio-system get svc istio-ingress
- ```
-
- ```bash
- NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
- istio-ingress 10.10.10.155 80:31486/TCP,443:32254/TCP 32m
- ```
-
- ```bash
- export INGRESS_HOST=169.47.243.100:31486
- ```
-
-1. Access the httpbin service using _curl_:
+ along with the istio-ingress service's nodePort for port 80:
- ```bash
- curl -I http://$INGRESS_HOST/status/200
+ ```command
+ $ kubectl -n istio-system get svc istio-ingress
+ NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ istio-ingress 10.10.10.155 80:31486/TCP,443:32254/TCP 32m
```
+ ```command
+ $ export INGRESS_HOST=169.47.243.100:31486
```
+
+1. Access the httpbin service using _curl_:
+
+ ```command
+ $ curl -I http://$INGRESS_HOST/status/200
HTTP/1.1 200 OK
server: envoy
date: Mon, 29 Jan 2018 04:45:49 GMT
@@ -145,11 +132,8 @@ The following are known limitations of Istio Ingress:
1. Access any other URL that has not been explicitly exposed. You should
see a HTTP 404 error
- ```bash
- curl -I http://$INGRESS_HOST/headers
- ```
-
- ```
+ ```command
+ $ curl -I http://$INGRESS_HOST/headers
HTTP/1.1 404 Not Found
date: Mon, 29 Jan 2018 04:45:49 GMT
server: envoy
@@ -162,8 +146,8 @@ The following are known limitations of Istio Ingress:
A private key and certificate can be created for testing using [OpenSSL](https://www.openssl.org/).
- ```bash
- openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls.key -out /tmp/tls.crt -subj "/CN=foo.bar.com"
+ ```command
+ $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls.key -out /tmp/tls.crt -subj "/CN=foo.bar.com"
```
1. Create the secret
@@ -171,10 +155,10 @@ The following are known limitations of Istio Ingress:
Create the secret `istio-ingress-certs` in namespace `istio-system` using `kubectl`. The Istio Ingress will automatically
load the secret.
- > Note: the secret must be called `istio-ingress-certs` in `istio-system` namespace, for it to be mounted on Istio Ingress.
+ > The secret must be called `istio-ingress-certs` in `istio-system` namespace, for it to be mounted on Istio Ingress.
- ```bash
- kubectl create -n istio-system secret tls istio-ingress-certs --key /tmp/tls.key --cert /tmp/tls.crt
+ ```command
+ $ kubectl create -n istio-system secret tls istio-ingress-certs --key /tmp/tls.key --cert /tmp/tls.crt
```
1. Create the Ingress specification for the httpbin service
@@ -204,61 +188,49 @@ The following are known limitations of Istio Ingress:
EOF
```
- > Note: Because SNI is not yet supported, Envoy currently only allows a single TLS secret in the ingress.
+ > Because SNI is not yet supported, Envoy currently only allows a single TLS secret in the ingress.
> That means the secretName field in ingress resource is not used.
-### Verifying ingress
+### Verifying HTTPS ingress
1. Determine the ingress URL:
* If your cluster is running in an environment that supports external load balancers,
use the ingress' external address:
- ```bash
- kubectl get ingress secure-ingress -o wide
- ```
-
- ```bash
+ ```command
+ $ kubectl get ingress secure-ingress -o wide
NAME HOSTS ADDRESS PORTS AGE
secure-ingress * 130.211.10.121 80 1d
```
- ```bash
- export INGRESS_HOST=130.211.10.121
+ ```command
+ $ export INGRESS_HOST=130.211.10.121
```
* If load balancers are not supported, use the ingress controller pod's hostIP:
- ```bash
- kubectl -n istio-system get po -l istio=ingress -o jsonpath='{.items[0].status.hostIP}'
- ```
-
- ```bash
+ ```command
+ $ kubectl -n istio-system get po -l istio=ingress -o jsonpath='{.items[0].status.hostIP}'
169.47.243.100
```
- along with the istio-ingress service's nodePort for port 80:
-
- ```bash
- kubectl -n istio-system get svc istio-ingress
- ```
+ along with the istio-ingress service's nodePort for port 443:
- ```bash
+ ```command
+ $ kubectl -n istio-system get svc istio-ingress
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
istio-ingress 10.10.10.155 80:31486/TCP,443:32254/TCP 32m
```
- ```bash
- export INGRESS_HOST=169.47.243.100:31486
+ ```command
+ $ export INGRESS_HOST=169.47.243.100:32254
```
1. Access the httpbin service using _curl_:
- ```bash
- curl -I -k https://$INGRESS_HOST/status/200
- ```
-
- ```
+ ```command
+ $ curl -I -k https://$INGRESS_HOST/status/200
HTTP/1.1 200 OK
server: envoy
date: Mon, 29 Jan 2018 04:45:49 GMT
@@ -272,11 +244,8 @@ The following are known limitations of Istio Ingress:
1. Access any other URL that has not been explicitly exposed. You should
see a HTTP 404 error
- ```bash
- curl -I -k http://$INGRESS_HOST/headers
- ```
-
- ```
+ ```command
+ $ curl -I -k https://$INGRESS_HOST/headers
HTTP/1.1 404 Not Found
date: Mon, 29 Jan 2018 04:45:49 GMT
server: envoy
@@ -285,18 +254,15 @@ The following are known limitations of Istio Ingress:
1. Configuring RBAC for ingress key/cert
- There are service accounts which can access this ingress key/cert, and this leads to risks of
- leaking key/cert. We can set up Role-Based Access Control ("RBAC") to protect it.
- install/kubernetes/istio.yaml defines ClusterRoles and ClusterRoleBindings which allow service
- accounts in namespace istio-system to access all secret resources. We need to update or replace
+ There are service accounts which can access this ingress key/cert, and this leads to risks of
+ leaking key/cert. We can set up Role-Based Access Control ("RBAC") to protect it.
+ install/kubernetes/istio.yaml defines `ClusterRoles` and `ClusterRoleBindings` which allow service
+ accounts in namespace istio-system to access all secret resources. We need to update or replace
these RBAC set up to only allow istio-ingress-service-account to access ingress key/cert.
- We can use kubectl to list all secrets in namespace istio-system that we need to protect using RBAC.
- ```bash
- kubectl get secrets -n istio-system
- ```
- This produces the following output:
- ```bash
+ We can use `kubectl` to list all secrets in namespace istio-system that we need to protect using RBAC.
+ ```command
+ $ kubectl get secrets -n istio-system
NAME TYPE DATA AGE
istio-ingress-certs kubernetes.io/tls 2 7d
istio.istio-ingress-service-account istio.io/key-and-cert 3 7d
@@ -304,42 +270,42 @@ The following are known limitations of Istio Ingress:
```
1. Update RBAC set up for istio-pilot-service-account and istio-mixer-istio-service-account
-
- Record ClusterRole istio-mixer-istio-system and istio-pilot-istio-system. We will refer to
+
+ Record `ClusterRole` istio-mixer-istio-system and istio-pilot-istio-system. We will refer to
these copies when we redefine them to avoid breaking access permissions to other resources.
- ```bash
- kubectl describe ClusterRole istio-mixer-istio-system
- kubectl describe ClusterRole istio-pilot-istio-system
+ ```command
+ $ kubectl describe ClusterRole istio-mixer-istio-system
+ $ kubectl describe ClusterRole istio-pilot-istio-system
```
- Delete existing ClusterRoleBindings and ClusterRole.
+ Delete existing `ClusterRoleBindings` and `ClusterRole`.
- ```bash
- kubectl delete ClusterRoleBinding istio-pilot-admin-role-binding-istio-system
- kubectl delete ClusterRoleBinding istio-mixer-admin-role-binding-istio-system
- kubectl delete ClusterRole istio-mixer-istio-system
+ ```command
+ $ kubectl delete ClusterRoleBinding istio-pilot-admin-role-binding-istio-system
+ $ kubectl delete ClusterRoleBinding istio-mixer-admin-role-binding-istio-system
+ $ kubectl delete ClusterRole istio-mixer-istio-system
```
- As istio-pilot-istio-system is also bound to istio-ingress-service-account, we will delete
- istio-pilot-istio-system in next step.
-
- Create istio-mixer-istio-system.yaml, which allows istio-mixer-service-account to read
- istio.io/key-and-cert, and istio.io/ca-root types of secret instances. Refer to the recorded
- copy of istio-mixer-istio-system and add access permissions to other resources.
-
- ```bash
+ As istio-pilot-istio-system is also bound to istio-ingress-service-account, we will delete
+   istio-pilot-istio-system in the next step.
+
+ Create istio-mixer-istio-system.yaml, which allows istio-mixer-service-account to read
+ istio.io/key-and-cert, and istio.io/ca-root types of secret instances. Refer to the recorded
+ copy of istio-mixer-istio-system and add access permissions to other resources.
+
+ ```yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: istio-mixer-istio-system
- rules:
+ rules:
- apiGroups: [""] # "" indicates the core API group
resources: ["secrets"]
- resourceNames: ["istio.istio-ca-service-account"]
+ resourceNames: ["istio.istio-citadel-service-account"]
verbs: ["get", "list", "watch"]
- apiGroups: [""] # "" indicates the core API group
resources: ["secrets"]
- resourceNames: ["istio-ca-secret"]
+ resourceNames: ["istio-citadel-secret"]
verbs: ["get", "list", "watch"]
- ......
+ ......
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -354,39 +320,39 @@ The following are known limitations of Istio Ingress:
name: istio-mixer-istio-system
apiGroup: rbac.authorization.k8s.io
```
-
- ```bash
- kubectl apply -f istio-mixer-istio-system.yaml
+
+ ```command
+ $ kubectl apply -f istio-mixer-istio-system.yaml
```
1. Update RBAC set up for istio-pilot-service-account and istio-ingress-service-account
- Delete existing ClusterRoleBinding and ClusterRole.
-
- ```bash
- kubectl delete clusterrolebinding istio-ingress-admin-role-binding-istio-system
- kubectl delete ClusterRole istio-pilot-istio-system
+ Delete existing `ClusterRoleBinding` and `ClusterRole`.
+
+ ```command
+ $ kubectl delete clusterrolebinding istio-ingress-admin-role-binding-istio-system
+ $ kubectl delete ClusterRole istio-pilot-istio-system
```
-
- Create istio-pilot-istio-system.yaml, which allows istio-pilot-service-account to read
- istio.io/key-and-cert, and istio.io/ca-root types of secret instances. Refer to the recorded
- copy of istio-pilot-istio-system and add access permissions to other resources.
-
- ```bash
+
+ Create istio-pilot-istio-system.yaml, which allows istio-pilot-service-account to read
+ istio.io/key-and-cert, and istio.io/ca-root types of secret instances. Refer to the recorded
+ copy of istio-pilot-istio-system and add access permissions to other resources.
+
+ ```yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: istio-pilot-istio-system
- rules:
+ rules:
- apiGroups: [""] # "" indicates the core API group
resources: ["secrets"]
- resourceNames: ["istio.istio-ca-service-account"]
+ resourceNames: ["istio.istio-citadel-service-account"]
verbs: ["get", "list", "watch"]
- apiGroups: [""] # "" indicates the core API group
resources: ["secrets"]
- resourceNames: ["istio-ca-secret"]
+ resourceNames: ["istio-citadel-secret"]
verbs: ["get", "list", "watch"]
- ......
+ ......
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -401,28 +367,28 @@ The following are known limitations of Istio Ingress:
name: istio-pilot-istio-system
apiGroup: rbac.authorization.k8s.io
```
-
- ```bash
- kubectl apply -f istio-pilot-istio-system.yaml
- ```
-
- Create istio-ingress-istio-system.yaml which allows istio-ingress-service-account to read
- istio-ingress-certs as well as other secret instances. Refer to the recorded copy of
+
+ ```command
+ $ kubectl apply -f istio-pilot-istio-system.yaml
+ ```
+
+ Create istio-ingress-istio-system.yaml which allows istio-ingress-service-account to read
+ istio-ingress-certs as well as other secret instances. Refer to the recorded copy of
istio-pilot-istio-system and add access permissions to other resources.
-
- ```bash
+
+ ```yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: istio-ingress-istio-system
- rules:
+ rules:
- apiGroups: [""] # "" indicates the core API group
resources: ["secrets"]
- resourceNames: ["istio.istio-ca-service-account"]
+ resourceNames: ["istio.istio-citadel-service-account"]
verbs: ["get", "list", "watch"]
- apiGroups: [""] # "" indicates the core API group
resources: ["secrets"]
- resourceNames: ["istio-ca-secret"]
+ resourceNames: ["istio-citadel-secret"]
verbs: ["get", "list", "watch"]
......
- apiGroups: [""] # "" indicates the core API group
@@ -443,92 +409,82 @@ The following are known limitations of Istio Ingress:
name: istio-ingress-istio-system
apiGroup: rbac.authorization.k8s.io
```
-
- ```bash
- kubectl apply -f istio-ingress-istio-system.yaml
+
+ ```command
+ $ kubectl apply -f istio-ingress-istio-system.yaml
```
-
-1. Update RBAC set up for istio-ca-service-account
- Record ClusterRole istio-ca-istio-system.
- ```bash
- kubectl describe ClusterRole istio-ca-istio-system
+1. Update RBAC set up for istio-citadel-service-account
+
+ Record `ClusterRole` istio-citadel-istio-system.
+ ```command
+ $ kubectl describe ClusterRole istio-citadel-istio-system
```
-
- Create istio-ca-istio-system.yaml, which updates existing ClusterRole istio-ca-istio-system
- that allows istio-ca-service-account to read, create and modify all istio.io/key-and-cert, and
+
+ Create istio-citadel-istio-system.yaml, which updates existing `ClusterRole` istio-citadel-istio-system
+ that allows istio-citadel-service-account to read, create and modify all istio.io/key-and-cert, and
istio.io/ca-root types of secrets.
-
- ```bash
+
+ ```yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
- name: istio-ca-istio-system
+ name: istio-citadel-istio-system
rules:
- apiGroups: [""] # "" indicates the core API group
resources: ["secrets"]
- resourceNames: ["istio.istio-ca-service-account"]
+ resourceNames: ["istio.istio-citadel-service-account"]
verbs: ["get", "list", "watch", "create", "update"]
- apiGroups: [""] # "" indicates the core API group
resources: ["secrets"]
- resourceNames: ["istio-ca-secret"]
+ resourceNames: ["istio-citadel-secret"]
verbs: ["get", "list", "watch", "create", "update"]
......
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
- name: istio-ca-role-binding-istio-system
+ name: istio-citadel-role-binding-istio-system
subjects:
- kind: ServiceAccount
- name: istio-ca-service-account
+ name: istio-citadel-service-account
namespace: istio-system
roleRef:
kind: ClusterRole
- name: istio-ca-istio-system
+ name: istio-citadel-istio-system
apiGroup: rbac.authorization.k8s.io
```
- ```bash
- kubectl apply -f istio-ca-istio-system.yaml
- ```
-1. Verify that the new ClusterRoles work as expected
-
- ```bash
- kubectl auth can-i get secret/istio-ingress-certs --as system:serviceaccount:istio-system:istio-ingress-service-account -n istio-system
+ ```command
+ $ kubectl apply -f istio-citadel-istio-system.yaml
```
- whose output should be
- ```bash
+1. Verify that the new `ClusterRoles` work as expected
+
+ ```command
+ $ kubectl auth can-i get secret/istio-ingress-certs --as system:serviceaccount:istio-system:istio-ingress-service-account -n istio-system
yes
```
- In this command, we can replace verb "get" with "list" or "watch", and the output should always
+ In this command, we can replace verb "get" with "list" or "watch", and the output should always
be "yes". Now let us test with other service accounts.
-
- ```bash
- kubectl auth can-i get secret/istio-ingress-certs --as system:serviceaccount:istio-system:istio-pilot-service-account -n istio-system
- ```
- whose output should be
- ```bash
+
+ ```command
+ $ kubectl auth can-i get secret/istio-ingress-certs --as system:serviceaccount:istio-system:istio-pilot-service-account -n istio-system
no - Unknown user "system:serviceaccount:istio-system:istio-pilot-service-account"
```
- In this command, we can replace service account with istio-mixer-service-account, or
- istio-ca-service-account, we can also replace verb "get" with "watch" or "list", and the output
+ In this command, we can replace service account with istio-mixer-service-account, or
+ istio-citadel-service-account, we can also replace verb "get" with "watch" or "list", and the output
should look similarly.
- Accessibility to secret resources except istio-ingress-certs should remain the same for
- istio-ca-service-account, istio-ingress-service-account, istio-pilot-service-account and
+ Accessibility to secret resources except istio-ingress-certs should remain the same for
+ istio-citadel-service-account, istio-ingress-service-account, istio-pilot-service-account and
istio-mixer-service-account.
- ```bash
- kubectl auth can-i get secret/istio-ca-service-account-token-r14xm --as system:serviceaccount:istio-system:istio-ca-service-account -n istio-system
- ```
- whose output should be
- ```bash
+ ```command
+ $ kubectl auth can-i get secret/istio-citadel-service-account-token-r14xm --as system:serviceaccount:istio-system:istio-citadel-service-account -n istio-system
yes
```
-
+
1. Cleanup
- We can delete these newly defined ClusterRoles and ClusterRoleBindings, and restore original
- ClusterRoles and ClusterRoleBindings according to those recorded copies.
-
+ We can delete these newly defined `ClusterRoles` and `ClusterRoleBindings`, and restore original
+ `ClusterRoles` and `ClusterRoleBindings` according to those recorded copies.
## Using Istio Routing Rules with Ingress
@@ -572,13 +528,13 @@ instead of the expected 10s delay.
You can use other features of the route rules such as redirects, rewrites,
routing to multiple versions, regular expression based match in HTTP
-headers, websocket upgrades, timeouts, retries, etc. Please refer to the
+headers, WebSocket upgrades, timeouts, retries, etc. Please refer to the
[routing rules]({{home}}/docs/reference/config/istio.routing.v1alpha1.html)
for more details.
-> Note 1: Fault injection does not work at the Ingress
-
-> Note 2: When matching requests in the routing rule, use the same exact
+> Fault injection does not work at the Ingress
+>
+> When matching requests in the routing rule, use the same exact
> path or prefix as the one used in the Ingress specification.
## Understanding ingresses
@@ -587,7 +543,7 @@ Ingresses provide gateways for external traffic to enter the Istio service
mesh and make the traffic management and policy features of Istio available
for edge services.
-The servicePort field in the Ingress specification can take a port number
+The `servicePort` field in the Ingress specification can take a port number
(integer) or a name. The port name must follow the Istio port naming
conventions (e.g., `grpc-*`, `http2-*`, `http-*`, etc.) in order to
function properly. The name used must match the port name in the backend
@@ -601,19 +557,18 @@ an Istio route rule.
## Cleanup
1. Remove the secret and Ingress Resource definitions.
-
- ```bash
- kubectl delete ingress simple-ingress secure-ingress
- kubectl delete -n istio-system secret istio-ingress-certs
+
+ ```command
+ $ kubectl delete ingress simple-ingress secure-ingress
+ $ kubectl delete -n istio-system secret istio-ingress-certs
```
1. Shutdown the [httpbin](https://github.com/istio/istio/tree/master/samples/httpbin) service.
- ```bash
- kubectl delete -f samples/httpbin/httpbin.yaml
+ ```command
+ $ kubectl delete -f samples/httpbin/httpbin.yaml
```
-
## What's next
* Learn more about [Ingress Resources](https://kubernetes.io/docs/concepts/services-networking/ingress/).
diff --git a/_docs/tasks/traffic-management/mirroring.md b/_docs/tasks/traffic-management/mirroring.md
index 2aec571cdb0bb..265ef8c0df190 100644
--- a/_docs/tasks/traffic-management/mirroring.md
+++ b/_docs/tasks/traffic-management/mirroring.md
@@ -1,21 +1,20 @@
---
title: Mirroring
-overview: This task demonstrates the traffic shadowing/mirroring capabilities of Istio
+description: Demonstrates Istio's traffic shadowing/mirroring capabilities
-order: 60
+weight: 60
-layout: docs
-type: markdown
---
+{% include home.html %}
-This task demonstrates the traffic shadowing/mirroring capabilites of Istio. Traffic mirroring is a powerful concept that allows feature teams to bring changes to production with as little risk as possible. Mirroring brings a copy of live traffic to a mirrored service and happens out of band of the critical request path for the primary service.
-
+This task demonstrates Istio's traffic shadowing/mirroring capabilities. Traffic mirroring is a powerful concept that allows feature teams to bring
+changes to production with as little risk as possible. Mirroring brings a copy of live traffic to a mirrored service and happens out of band of the critical request path for the primary service.
## Before you begin
* Setup Istio by following the instructions in the
[Installation guide]({{home}}/docs/setup/).
-
+
* Start two versions of the `httpbin` service that have access logging enabled
httpbin-v1:
@@ -86,14 +85,12 @@ spec:
selector:
app: httpbin
EOF
-```
-
-
+```
* Start the `sleep` service so we can use `curl` to provide load
sleep service:
- ```bash
+```bash
cat < Note: This task assumes you are deploying the application on Kubernetes.
- All of the example commands are using the Kubernetes version of the rule yaml files
- (e.g., `samples/bookinfo/kube/route-rule-all-v1.yaml`). If you are running this
- task in a different environment, change `kube` to the directory that corresponds
- to your runtime (e.g., `samples/bookinfo/consul/route-rule-all-v1.yaml` for
- the Consul-based runtime).
+> This task assumes you are deploying the application on Kubernetes.
+All of the example commands are using the Kubernetes version of the rule yaml files
+(e.g., `samples/bookinfo/kube/route-rule-all-v1.yaml`). If you are running this
+task in a different environment, change `kube` to the directory that corresponds
+to your runtime (e.g., `samples/bookinfo/consul/route-rule-all-v1.yaml` for
+the Consul-based runtime).
## Content-based routing
@@ -34,25 +33,23 @@ star ratings.
This is because without an explicit default version set, Istio will
route requests to all available versions of a service in a random fashion.
-> Note: This task assumes you don't have any routes set yet. If you've already created conflicting route rules for the sample,
- you'll need to use `replace` rather than `create` in one or both of the following commands.
+> This task assumes you don't have any routes set yet. If you've already created conflicting route rules for the sample,
+you'll need to use `replace` rather than `create` in one or both of the following commands.
1. Set the default version for all microservices to v1.
- ```bash
- istioctl create -f samples/bookinfo/kube/route-rule-all-v1.yaml
+ ```command
+ $ istioctl create -f samples/bookinfo/kube/route-rule-all-v1.yaml
```
- > Note: In a Kubernetes deployment of Istio, you can replace `istioctl`
+ > In a Kubernetes deployment of Istio, you can replace `istioctl`
> with `kubectl` in the above, and for all other CLI commands.
> Note, however, that `kubectl` currently does not provide input validation.
You can display the routes that are defined with the following command:
- ```bash
- istioctl get routerules -o yaml
- ```
- ```yaml
+ ```command-output-as-yaml
+ $ istioctl get routerules -o yaml
apiVersion: config.istio.io/v1alpha2
kind: RouteRule
metadata:
@@ -124,16 +121,14 @@ route requests to all available versions of a service in a random fashion.
Lets enable the ratings service for test user "jason" by routing productpage traffic to
`reviews:v2` instances.
- ```bash
- istioctl create -f samples/bookinfo/kube/route-rule-reviews-test-v2.yaml
+ ```command
+ $ istioctl create -f samples/bookinfo/kube/route-rule-reviews-test-v2.yaml
```
Confirm the rule is created:
- ```bash
- istioctl get routerule reviews-test-v2 -o yaml
- ```
- ```yaml
+ ```command-output-as-yaml
+ $ istioctl get routerule reviews-test-v2 -o yaml
apiVersion: config.istio.io/v1alpha2
kind: RouteRule
metadata:
@@ -172,9 +167,9 @@ all users to v2, optionally in a gradual fashion. We'll explore this in a separa
* Remove the application routing rules.
- ```bash
- istioctl delete -f samples/bookinfo/kube/route-rule-all-v1.yaml
- istioctl delete -f samples/bookinfo/kube/route-rule-reviews-test-v2.yaml
+ ```command
+ $ istioctl delete -f samples/bookinfo/kube/route-rule-all-v1.yaml
+ $ istioctl delete -f samples/bookinfo/kube/route-rule-reviews-test-v2.yaml
```
* If you are not planning to explore any follow-on tasks, refer to the
diff --git a/_docs/tasks/traffic-management/request-timeouts.md b/_docs/tasks/traffic-management/request-timeouts.md
index c2a53883bb0ed..fca08844b098e 100644
--- a/_docs/tasks/traffic-management/request-timeouts.md
+++ b/_docs/tasks/traffic-management/request-timeouts.md
@@ -1,17 +1,14 @@
---
title: Setting Request Timeouts
-overview: This task shows you how to setup request timeouts in Envoy using Istio.
-
-order: 28
+description: This task shows you how to setup request timeouts in Envoy using Istio.
+
+weight: 28
-layout: docs
-type: markdown
---
{% include home.html %}
This task shows you how to setup request timeouts in Envoy using Istio.
-
## Before you begin
* Setup Istio by following the instructions in the
@@ -21,16 +18,16 @@ This task shows you how to setup request timeouts in Envoy using Istio.
* Initialize the application version routing by running the following command:
- ```bash
- istioctl create -f samples/bookinfo/kube/route-rule-all-v1.yaml
+ ```command
+ $ istioctl create -f samples/bookinfo/kube/route-rule-all-v1.yaml
```
-> Note: This task assumes you are deploying the application on Kubernetes.
- All of the example commands are using the Kubernetes version of the rule yaml files
- (e.g., `samples/bookinfo/kube/route-rule-all-v1.yaml`). If you are running this
- task in a different environment, change `kube` to the directory that corresponds
- to your runtime (e.g., `samples/bookinfo/consul/route-rule-all-v1.yaml` for
- the Consul-based runtime).
+> This task assumes you are deploying the application on Kubernetes.
+All of the example commands are using the Kubernetes version of the rule yaml files
+(e.g., `samples/bookinfo/kube/route-rule-all-v1.yaml`). If you are running this
+task in a different environment, change `kube` to the directory that corresponds
+to your runtime (e.g., `samples/bookinfo/consul/route-rule-all-v1.yaml` for
+the Consul-based runtime).
## Request timeouts
@@ -39,7 +36,7 @@ By default, the timeout is 15 seconds, but in this task we'll override the `revi
timeout to 1 second.
To see its effect, however, we'll also introduce an artificial 2 second delay in calls
to the `ratings` service.
-
+
1. Route requests to v2 of the `reviews` service, i.e., a version that calls the `ratings` service
```bash
@@ -84,7 +81,7 @@ to the `ratings` service.
but there is a 2 second delay whenever you refresh the page.
1. Now add a 1 second request timeout for calls to the `reviews` service
-
+
```bash
cat < Note: This task assumes you are deploying the application on Kubernetes.
- All of the example commands are using the Kubernetes version of the rule yaml files
- (e.g., `samples/bookinfo/kube/route-rule-all-v1.yaml`). If you are running this
- task in a different environment, change `kube` to the directory that corresponds
- to your runtime (e.g., `samples/bookinfo/consul/route-rule-all-v1.yaml` for
- the Consul-based runtime).
+> This task assumes you are deploying the application on Kubernetes.
+All of the example commands are using the Kubernetes version of the rule yaml files
+(e.g., `samples/bookinfo/kube/route-rule-all-v1.yaml`). If you are running this
+task in a different environment, change `kube` to the directory that corresponds
+to your runtime (e.g., `samples/bookinfo/consul/route-rule-all-v1.yaml` for
+the Consul-based runtime).
## Weight-based version routing
1. Set the default version for all microservices to v1.
- ```bash
- istioctl create -f samples/bookinfo/kube/route-rule-all-v1.yaml
+ ```command
+ $ istioctl create -f samples/bookinfo/kube/route-rule-all-v1.yaml
```
1. Confirm v1 is the active version of the `reviews` service by opening http://$GATEWAY_URL/productpage in your browser.
@@ -42,27 +40,25 @@ two steps: 50%, 100%.
You should see the Bookinfo application productpage displayed.
Notice that the `productpage` is displayed with no rating stars since `reviews:v1` does not access the ratings service.
- > Note: If you previously ran the [request routing](./request-routing.html) task, you may need to either log out
- as test user "jason" or delete the test rules that were created exclusively for him:
+ > If you previously ran the [request routing](./request-routing.html) task, you may need to either log out
+ as test user "jason" or delete the test rules that were created exclusively for him:
- ```bash
- istioctl delete routerule reviews-test-v2
+ ```command
+ $ istioctl delete routerule reviews-test-v2
```
1. First, transfer 50% of the traffic from `reviews:v1` to `reviews:v3` with the following command:
- ```bash
- istioctl replace -f samples/bookinfo/kube/route-rule-reviews-50-v3.yaml
+ ```command
+ $ istioctl replace -f samples/bookinfo/kube/route-rule-reviews-50-v3.yaml
```
Notice that we are using `istioctl replace` instead of `create`.
Confirm the rule was replaced:
- ```bash
- istioctl get routerule reviews-default -o yaml
- ```
- ```yaml
+ ```command-output-as-yaml
+ $ istioctl get routerule reviews-default -o yaml
apiVersion: config.istio.io/v1alpha2
kind: RouteRule
metadata:
@@ -83,13 +79,13 @@ two steps: 50%, 100%.
1. Refresh the `productpage` in your browser and you should now see *red* colored star ratings approximately 50% of the time.
- > Note: With the current Envoy sidecar implementation, you may need to refresh the `productpage` very many times
+ > With the current Envoy sidecar implementation, you may need to refresh the `productpage` very many times
> to see the proper distribution. It may require 15 refreshes or more before you see any change. You can modify the rules to route 90% of the traffic to v3 to see red stars more often.
1. When version v3 of the `reviews` microservice is considered stable, we can route 100% of the traffic to `reviews:v3`:
- ```bash
- istioctl replace -f samples/bookinfo/kube/route-rule-reviews-v3.yaml
+ ```command
+ $ istioctl replace -f samples/bookinfo/kube/route-rule-reviews-v3.yaml
```
You can now log into the `productpage` as any user and you should always see book reviews
@@ -108,8 +104,8 @@ For more about version routing with autoscaling, check out [Canary Deployments u
* Remove the application routing rules.
- ```bash
- istioctl delete -f samples/bookinfo/kube/route-rule-all-v1.yaml
+ ```command
+ $ istioctl delete -f samples/bookinfo/kube/route-rule-all-v1.yaml
```
* If you are not planning to explore any follow-on tasks, refer to the
@@ -118,4 +114,4 @@ For more about version routing with autoscaling, check out [Canary Deployments u
## What's next
-* Learn more about [request routing]({{home}}/docs/concepts/traffic-management/rules-configuration.html).
+* Learn more about [request routing]({{home}}/docs/concepts/traffic-management/request-routing.html).
diff --git a/_faq/general/how-do-i-contribute.md b/_faq/general/how-do-i-contribute.md
index 046e6a1d3ca77..a8485b5bf97f8 100644
--- a/_faq/general/how-do-i-contribute.md
+++ b/_faq/general/how-do-i-contribute.md
@@ -1,7 +1,6 @@
---
title: How can I contribute?
-order: 70
-type: markdown
+weight: 70
---
{% include home.html %}
@@ -9,5 +8,5 @@ Contributions are highly welcome. We look forward to community feedback, additio
The code repositories are hosted on [GitHub](https://github.com/istio). Please see our[Contribution Guidelines](https://github.com/istio/community/blob/master/CONTRIBUTING.md) to learn how to contribute.
-In addition to the code, there are other ways to contribute to the Istio [community]({{home}}/community), including on
+In addition to the code, there are other ways to contribute to the Istio [community]({{home}}/community.html), including on
[Stack Overflow](https://stackoverflow.com/questions/tagged/istio), and the [mailing list](https://groups.google.com/forum/#!forum/istio-users).
diff --git a/_faq/general/how-do-i-get-started.md b/_faq/general/how-do-i-get-started.md
index 7172e10a85048..05889f3c9169b 100644
--- a/_faq/general/how-do-i-get-started.md
+++ b/_faq/general/how-do-i-get-started.md
@@ -1,7 +1,6 @@
---
title: How do I get started using Istio?
-order: 30
-type: markdown
+weight: 30
---
{% include home.html %}
diff --git a/_faq/general/how-was-istio-started.md b/_faq/general/how-was-istio-started.md
index c087c2f8ae393..1e1bd1efa993f 100644
--- a/_faq/general/how-was-istio-started.md
+++ b/_faq/general/how-was-istio-started.md
@@ -1,7 +1,6 @@
---
title: How was Istio started?
-order: 50
-type: markdown
+weight: 50
---
{% include home.html %}
diff --git a/_faq/general/istio-doesnt-work.md b/_faq/general/istio-doesnt-work.md
index b1ee18cc0766e..3abcb6bee0bfc 100644
--- a/_faq/general/istio-doesnt-work.md
+++ b/_faq/general/istio-doesnt-work.md
@@ -1,7 +1,6 @@
---
title: Istio doesn't work - what do I do?
-order: 90
-type: markdown
+weight: 90
---
{% include home.html %}
diff --git a/_faq/general/istio-partners-and-vendors.md b/_faq/general/istio-partners-and-vendors.md
index d9d775dd32779..8c50fd49bf5e4 100644
--- a/_faq/general/istio-partners-and-vendors.md
+++ b/_faq/general/istio-partners-and-vendors.md
@@ -1,12 +1,8 @@
---
title: How can I discover more about Partner and Vendor opportunities?
-order: 75
-type: markdown
+weight: 75
---
{% include home.html %}
-
If you'd like to speak to the Istio team about a potential integration
and/or a partnership opportunity, please complete this [form](https://goo.gl/forms/ax2SdpC6FpVh9Th02).
-
-
diff --git a/_faq/general/roadmap.md b/_faq/general/roadmap.md
index bf5f4fe99965d..c7f045460ca6a 100644
--- a/_faq/general/roadmap.md
+++ b/_faq/general/roadmap.md
@@ -1,7 +1,6 @@
---
title: What is Istio's roadmap?
-order: 140
-type: markdown
+weight: 140
---
{% include home.html %}
diff --git a/_faq/general/what-deployment-environment.md b/_faq/general/what-deployment-environment.md
index 1ed655e0ce9e6..1e42ae36361ba 100644
--- a/_faq/general/what-deployment-environment.md
+++ b/_faq/general/what-deployment-environment.md
@@ -1,11 +1,10 @@
---
title: What deployment environments are supported?
-order: 60
-type: markdown
+weight: 60
---
{% include home.html %}
-Istio is designed and built to be platform-independent. For our
-{{ site.data.istio.version }} release, Istio supports environments running
+Istio is designed and built to be platform-independent. For our
+{{site.data.istio.version}} release, Istio supports environments running
container orchestration platforms such as Kubernetes (v1.7.4 or greater)
and Nomad (with Consul).
diff --git a/_faq/general/what-does-istio-mean.md b/_faq/general/what-does-istio-mean.md
index 1585315cebf95..8f69f7c519546 100644
--- a/_faq/general/what-does-istio-mean.md
+++ b/_faq/general/what-does-istio-mean.md
@@ -1,7 +1,6 @@
---
title: What does the word 'Istio' mean?
-order: 160
-type: markdown
+weight: 160
---
{% include home.html %}
diff --git a/_faq/general/what-is-istio.md b/_faq/general/what-is-istio.md
index 1c5f8afcc68d3..7808a159faedd 100644
--- a/_faq/general/what-is-istio.md
+++ b/_faq/general/what-is-istio.md
@@ -1,7 +1,6 @@
---
title: What is Istio?
-order: 10
-type: markdown
+weight: 10
---
{% include home.html %}
@@ -9,7 +8,7 @@ Istio is an open platform-independent service mesh that provides traffic managem
*Open*: Istio is being developed and maintained as open-source software. We encourage contributions and feedback from the community at-large.
-*Platform-independent*: Istio is not targeted at any specific deployment environment. During the initial stages of development, Istio will support
+*Platform-independent*: Istio is not targeted at any specific deployment environment. During the initial stages of development, Istio will support
Kubernetes-based deployments. However, Istio is being built to enable rapid and easy adaptation to other environments.
*Service mesh*: Istio is designed to manage communications between microservices and applications. Without requiring changes to the underlying services, Istio provides automated baseline traffic resilience, service metrics collection, distributed tracing, traffic encryption, protocol upgrades, and advanced routing functionality for all service-to-service communication.
diff --git a/_faq/general/what-is-the-license.md b/_faq/general/what-is-the-license.md
index 6edf7f9a40c0a..b5024f35276cb 100644
--- a/_faq/general/what-is-the-license.md
+++ b/_faq/general/what-is-the-license.md
@@ -1,7 +1,6 @@
---
title: What is the license?
-order: 40
-type: markdown
+weight: 40
---
{% include home.html %}
diff --git a/_faq/general/where-is-the-documentation.md b/_faq/general/where-is-the-documentation.md
index 39dc0e175cc72..3d3d3ccdfe1b6 100644
--- a/_faq/general/where-is-the-documentation.md
+++ b/_faq/general/where-is-the-documentation.md
@@ -1,21 +1,13 @@
---
title: Where is the documentation?
-order: 80
-type: markdown
+weight: 80
---
{% include home.html %}
Check out the [documentation]({{home}}/docs/) right here on istio.io. The docs include
[concept overviews]({{home}}/docs/concepts/),
-[task guides]({{home}}/docs/tasks/),
+[task guides]({{home}}/docs/tasks/),
[guides]({{home}}/docs/guides/),
and the [complete reference documentation]({{home}}/docs/reference/).
-Detailed developer-level documentation is maintained for each component in GitHub, alongside the code. Please visit each repository for those docs:
-
-* [Envoy](https://envoyproxy.github.io/envoy/)
-
-* [Pilot](https://github.com/istio/istio/tree/master/pilot/doc)
-
-* [Mixer](https://github.com/istio/istio/tree/master/mixer/doc)
-
+Detailed developer-level documentation is maintained on our [Wiki](https://github.com/istio/istio/wiki)
diff --git a/_faq/general/why-use-istio.md b/_faq/general/why-use-istio.md
index 851fa23c69de6..6f21b6257f64a 100644
--- a/_faq/general/why-use-istio.md
+++ b/_faq/general/why-use-istio.md
@@ -1,7 +1,6 @@
---
title: Why would I want to use Istio?
-order: 20
-type: markdown
+weight: 20
---
{% include home.html %}
diff --git a/_faq/mixer/attribute-expressions.md b/_faq/mixer/attribute-expressions.md
index 6efda9280b690..e7a73c20b4a4e 100644
--- a/_faq/mixer/attribute-expressions.md
+++ b/_faq/mixer/attribute-expressions.md
@@ -1,7 +1,6 @@
---
title: What is the full set of attribute expressions Mixer supports?
-order: 20
-type: markdown
+weight: 20
---
{% include home.html %}
diff --git a/_faq/mixer/mixer-self-monitoring.md b/_faq/mixer/mixer-self-monitoring.md
index e5539eef51f24..d771d3a0db6a2 100644
--- a/_faq/mixer/mixer-self-monitoring.md
+++ b/_faq/mixer/mixer-self-monitoring.md
@@ -1,7 +1,6 @@
---
title: Does Mixer provide any self-monitoring?
-order: 30
-type: markdown
+weight: 30
---
{% include home.html %}
@@ -17,8 +16,8 @@ function:
Mixer logs can be accessed via a `kubectl logs` command, as follows:
-```bash
-kubectl -n istio-system logs $(kubectl -n istio-system get pods -listio=mixer -o jsonpath='{.items[0].metadata.name}') mixer
+```command
+$ kubectl -n istio-system logs $(kubectl -n istio-system get pods -listio=mixer -o jsonpath='{.items[0].metadata.name}') mixer
```
Mixer trace generation is controlled by the command-line flag `traceOutput`. If
the flag value is set to `STDOUT` or `STDERR` trace data will be written
diff --git a/_faq/mixer/seeing-mixer-config.md b/_faq/mixer/seeing-mixer-config.md
index a8bcfeea6d319..6a197d75ec090 100644
--- a/_faq/mixer/seeing-mixer-config.md
+++ b/_faq/mixer/seeing-mixer-config.md
@@ -1,26 +1,20 @@
---
-title: How do I see all of the configuration for Mixer?
-order: 10
-type: markdown
+title: How do I see all Mixer's configuration?
+weight: 10
---
{% include home.html %}
Configuration for *instances*, *handlers*, and *rules* is stored as Kubernetes
[Custom Resources](https://kubernetes.io/docs/concepts/api-extension/custom-resources/).
-Configuration may be accessed by using `kubectl` to query the Kubernetes [API
-server](https://kubernetes.io/docs/admin/kube-apiserver/) for the resources.
+Configuration may be accessed by using `kubectl` to query the Kubernetes
+API server for the resources.
-#### Rules
+## Rules
To see the list of all rules, execute the following:
-```bash
-kubectl get rules --all-namespaces
-```
-
-Output will be similar to:
-
-```
+```command
+$ kubectl get rules --all-namespaces
NAMESPACE NAME KIND
default mongoprom rule.v1alpha2.config.istio.io
istio-system promhttp rule.v1alpha2.config.istio.io
@@ -30,11 +24,11 @@ istio-system stdio rule.v1alpha2.config.istio.io
To see an individual rule configuration, execute the following:
-```bash
-kubectl -n get rules -o yaml
+```command
+$ kubectl -n get rules -o yaml
```
-#### Handlers
+## Handlers
Handlers are defined based on Kubernetes [Custom Resource
Definitions](https://kubernetes.io/docs/concepts/api-extension/custom-resources/#customresourcedefinitions)
@@ -42,13 +36,8 @@ for adapters.
First, identify the list of adapter kinds:
-```bash
-kubectl get crd -listio=mixer-adapter
-```
-
-The output will be similar to:
-
-```
+```command
+$ kubectl get crd -listio=mixer-adapter
NAME KIND
deniers.config.istio.io CustomResourceDefinition.v1beta1.apiextensions.k8s.io
listcheckers.config.istio.io CustomResourceDefinition.v1beta1.apiextensions.k8s.io
@@ -63,24 +52,24 @@ svcctrls.config.istio.io CustomResourceDefinition.v1beta1.apiextensions.k8
Then, for each adapter kind in that list, issue the following command:
-```bash
-kubectl get --all-namespaces
+```command
+$ kubectl get --all-namespaces
```
Output for `stdios` will be similar to:
-```
+```plain
NAMESPACE NAME KIND
istio-system handler stdio.v1alpha2.config.istio.io
```
To see an individual handler configuration, execute the following:
-```bash
-kubectl -n get -o yaml
+```command
+$ kubectl -n get -o yaml
```
-#### Instances
+## Instances
Instances are defined according to Kubernetes [Custom Resource
Definitions](https://kubernetes.io/docs/concepts/api-extension/custom-resources/#customresourcedefinitions)
@@ -88,13 +77,8 @@ for instances.
First, identify the list of instance kinds:
-```bash
-kubectl get crd -listio=mixer-instance
-```
-
-The output will be similar to:
-
-```
+```command
+$ kubectl get crd -listio=mixer-instance
NAME KIND
checknothings.config.istio.io CustomResourceDefinition.v1beta1.apiextensions.k8s.io
listentries.config.istio.io CustomResourceDefinition.v1beta1.apiextensions.k8s.io
@@ -106,13 +90,13 @@ reportnothings.config.istio.io CustomResourceDefinition.v1beta1.apiextensions.
Then, for each instance kind in that list, issue the following command:
-```bash
-kubectl get --all-namespaces
+```command
+$ kubectl get --all-namespaces
```
Output for `metrics` will be similar to:
-```
+```plain
NAMESPACE NAME KIND
default mongoreceivedbytes metric.v1alpha2.config.istio.io
default mongosentbytes metric.v1alpha2.config.istio.io
@@ -126,6 +110,6 @@ istio-system tcpbytesent metric.v1alpha2.config.istio.io
To see an individual instance configuration, execute the following:
-```bash
-kubectl -n get -o yaml
+```command
+$ kubectl -n get -o yaml
```
diff --git a/_faq/mixer/why-mixer.md b/_faq/mixer/why-mixer.md
index 0b61ac8788417..c056304b80d1a 100644
--- a/_faq/mixer/why-mixer.md
+++ b/_faq/mixer/why-mixer.md
@@ -1,7 +1,6 @@
---
title: Why does Istio need Mixer?
-order: 0
-type: markdown
+weight: 1
---
{% include home.html %}
@@ -18,7 +17,7 @@ requirements. Keeping the components separate enables independent component-appr
- *Resource Usage*.
Istio depends on being able to deploy many instances of its proxy, making it important to minimize the
-cost of each individual instance. Moving Mixer's complex logic into a distinct component makes it
+cost of each individual instance. Moving Mixer's complex logic into a distinct component makes it
possible for Envoy to remain svelte and agile.
- *Reliability*.
@@ -28,7 +27,7 @@ it creates distinct failure domains which enables Envoy to continue operating ev
fails, preventing outages.
- *Isolation*.
-Mixer provides a level of insulation between Istio and the infrastructure backends. Each Envoy instance can be configured to have a
+Mixer provides a level of insulation between Istio and the infrastructure backends. Each Envoy instance can be configured to have a
very narrow scope of interaction, limiting the impact of potential attacks.
- *Extensibility*.
diff --git a/_faq/mixer/writing-custom-adapters.md b/_faq/mixer/writing-custom-adapters.md
index 70ceb25826818..a0a4482cb89aa 100644
--- a/_faq/mixer/writing-custom-adapters.md
+++ b/_faq/mixer/writing-custom-adapters.md
@@ -1,9 +1,8 @@
---
title: How can I write a custom adapter for Mixer?
-order: 40
-type: markdown
+weight: 40
---
{% include home.html %}
Learn how to implement a new adapter for Mixer by consulting the
-[Adapter Developer's Guide](https://github.com/istio/istio/blob/master/mixer/doc/adapters.md).
+[Adapter Developer's Guide](https://github.com/istio/istio/wiki/Mixer-Adapter-Dev-Guide).
diff --git a/_faq/security/accessing-control-services.md b/_faq/security/accessing-control-services.md
index 9574ea2b31a65..0144fe1553eff 100644
--- a/_faq/security/accessing-control-services.md
+++ b/_faq/security/accessing-control-services.md
@@ -1,7 +1,6 @@
---
title: How to disable Auth on clients to access the Kubernetes API Server (or any control services that don't have Istio sidecar)?
-order: 60
-type: markdown
+weight: 60
---
Starting with release 0.3, edit the `mtlsExcludedServices` list in Istio config
@@ -11,11 +10,11 @@ already contains `kubernetes.default.svc.cluster.local`, which is the default
service name of the Kubernetes API server.
For a quick reference, here are commands to edit Istio configmap and to restart pilot.
-```bash
-kubectl edit configmap -n istio-system istio
-kubectl delete pods -n istio-system -l istio=pilot
+```command
+$ kubectl edit configmap -n istio-system istio
+$ kubectl delete pods -n istio-system -l istio=pilot
```
-> Note: DO NOT use this approach to disable mTLS for services that are managed
+> Do not use this approach to disable mTLS for services that are managed
by Istio (i.e. using Istio sidecar). Instead, use service-level annotations
to overwrite the authentication policy (see above).
diff --git a/_faq/security/auth-mix-and-match.md b/_faq/security/auth-mix-and-match.md
index 44b787aa9fcd0..a565619959d6b 100644
--- a/_faq/security/auth-mix-and-match.md
+++ b/_faq/security/auth-mix-and-match.md
@@ -1,9 +1,8 @@
---
title: Can I enable Istio Auth with some services while disable others in the same cluster?
-order: 30
-type: markdown
+weight: 30
---
-Starting with release 0.3, you can use service-level annotations to disable (or enable) Istio Auth for particular service-port.
+Starting with release 0.3, you can use service-level annotations to disable (or enable) Istio Auth for particular service-port.
The annotation key should be `auth.istio.io/{port_number}`, and the value should be `NONE` (to disable), or `MUTUAL_TLS` (to enable).
Example: disable Istio Auth on port 9080 for service `details`.
diff --git a/_faq/security/cert-lifetime-config.md b/_faq/security/cert-lifetime-config.md
new file mode 100644
index 0000000000000..98152ef4bbd0b
--- /dev/null
+++ b/_faq/security/cert-lifetime-config.md
@@ -0,0 +1,61 @@
+---
+title: How to configure the lifetime for Istio certificates?
+weight: 70
+---
+{% include home.html %}
+
+For the workloads running in Kubernetes, the lifetime of their Istio certificates is controlled by the
+`workload-cert-ttl` flag on Citadel. The default value is 19 hours. This value should be no greater than
+`max-workload-cert-ttl` of Citadel.
+
+Citadel uses a flag `max-workload-cert-ttl` to control the maximum lifetime for Istio certificates issued to
+workloads. The default value is 7 days. If `workload-cert-ttl` on Citadel or node agent is greater than
+`max-workload-cert-ttl`, Citadel will fail issuing the certificate.
+
+Modify the `istio-auth.yaml` file to customize the Citadel configuration.
+The following modification specifies that the Istio certificates for workloads running in Kubernetes
+have a 1-hour lifetime. In addition, the maximum allowed Istio certificate lifetime is 48 hours.
+
+```plain
+...
+kind: Deployment
+...
+metadata:
+ name: istio-citadel
+ namespace: istio-system
+spec:
+ ...
+ template:
+ ...
+ spec:
+ ...
+ containers:
+ - name: citadel
+ ...
+ args:
+ - --workload-cert-ttl=1h # Lifetime of certificates issued to workloads in Kubernetes.
+ - --max-workload-cert-ttl=48h # Maximum lifetime of certificates issued to workloads by Citadel.
+```
+
+For the workloads running on VMs and bare metal hosts, the lifetime of their Istio certificates is specified by the
+`workload-cert-ttl` flag on each node agent. The default value is also 19 hours. This value should be no greater than
+`max-workload-cert-ttl` of Citadel.
+
+To customize this configuration, the argument for the node agent service should be modified.
+After [setting up the machines]({{home}}/docs/setup/kubernetes/mesh-expansion.html#setting-up-the-machines) for Istio
+mesh expansion, modify the file `/lib/systemd/system/istio-auth-node-agent.service` on the VMs or bare metal hosts:
+
+```plain
+...
+[Service]
+ExecStart=/usr/local/bin/node_agent --workload-cert-ttl=24h # Specify certificate lifetime for workloads on this machine.
+Restart=always
+StartLimitInterval=0
+RestartSec=10
+...
+```
+
+The above configuration specifies that the Istio certificates for workloads running on this VM or bare metal host
+will have a 24-hour lifetime.
+
+After configuring the service, run `systemctl daemon-reload` followed by `systemctl restart istio-auth-node-agent` to restart the node agent.
diff --git a/_faq/security/does-istio-support-authorization.md b/_faq/security/does-istio-support-authorization.md
index 0be72b64ab1de..67a128ea9868b 100644
--- a/_faq/security/does-istio-support-authorization.md
+++ b/_faq/security/does-istio-support-authorization.md
@@ -1,7 +1,6 @@
---
title: Does Istio Auth support authorization?
-order: 110
-type: markdown
+weight: 110
---
{% include home.html %}
diff --git a/_faq/security/enabling-disabling-mtls.md b/_faq/security/enabling-disabling-mtls.md
index 927bc1babf384..0c5718b5306cc 100644
--- a/_faq/security/enabling-disabling-mtls.md
+++ b/_faq/security/enabling-disabling-mtls.md
@@ -1,7 +1,6 @@
---
title: How can I enable/disable mTLS encryption after I installed Istio?
-order: 10
-type: markdown
+weight: 10
---
{% include home.html %}
@@ -10,14 +9,14 @@ uninstalling and re-installing Istio.
If you are an advanced user and understand the risks you can also do the following:
-```bash
-kubectl edit configmap -n istio-system istio
+```command
+$ kubectl edit configmap -n istio-system istio
```
-comment out or uncomment out `authPolicy: MUTUAL_TLS` to toggle mTLS and then
+comment out or uncomment `authPolicy: MUTUAL_TLS` to toggle mTLS and then
-```bash
-kubectl delete pods -n istio-system -l istio=pilot
+```command
+$ kubectl delete pods -n istio-system -l istio=pilot
```
to restart Pilot, after a few seconds (depending on your `*RefreshDelay`) your
diff --git a/_faq/security/https-overlay.md b/_faq/security/https-overlay.md
new file mode 100644
index 0000000000000..cb3234e23273b
--- /dev/null
+++ b/_faq/security/https-overlay.md
@@ -0,0 +1,8 @@
+---
+title: Can I install Istio sidecar for HTTPS services?
+weight: 170
+---
+{% include home.html %}
+
+Yes, you can. It works both with mutual TLS enabled and disabled. Refer to
+[how Istio mTLS works with HTTPS services]({{home}}/docs/tasks/security/https-overlay.html) for more information.
diff --git a/_faq/security/istio-to-not-istio.md b/_faq/security/istio-to-not-istio.md
index f99183bf552a9..d1649695c68ca 100644
--- a/_faq/security/istio-to-not-istio.md
+++ b/_faq/security/istio-to-not-istio.md
@@ -1,6 +1,5 @@
---
-title: Can a service with Istio Auth enabled communicate with a service without Istio?
-order: 20
-type: markdown
+title: Can a service with Istio Auth enabled communicate with a service without Istio?
+weight: 20
---
This is not supported currently, but will be in the near future.
diff --git a/_faq/security/k8s-api-server.md b/_faq/security/k8s-api-server.md
index 3e58db79e3f3d..e21daa60cb5da 100644
--- a/_faq/security/k8s-api-server.md
+++ b/_faq/security/k8s-api-server.md
@@ -1,7 +1,6 @@
---
title: Can I access the Kubernetes API Server with Auth enabled?
-order: 50
-type: markdown
+weight: 50
---
The Kubernetes API server does not support mutual TLS authentication, so
strictly speaking: no. However, if you use version 0.3 or later, see next
diff --git a/_faq/security/k8s-health-checks.md b/_faq/security/k8s-health-checks.md
index c81d5a207d578..1999a40bd868f 100644
--- a/_faq/security/k8s-health-checks.md
+++ b/_faq/security/k8s-health-checks.md
@@ -1,7 +1,6 @@
---
title: How can I use Kubernetes liveness and readiness for service health check with Istio Auth enabled?
-order: 40
-type: markdown
+weight: 40
---
If Istio Auth is enabled, http and tcp health check from kubelet will not
work since they do not have Istio Auth issued certs. A workaround is to
diff --git a/_faq/security/secret-encryption.md b/_faq/security/secret-encryption.md
new file mode 100644
index 0000000000000..edcd7d63d20a2
--- /dev/null
+++ b/_faq/security/secret-encryption.md
@@ -0,0 +1,9 @@
+---
+title: Is the secret encrypted for workload key and cert?
+weight: 125
+---
+{% include home.html %}
+
+By default, they are base64 encoded but not encrypted. However, the [secret encryption feature](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/) is supported in Kubernetes and you can enable it by following the instructions.
+
+Note that this feature is not yet enabled in Google Kubernetes Engine (GKE). While the data may not be encrypted inside the etcd running on the master node, the contents of the master node itself are encrypted, see [here](https://cloud.google.com/security/encryption-at-rest/default-encryption/#encryption_of_data_at_rest) for more info.
diff --git a/_faq/security/secure-ingress.md b/_faq/security/secure-ingress.md
index d3953f0654183..cdbd9c4a0b910 100644
--- a/_faq/security/secure-ingress.md
+++ b/_faq/security/secure-ingress.md
@@ -1,7 +1,6 @@
---
title: How to configure Istio Ingress to only accept TLS traffic?
-order: 130
-type: markdown
+weight: 130
---
{% include home.html %}
diff --git a/_faq/security/use-k8s-secrets.md b/_faq/security/use-k8s-secrets.md
index a8dfc145b1443..7bc09b6726b48 100644
--- a/_faq/security/use-k8s-secrets.md
+++ b/_faq/security/use-k8s-secrets.md
@@ -1,12 +1,12 @@
---
title: Does Istio Auth use Kubernetes secrets?
-order: 120
-type: markdown
+weight: 120
---
{% include home.html %}
Yes. The key and certificate distribution in Istio Auth is based on [Kubernetes secrets](https://kubernetes.io/docs/concepts/configuration/secret/).
-Secrets have known [security risks](https://kubernetes.io/docs/concepts/configuration/secret/#risks). The kubernetes team is working on [several features](https://docs.google.com/document/d/1T2y-9geg9EfHHtCDYTXptCa-F4kQ0RyiH-c_M1SyD0s) to improve
+Secrets have known [security risks](https://kubernetes.io/docs/concepts/configuration/secret/#risks). The Kubernetes team is working on
+[several features](https://docs.google.com/document/d/1T2y-9geg9EfHHtCDYTXptCa-F4kQ0RyiH-c_M1SyD0s) to improve
Kubernetes secret security, from secret encryption to node-level access control. And as of version 1.6, Kubernetes introduces
[RBAC authorization](https://kubernetes.io/docs/admin/authorization/rbac/), which can provide fine-grained secrets management.
diff --git a/_faq/setup/consul-app-not-working.md b/_faq/setup/consul-app-not-working.md
index 1a3e1be8adeb9..6cc0345243516 100644
--- a/_faq/setup/consul-app-not-working.md
+++ b/_faq/setup/consul-app-not-working.md
@@ -1,8 +1,7 @@
---
title: Consul - My application isn't working, where can I troubleshoot this?
-order: 40
-type: markdown
+weight: 40
---
{% include home.html %}
-Please ensure all required containers are running: etcd, istio-apiserver, consul, registrator, pilot. If one of them is not running, you may find the {containerID} using `docker ps -a` and then use `docker logs {containerID}` to read the logs.
+Please ensure all required containers are running: etcd, istio-apiserver, consul, registrator, pilot. If one of them is not running, you may find the {containerID} using `docker ps -a` and then use `docker logs {containerID}` to read the logs.
diff --git a/_faq/setup/consul-unset-context.md b/_faq/setup/consul-unset-context.md
index 718601a5a3fb9..f651daf9a5f22 100644
--- a/_faq/setup/consul-unset-context.md
+++ b/_faq/setup/consul-unset-context.md
@@ -1,7 +1,6 @@
---
title: Consul - How do I unset the context changed by istioctl at the end?
-order: 50
-type: markdown
+weight: 50
---
{% include home.html %}
diff --git a/_faq/setup/eureka-app-not-working.md b/_faq/setup/eureka-app-not-working.md
index 603f4d21aa6c6..445592587e2e8 100644
--- a/_faq/setup/eureka-app-not-working.md
+++ b/_faq/setup/eureka-app-not-working.md
@@ -1,8 +1,7 @@
---
title: Eureka - My application isn't working, where can I troubleshoot this?
-order: 60
-type: markdown
+weight: 60
---
{% include home.html %}
-Please ensure all required containers are running: etcd, istio-apiserver, consul, registrator, istio-pilot. If one of them is not running, you may find the {containerID} using `docker ps -a` and then use `docker logs {containerID}` to read the logs.
+Please ensure all required containers are running: etcd, istio-apiserver, consul, registrator, istio-pilot. If one of them is not running, you may find the {containerID} using `docker ps -a` and then use `docker logs {containerID}` to read the logs.
diff --git a/_faq/setup/eureka-unset-context.md b/_faq/setup/eureka-unset-context.md
index 25a1b40158ea2..9cdd3b1d4cb36 100644
--- a/_faq/setup/eureka-unset-context.md
+++ b/_faq/setup/eureka-unset-context.md
@@ -1,7 +1,6 @@
---
title: Eureka - How do I unset the context changed by `istioctl` at the end?
-order: 70
-type: markdown
+weight: 70
---
{% include home.html %}
diff --git a/_faq/setup/k8s-checking-cluster-alpha-features.md b/_faq/setup/k8s-checking-cluster-alpha-features.md
index 0c3a718585c62..68adbafacea81 100644
--- a/_faq/setup/k8s-checking-cluster-alpha-features.md
+++ b/_faq/setup/k8s-checking-cluster-alpha-features.md
@@ -1,7 +1,6 @@
---
title: Kubernetes - How do I check if my cluster has enabled the alpha features required for automatic sidecar injection?
-order: 10
-type: markdown
+weight: 10
---
{% include home.html %}
@@ -10,8 +9,8 @@ Automatic sidecar injection requires the
Run the following command to check if the initializer has been enabled
(empty output indicates that initializers are not enabled):
-```bash
-kubectl api-versions | grep admissionregistration
+```command
+$ kubectl api-versions | grep admissionregistration
```
In addition, the Kubernetes API server must be started with the Initializer plugin [enabled](https://kubernetes.io/docs/admin/extensible-admission-controllers/#enable-initializers-alpha-feature). Failure to enable the `Initializer` plugin will result in the following error when trying to create the initializer deployment.
diff --git a/_faq/setup/k8s-migrating.md b/_faq/setup/k8s-migrating.md
index 522da1f72beeb..b22425b3f9389 100644
--- a/_faq/setup/k8s-migrating.md
+++ b/_faq/setup/k8s-migrating.md
@@ -1,8 +1,7 @@
---
-title: Kubernetes - Can I migrate an existing installation from Istio v0.1.x to v0.2.x?
-order: 30
-type: markdown
+title: Kubernetes - Can I migrate an existing installation from Istio 0.1.x to 0.2.x?
+weight: 30
---
{% include home.html %}
-Upgrading from Istio 0.1.x to 0.2.x is not supported. You must uninstall Istio v0.1, _including pods with Istio sidecars_ and start with a fresh install of Istio v0.2.
+Upgrading from Istio 0.1.x to 0.2.x is not supported. You must uninstall Istio 0.1, _including pods with Istio sidecars_ and start with a fresh install of Istio 0.2.
diff --git a/_faq/setup/k8s-sidecar-injection-not-working.md b/_faq/setup/k8s-sidecar-injection-not-working.md
index e040ad50a29ab..73a23a4581098 100644
--- a/_faq/setup/k8s-sidecar-injection-not-working.md
+++ b/_faq/setup/k8s-sidecar-injection-not-working.md
@@ -1,7 +1,6 @@
---
title: Kubernetes - How can I debug problems with automatic sidecar injection?
-order: 20
-type: markdown
+weight: 20
---
{% include home.html %}
diff --git a/_faq/traffic-management/ingress-with-no-route-rules.md b/_faq/traffic-management/ingress-with-no-route-rules.md
index 9814d62225b40..f44190b3f55e1 100644
--- a/_faq/traffic-management/ingress-with-no-route-rules.md
+++ b/_faq/traffic-management/ingress-with-no-route-rules.md
@@ -1,7 +1,6 @@
---
title: Can I use standard Ingress specification without any route rules?
-order: 40
-type: markdown
+weight: 40
---
{% include home.html %}
diff --git a/_faq/traffic-management/unreachable-services.md b/_faq/traffic-management/unreachable-services.md
index 84e38321d5f36..a947440574181 100644
--- a/_faq/traffic-management/unreachable-services.md
+++ b/_faq/traffic-management/unreachable-services.md
@@ -1,9 +1,8 @@
---
title: How come some of my services are unreachable after creating route rules?
-order: 30
-type: markdown
+weight: 30
---
{% include home.html %}
-This is an known issue with the current Envoy sidecar implementation. After two seconds of creating the
+This is a known issue with the current Envoy sidecar implementation. After two seconds of creating the
rule, services should become available.
diff --git a/_faq/traffic-management/viewing-current-rules.md b/_faq/traffic-management/viewing-current-rules.md
index c19819e12ae88..add215237bad2 100644
--- a/_faq/traffic-management/viewing-current-rules.md
+++ b/_faq/traffic-management/viewing-current-rules.md
@@ -1,7 +1,6 @@
---
title: How can I view the current route rules I have configured with Istio?
-order: 10
-type: markdown
+weight: 10
---
{% include home.html %}
diff --git a/_faq/traffic-management/weighted-rules-not-working.md b/_faq/traffic-management/weighted-rules-not-working.md
index efd7080f576a7..ebe19fd12c628 100644
--- a/_faq/traffic-management/weighted-rules-not-working.md
+++ b/_faq/traffic-management/weighted-rules-not-working.md
@@ -1,7 +1,6 @@
---
title: Why is creating a weighted route rule to split traffic between two versions of a service not working as expected?
-order: 20
-type: markdown
+weight: 20
---
{% include home.html %}
diff --git a/_glossary/adapters.md b/_glossary/adapters.md
index 307f6f88cfb99..eb3b72be1b060 100644
--- a/_glossary/adapters.md
+++ b/_glossary/adapters.md
@@ -1,6 +1,5 @@
---
title: Adapters
-type: markdown
---
{% include home.html %}
@@ -10,4 +9,4 @@ monitoring, quotas, ACL checking, and more.
The exact set of adapters used at runtime is determined through configuration and can easily be
extended to target new or custom infrastructure backends.
-[Learn more about adapters]({{home}}/docs/concepts/policy-and-control/mixer.html#adapters).
\ No newline at end of file
+[Learn more about adapters]({{home}}/docs/concepts/policy-and-control/mixer.html#adapters).
diff --git a/_glossary/attribute.md b/_glossary/attribute.md
index 911e1b6af7df0..cfebd53d05a06 100644
--- a/_glossary/attribute.md
+++ b/_glossary/attribute.md
@@ -1,6 +1,5 @@
---
title: Attribute
-type: markdown
---
{% include home.html %}
diff --git a/_glossary/destination.md b/_glossary/destination.md
index a9174d8c657ff..56142dc9a1428 100644
--- a/_glossary/destination.md
+++ b/_glossary/destination.md
@@ -1,6 +1,5 @@
---
title: Destination
-type: markdown
---
The remote upstream service [Envoy](#envoy) is talking to on behalf of a [source](#source) [workload](#workload).
There can be one or more [service versions](#service-version) for a given [service](#service) and Envoy chooses the version based on
diff --git a/_glossary/envoy.md b/_glossary/envoy.md
index 8a2ea10f9da2c..50ddd26d1055c 100644
--- a/_glossary/envoy.md
+++ b/_glossary/envoy.md
@@ -1,6 +1,5 @@
---
title: Envoy
-type: markdown
---
The high-performance proxy that Istio uses to mediate inbound and outbound traffic for all [services](#service) in the
[service mesh](#service-mesh). [Learn more about Envoy](https://envoyproxy.github.io/envoy/).
diff --git a/_glossary/mixer-handler.md b/_glossary/mixer-handler.md
index 6863c25a56bb8..4d58d0b458a5b 100644
--- a/_glossary/mixer-handler.md
+++ b/_glossary/mixer-handler.md
@@ -1,6 +1,5 @@
---
title: Mixer Handler
-type: markdown
---
{% include home.html %}
diff --git a/_glossary/mixer-instance.md b/_glossary/mixer-instance.md
index 96665a8148208..5ee21759012ce 100644
--- a/_glossary/mixer-instance.md
+++ b/_glossary/mixer-instance.md
@@ -1,6 +1,5 @@
---
title: Mixer Instance
-type: markdown
---
{% include home.html %}
diff --git a/_glossary/mixer.md b/_glossary/mixer.md
index 44eba9f4744ab..793d311daa890 100644
--- a/_glossary/mixer.md
+++ b/_glossary/mixer.md
@@ -1,6 +1,5 @@
---
title: Mixer
-type: markdown
---
{% include home.html %}
diff --git a/_glossary/mutual-tls.md b/_glossary/mutual-tls.md
index 3ce556cac2774..819252016ec07 100644
--- a/_glossary/mutual-tls.md
+++ b/_glossary/mutual-tls.md
@@ -1,6 +1,5 @@
---
title: Mutual TLS Authentication
-type: markdown
---
{% include home.html %}
diff --git a/_glossary/pilot.md b/_glossary/pilot.md
index 593e35c0229af..061d4592c8d46 100644
--- a/_glossary/pilot.md
+++ b/_glossary/pilot.md
@@ -1,5 +1,4 @@
---
title: Pilot
-type: markdown
---
The Istio component that programs the [Envoy](#envoy) proxies, responsible for service discovery, load balancing, and routing.
diff --git a/_glossary/secure-naming.md b/_glossary/secure-naming.md
index d3031d272f750..bb87837dfb86d 100644
--- a/_glossary/secure-naming.md
+++ b/_glossary/secure-naming.md
@@ -1,6 +1,5 @@
---
title: Secure Naming
-type: markdown
---
Provides a mapping between a [service name](#service-name) and the [workload principals](#workload-principal) that are authorized to
run the [workloads](#workload) implementing a [service](#service).
diff --git a/_glossary/service-consumer.md b/_glossary/service-consumer.md
index 235eec4f124ba..d5a383b182876 100644
--- a/_glossary/service-consumer.md
+++ b/_glossary/service-consumer.md
@@ -1,5 +1,4 @@
---
title: Service Consumer
-type: markdown
---
The agent that is using a [service](#service).
diff --git a/_glossary/service-endpoint.md b/_glossary/service-endpoint.md
index 044f00261d08a..76a2dee5075e2 100644
--- a/_glossary/service-endpoint.md
+++ b/_glossary/service-endpoint.md
@@ -1,6 +1,5 @@
---
title: Service Endpoint
-type: markdown
---
The network-reachable manifestation of a [service](#service).
Service endpoints are exposed by [workloads](#workload). Not all services have service endpoints.
diff --git a/_glossary/service-mesh.md b/_glossary/service-mesh.md
index ff410e744c05c..b97452fa8cf2e 100644
--- a/_glossary/service-mesh.md
+++ b/_glossary/service-mesh.md
@@ -1,6 +1,5 @@
---
title: Service Mesh
-type: markdown
---
A shared set of names and identities that allows for common policy enforcement and telemetry collection.
[Service names](#service-name) and [workload principals](#workload-principal) are unique within a service mesh.
diff --git a/_glossary/service-name.md b/_glossary/service-name.md
index d3e70de519c7e..f30fc6c8b5258 100644
--- a/_glossary/service-name.md
+++ b/_glossary/service-name.md
@@ -1,6 +1,5 @@
---
title: Service Name
-type: markdown
---
A unique name for a [service](#service), identifying it within the [service mesh](#service-mesh).
A service may not be renamed and maintain its identity, each service name is unique.
diff --git a/_glossary/service-operator.md b/_glossary/service-operator.md
index 9f4e6d87a1cb1..d1e25b1892865 100644
--- a/_glossary/service-operator.md
+++ b/_glossary/service-operator.md
@@ -1,6 +1,5 @@
---
title: Service Operator
-type: markdown
---
The agent that manages a [service](#service) within a [service mesh](#service-mesh) by manipulating configuration state
and monitoring the service's health via a variety of dashboards.
diff --git a/_glossary/service-producer.md b/_glossary/service-producer.md
index 10cd4db92d84c..e59d092cf489c 100644
--- a/_glossary/service-producer.md
+++ b/_glossary/service-producer.md
@@ -1,5 +1,4 @@
---
title: Service Producer
-type: markdown
---
The agent that creates a [service](#service).
diff --git a/_glossary/service-version.md b/_glossary/service-version.md
index c70eda54582e8..862488cad9278 100644
--- a/_glossary/service-version.md
+++ b/_glossary/service-version.md
@@ -1,6 +1,5 @@
---
title: Service Version
-type: markdown
---
Distinct variants of a [service](#service), typically backed by a different versions of a [workload](#workload) binary.
Common scenarios where multiple [service versions](#service-version) may be used include A/B testing, canary rollouts, etc.
diff --git a/_glossary/service.md b/_glossary/service.md
index 650690421222a..c3cb7cc3e6016 100644
--- a/_glossary/service.md
+++ b/_glossary/service.md
@@ -1,6 +1,5 @@
---
title: Service
-type: markdown
---
A delineated group of related behaviors within a [service mesh](#service-mesh). Services are identified using a
[service name](#service-name),
diff --git a/_glossary/source.md b/_glossary/source.md
index 83b890faea8c8..9466770f093c2 100644
--- a/_glossary/source.md
+++ b/_glossary/source.md
@@ -1,8 +1,7 @@
---
title: Source
-type: markdown
---
The downstream client of the [Envoy](#envoy) proxy.
Within the [service mesh](#service-mesh) a source is typically a
-[workload](#workload), but the source for ingress traffic may include other clients such as a
+[workload](#workload), but the source for ingress traffic may include other clients such as a
browser or mobile app.
diff --git a/_glossary/workload-id.md b/_glossary/workload-id.md
index 7a19fad143f06..2e6af7eda36c7 100644
--- a/_glossary/workload-id.md
+++ b/_glossary/workload-id.md
@@ -1,6 +1,5 @@
---
title: Workload ID
-type: markdown
---
A unique identifier for an individual instance of a [workload](#workload).
Like [workload name](#workload-name), the workload ID is not a strongly verified property and should not be used
diff --git a/_glossary/workload-name.md b/_glossary/workload-name.md
index 9258d916f1fcc..d4c5387690345 100644
--- a/_glossary/workload-name.md
+++ b/_glossary/workload-name.md
@@ -1,6 +1,5 @@
---
title: Workload Name
-type: markdown
---
A unique name for a [workload](#workload), identifying it within the [service mesh](#service-mesh).
Unlike the [service name](#service-name) and the [workload principal], the workload name is not a
diff --git a/_glossary/workload-principal.md b/_glossary/workload-principal.md
index 059044f10890f..6a8d2e6ca22e3 100644
--- a/_glossary/workload-principal.md
+++ b/_glossary/workload-principal.md
@@ -1,10 +1,9 @@
---
title: Workload Principal
-type: markdown
---
Identifies the verifiable authority under which a [workload](#workload) runs.
Istio's service-to-service authentication is used to produce the workload principal.
By default workload principals are compliant with the SPIFFE ID format.
* Multiple [workloads](#workload) may share the same workload principal, but each workload has a single canonical workload
- principal
+ principal
* Workload principals are accessible in Istio configuration as the `source.user` and `destination.user` [attributes](#attribute).
diff --git a/_glossary/workload.md b/_glossary/workload.md
index 5500576b7a89c..06851d4afe7f1 100644
--- a/_glossary/workload.md
+++ b/_glossary/workload.md
@@ -1,8 +1,7 @@
---
title: Workload
-type: markdown
---
-A process/binary deployed by operators in Istio, typically represented by entities such as containers, pods, or VMs.
+A process/binary deployed by operators in Istio, typically represented by entities such as containers, pods, or VMs.
* A workload can expose zero or more [service endpoints](#service-endpoint).
* A workload can consume zero or more [services](#service).
* Each workload has a single canonical [service name](#service-name) associated with it, but
diff --git a/_help/bugs.md b/_help/bugs.md
index 0fcc3fbec193b..4747f76f0237d 100644
--- a/_help/bugs.md
+++ b/_help/bugs.md
@@ -1,17 +1,15 @@
---
title: Reporting Bugs
-overview: What to do about bugs
+description: What to do about bugs
-order: 35
+weight: 35
-layout: help
-type: markdown
redirect_from: /bugs
toc: false
---
{% include home.html %}
-Oh no! You found a bug?
+Oh no! You found a bug?
Search our [issue database](https://github.com/istio/issues/issues/) to see if we already know about
your problem and learn about when
diff --git a/_help/faq/general.html b/_help/faq/general.html
index 89c5df50d2f2d..4616e72aa1f55 100644
--- a/_help/faq/general.html
+++ b/_help/faq/general.html
@@ -1,8 +1,8 @@
---
title: General
-overview: General Q&A
+description: General Q&A
-order: 10
+weight: 10
layout: help
---
diff --git a/_help/faq/index.md b/_help/faq/index.md
index 0f1d3e54ac709..6630e01a97ac4 100644
--- a/_help/faq/index.md
+++ b/_help/faq/index.md
@@ -1,11 +1,9 @@
---
title: FAQ
-overview: Frequently Asked Questions about Istio.
+description: Frequently Asked Questions about Istio.
-order: 20
+weight: 20
-layout: help
-type: markdown
toc: false
redirect_from:
@@ -39,7 +37,7 @@ You've got questions? We've got answers!
{% if cat == qcat %}
{% assign name = q.path | downcase | split: '/' | last | remove: ".md" %}
- {{q.title}}
+ {{q.title}}
{% endif %}
{% endfor %}
diff --git a/_help/faq/mixer.html b/_help/faq/mixer.html
index abbc6e6c42783..5f754d07c0dcd 100644
--- a/_help/faq/mixer.html
+++ b/_help/faq/mixer.html
@@ -1,8 +1,8 @@
---
title: Mixer
-overview: Mixer Q&A
+description: Mixer Q&A
-order: 40
+weight: 40
layout: help
---
diff --git a/_help/faq/security.html b/_help/faq/security.html
index 58771291665c2..dab199124ffd6 100644
--- a/_help/faq/security.html
+++ b/_help/faq/security.html
@@ -1,8 +1,8 @@
---
title: Security
-overview: Security Q&A
+description: Security Q&A
-order: 30
+weight: 30
layout: help
---
diff --git a/_help/faq/setup.html b/_help/faq/setup.html
index 242be527b0e3a..4e13965cc0e77 100644
--- a/_help/faq/setup.html
+++ b/_help/faq/setup.html
@@ -1,8 +1,8 @@
---
title: Setup
-overview: Setup Q&A
+description: Setup Q&A
-order: 20
+weight: 20
layout: help
---
diff --git a/_help/faq/traffic-management.html b/_help/faq/traffic-management.html
index 11ae8c2f982ec..ed5f51599ab1a 100644
--- a/_help/faq/traffic-management.html
+++ b/_help/faq/traffic-management.html
@@ -1,8 +1,8 @@
---
title: Traffic Management
-overview: Traffic Management Q&A
+description: Traffic Management Q&A
-order: 50
+weight: 50
layout: help
---
diff --git a/_help/glossary.md b/_help/glossary.md
index f9b2f6527e4f7..f378632e9090f 100644
--- a/_help/glossary.md
+++ b/_help/glossary.md
@@ -1,11 +1,9 @@
---
title: Glossary
-overview: A glossary of common Istio terms.
+description: A glossary of common Istio terms.
-order: 30
+weight: 30
-layout: help
-type: markdown
redirect_from:
- "/glossary"
- "/docs/welcome/glossary.html"
diff --git a/_help/index.md b/_help/index.md
index e664e1cb55892..8f261e78586ac 100644
--- a/_help/index.md
+++ b/_help/index.md
@@ -1,15 +1,22 @@
---
title: Help!
-overview: A bunch of resources to help you deploy, configure and use Istio.
+description: A bunch of resources to help you deploy, configure and use Istio.
-order: 10
+weight: 10
-layout: help
-type: markdown
toc: false
---
+{% include home.html %}
-{% include section-index.html docs=site.help %}
+Here are some resources to help you deploy, configure and use Istio.
-And don't forget our vibrant [community]({{home}}/community) that's always ready to lend a hand
+- [Frequently Asked Questions]({{home}}/help/faq/). You ask 'em, we'll answer 'em.
+
+- [Glossary]({{home}}/help/glossary.html). A glossary of common Istio terms.
+
+- [Reporting Bugs]({{home}}/help/bugs.html). What to do when you find a bug.
+
+- [Troubleshooting Guide]({{home}}/help/troubleshooting.html). Practical advice on practical problems with Istio.
+
+And don't forget our vibrant [community]({{home}}/community.html) that's always ready to lend a hand
with thorny problems.
diff --git a/_help/troubleshooting.md b/_help/troubleshooting.md
index b7964a5ef3816..8e7ff35420392 100644
--- a/_help/troubleshooting.md
+++ b/_help/troubleshooting.md
@@ -1,18 +1,55 @@
---
title: Troubleshooting Guide
-overview: Practical advice on practical problems with Istio
+description: Practical advice on practical problems with Istio
-order: 40
+weight: 40
-layout: help
-type: markdown
redirect_from: /troubleshooting
+force_inline_toc: true
---
{% include home.html %}
Oh no! You're having trouble? Below is a list of solutions to common problems.
+## Verifying connectivity to Istio Pilot
+
+Verifying connectivity to Pilot is a useful troubleshooting step. Every proxy container in the service mesh should be able to communicate with Pilot. This can be accomplished in a few simple steps:
+
+1. Get the name of the Istio Ingress pod:
+
+```command
+$ INGRESS_POD_NAME=$(kubectl get po -n istio-system | grep ingress\- | awk '{print$1}')
+```
+
+1. Exec into the Istio Ingress pod:
+
+```command
+$ kubectl exec -it $INGRESS_POD_NAME -n istio-system /bin/bash
+```
+
+1. Unless you installed Istio using the debug proxy image (`istioctl kube-inject --debug=true`), you need to
+install curl.
+
+```command
+$ apt-get update && apt-get install -y curl
+```
+
+1. Test connectivity to Pilot using cURL. The following example cURL's the v1 registration API using default Pilot configuration parameters and mTLS enabled:
+
+```command
+$ curl -k --cert /etc/certs/cert-chain.pem --cacert /etc/certs/root-cert.pem --key /etc/certs/key.pem https://istio-pilot:15003/v1/registration
+```
+
+If mTLS is disabled:
+
+```command
+$ curl http://istio-pilot:15003/v1/registration
+```
+
+You should receive a response listing the "service-key" and "hosts" for each service in the mesh.
+
## No traces appearing in Zipkin when running Istio locally on Mac
+
Istio is installed and everything seems to be working except there are no traces showing up in Zipkin when there
should be.
@@ -22,10 +59,13 @@ when you select a very long date range in Zipkin you will see the traces appeari
You can also confirm this problem by comparing the date inside a docker container to outside:
-```bash
-docker run --entrypoint date gcr.io/istio-testing/ubuntu-16-04-slave:latest
+```command
+$ docker run --entrypoint date gcr.io/istio-testing/ubuntu-16-04-slave:latest
Sun Jun 11 11:44:18 UTC 2017
-date -u
+```
+
+```command
+$ date -u
Thu Jun 15 02:25:42 UTC 2017
```
@@ -33,11 +73,12 @@ To fix the problem, you'll need to shutdown and then restart Docker before reins
## Envoy won't connect to my HTTP/1.0 service
-Envoy requires HTTP/1.1 or HTTP/2 traffic for upstream services. For example, when using [NGINX](https://www.nginx.com/) for serving traffic behind Envoy, you will need to set the [proxy_http_version](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version) directive in your NGINX config to be "1.1", since the NGINX default is 1.0
+Envoy requires HTTP/1.1 or HTTP/2 traffic for upstream services. For example, when using [NGINX](https://www.nginx.com/) for serving traffic behind Envoy, you
+will need to set the [proxy_http_version](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version) directive in your NGINX config to be "1.1", since the NGINX default is 1.0
Example config:
-```
+```plain
upstream http_backend {
server 127.0.0.1:8080;
@@ -56,7 +97,7 @@ server {
}
```
-## No grafana output when connecting from a local web client to Istio remotely hosted
+## No Grafana output when connecting from a local web client to Istio remotely hosted
Validate the client and server date and time match.
@@ -77,7 +118,7 @@ The expected flow of metrics is:
1. The instances are handed to Mixer adapters for processing and backend storage.
1. The backend storage systems record metrics data.
-The default installations of Mixer ship with a [Prometheus](http://prometheus.io/)
+The default installations of Mixer ship with a [Prometheus](https://prometheus.io/)
adapter, as well as configuration for generating a basic set of metric
values and sending them to the Prometheus adapter. The
[Prometheus add-on]({{home}}/docs/tasks/telemetry/querying-metrics.html#about-the-prometheus-add-on)
@@ -98,8 +139,8 @@ Check these metrics.
In Kubernetes environments, execute the following command:
- ```bash
- kubectl -n istio-system port-forward 9093 &
+ ```command
+ $ kubectl -n istio-system port-forward 9093 &
```
1. Verify successful report calls.
@@ -109,7 +150,7 @@ Check these metrics.
You should see something like:
- ```
+ ```plain
grpc_server_handled_total{grpc_code="OK",grpc_method="Report",grpc_service="istio.mixer.v1.Mixer",grpc_type="unary"} 68
```
@@ -126,13 +167,8 @@ or [manual]({{home}}/docs/setup/kubernetes/sidecar-injection.html#manual-sidecar
In Kubernetes environments, issue the following command:
- ```bash
- kubectl get rules --all-namespaces
- ```
-
- With the default configuration, you should see something like:
-
- ```
+ ```command
+ $ kubectl get rules --all-namespaces
NAMESPACE NAME KIND
istio-system promhttp rule.v1alpha2.config.istio.io
istio-system promtcp rule.v1alpha2.config.istio.io
@@ -149,13 +185,8 @@ or [manual]({{home}}/docs/setup/kubernetes/sidecar-injection.html#manual-sidecar
In Kubernetes environments, issue the following command:
- ```bash
- kubectl get prometheuses.config.istio.io --all-namespaces
- ```
-
- The expected output is:
-
- ```
+ ```command
+ $ kubectl get prometheuses.config.istio.io --all-namespaces
NAMESPACE NAME KIND
istio-system handler prometheus.v1alpha2.config.istio.io
```
@@ -168,13 +199,8 @@ or [manual]({{home}}/docs/setup/kubernetes/sidecar-injection.html#manual-sidecar
In Kubernetes environments, issue the following command:
- ```bash
- kubectl get metrics.config.istio.io --all-namespaces
- ```
-
- The expected output is:
-
- ```
+ ```command
+ $ kubectl get metrics.config.istio.io --all-namespaces
NAMESPACE NAME KIND
istio-system requestcount metric.v1alpha2.config.istio.io
istio-system requestduration metric.v1alpha2.config.istio.io
@@ -204,7 +230,7 @@ or [manual]({{home}}/docs/setup/kubernetes/sidecar-injection.html#manual-sidecar
You should find something like:
- ```
+ ```plain
mixer_config_resolve_count{error="false",target="details.default.svc.cluster.local"} 56
mixer_config_resolve_count{error="false",target="ingress.istio-system.svc.cluster.local"} 67
mixer_config_resolve_count{error="false",target="mongodb.default.svc.cluster.local"} 18
@@ -222,8 +248,8 @@ or [manual]({{home}}/docs/setup/kubernetes/sidecar-injection.html#manual-sidecar
In Kubernetes environments, retrieve the Mixer logs via:
- ```bash
- kubectl -n istio-system logs mixer
+ ```command
+ $ kubectl -n istio-system logs mixer
```
Look for errors related to your configuration or your service in the
@@ -243,7 +269,7 @@ More on viewing Mixer configuration can be found [here]({{home}}/help/faq/mixer.
You should find something like:
- ```
+ ```plain
mixer_adapter_dispatch_count{adapter="prometheus",error="false",handler="handler.prometheus.istio-system",meshFunction="metric",response_code="OK"} 114
mixer_adapter_dispatch_count{adapter="prometheus",error="true",handler="handler.prometheus.default",meshFunction="metric",response_code="INTERNAL"} 4
mixer_adapter_dispatch_count{adapter="stdio",error="false",handler="handler.stdio.istio-system",meshFunction="logentry",response_code="OK"} 104
@@ -262,8 +288,8 @@ More on viewing Mixer configuration can be found [here]({{home}}/help/faq/mixer.
In Kubernetes environment, check the Mixer logs via:
- ```bash
- kubectl -n istio-system logs mixer
+ ```command
+ $ kubectl -n istio-system logs mixer
```
Filter for lines including something like `Report 0 returned with: INTERNAL
@@ -277,8 +303,8 @@ More on viewing Mixer configuration can be found [here]({{home}}/help/faq/mixer.
In Kubernetes environments, setup port-forwarding as follows:
- ```bash
- kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=prometheus -o jsonpath='{.items[0].metadata.name}') 9090:9090 &
+ ```command
+ $ kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=prometheus -o jsonpath='{.items[0].metadata.name}') 9090:9090 &
```
1. Visit [http://localhost:9090/config](http://localhost:9090/config).
@@ -301,16 +327,41 @@ More on viewing Mixer configuration can be found [here]({{home}}/help/faq/mixer.
## How can I debug issues with the service mesh?
+### With [istioctl](https://istio.io/docs/reference/commands/istioctl.html#istioctl%20proxy-config)
+
+Istioctl allows you to inspect the current xDS of a given Envoy from its admin interface (locally) or from Pilot using the `proxy-config` or `pc` command.
+
+For example, to retrieve the configured clusters in an Envoy via the admin interface run the following command:
+
+```command
+istioctl proxy-config endpoint clusters
+```
+
+To retrieve endpoints for a given pod in the application namespace from Pilot run the following command:
+
+```command
+istioctl proxy-config pilot -n application eds
+```
+
+The `proxy-config` command also allows you to retrieve the state of the entire mesh from Pilot using the following command:
+
+```command
+istioctl proxy-config pilot mesh ads
+```
+
### With [GDB](https://www.gnu.org/software/gdb/)
To debug Istio with `gdb`, you will need to run the debug images of Envoy / Mixer / Pilot. A recent `gdb` and the golang extensions (for Mixer/Pilot or other golang components) is required.
-1. `kubectl exec -it PODNAME -c [proxy | mixer | pilot]`
-1. Find process ID: ps ax
-1. gdb -p PID binary
-1. For go: info goroutines, goroutine x bt
+1. `kubectl exec -it PODNAME -c [proxy | mixer | pilot]`
-### With [Tcpdump](http://www.tcpdump.org/tcpdump_man.html)
+1. Find process ID: ps ax
+
+1. gdb -p PID binary
+
+1. For go: info goroutines, goroutine x bt
+
+### With [Tcpdump](https://www.tcpdump.org/tcpdump_man.html)
Tcpdump doesn't work in the sidecar pod - the container doesn't run as root. However any other container in the same pod will see all the packets, since the network namespace is shared. `iptables` will also see the pod-wide config.
@@ -320,7 +371,7 @@ Communication between Envoy and the app happens on 127.0.0.1, and is not encrypt
Check your `ulimit -a`. Many systems have a 1024 open file descriptor limit by default which will cause Envoy to assert and crash with:
-```bash
+```plain
[2017-05-17 03:00:52.735][14236][critical][assert] assert failure: fd_ != -1: external/envoy/source/common/network/connection_impl.cc:58
```
@@ -328,30 +379,27 @@ Make sure to raise your ulimit. Example: `ulimit -n 16384`
## Headless TCP Services Losing Connection from Istiofied Containers
-If `istio-ca` is deployed, Envoy is restarted every 15 minutes to refresh certificates.
+If `istio-citadel` is deployed, Envoy is restarted every 15 minutes to refresh certificates.
This causes the disconnection of TCP streams or long-running connections between services.
You should build resilience into your application for this type of
disconnect, but if you still want to prevent the disconnects from
-happening, you will need to disable mTLS and the `istio-ca` deployment.
+happening, you will need to disable mTLS and the `istio-citadel` deployment.
First, edit your istio config to disable mTLS
-```
-# comment out or uncomment out authPolicy: MUTUAL_TLS to toggle mTLS and then
-kubectl edit configmap -n istio-system istio
-
-# restart pilot and wait a few minutes
-kubectl delete pods -n istio-system -l istio=pilot
+```command
+$ kubectl edit configmap -n istio-system istio
+$ kubectl delete pods -n istio-system -l istio=pilot
```
-Next, scale down the `istio-ca` deployment to disable Envoy restarts.
+Next, scale down the `istio-citadel` deployment to disable Envoy restarts.
-```
-kubectl scale --replicas=0 deploy/istio-ca -n istio-system
+```command
+$ kubectl scale --replicas=0 deploy/istio-citadel -n istio-system
```
-This should stop istio from restarting Envoy and disconnecting TCP connections.
+This should stop Istio from restarting Envoy and disconnecting TCP connections.
## Envoy Process High CPU Usage
@@ -361,13 +409,9 @@ CPU usage, even when Envoy isn't doing anything. In order to bring the
CPU usage down for larger deployments, increase the refresh interval for
Envoy to something higher, like 30 seconds.
-```
-# increase the field rdsRefreshDelay in the mesh and defaultConfig section
-# set the refresh interval to 30s
-kubectl edit configmap -n istio-system istio
-
-# restart pilot and wait a few minutes
-kubectl delete pods -n istio-system -l istio=pilot
+```command
+$ kubectl edit configmap -n istio-system istio
+$ kubectl delete pods -n istio-system -l istio=pilot
```
Also make sure to reinject the sidecar into all of your pods, as
@@ -376,11 +420,38 @@ their configuration needs to be updated as well.
Afterwards, you should see CPU usage fall back to 0-1% while idling.
Make sure to tune these values for your specific deployment.
-*Warning:*: Changes created by routing rules will take up to 2x refresh interval to propagate to the sidecars.
-While the larger refresh interval will reduce CPU usage, updates caused by routing rules may cause a period
-of HTTP 404s (upto 2x the refresh interval) until the Envoy sidecars get all relevant configuration.
+*Warning:*: Changes created by routing rules will take up to 2x refresh interval to propagate to the sidecars.
+While the larger refresh interval will reduce CPU usage, updates caused by routing rules may cause a period
+of HTTP 404s (up to 2x the refresh interval) until the Envoy sidecars get all relevant configuration.
## Kubernetes webhook setup script files are missing from 0.5 release package
-NOTE: The 0.5.0 and 0.5.1 releases are missing scripts to provision webhook certificates. Download the missing files from [here](https://raw.githubusercontent.com/istio/istio/master/install/kubernetes/webhook-create-signed-cert.sh) and [here](https://raw.githubusercontent.com/istio/istio/master/install/kubernetes/webhook-patch-ca-bundle.sh). Subsqeuent releases (> 0.5.1) should include these missing files.
+> The 0.5.0 and 0.5.1 releases are missing scripts to provision webhook certificates. Download the missing files
+from [here](https://raw.githubusercontent.com/istio/istio/release-0.7/install/kubernetes/webhook-create-signed-cert.sh) and [here](https://raw.githubusercontent.com/istio/istio/release-0.7/install/kubernetes/webhook-patch-ca-bundle.sh). Subsequent releases (> 0.5.1) include these files.
+
+## Automatic sidecar injection will fail if the kube-apiserver has proxy settings
+
+This was tested on 0.5.0 with the additional files required as referenced in the above issue. When the Kube-apiserver included
+proxy settings such as:
+
+```yaml
+env:
+ - name: http_proxy
+ value: http://proxy-wsa.esl.foo.com:80
+ - name: https_proxy
+ value: http://proxy-wsa.esl.foo.com:80
+ - name: no_proxy
+ value: 127.0.0.1,localhost,dockerhub.foo.com,devhub-docker.foo.com,10.84.100.125,10.84.100.126,10.84.100.127
+```
+The sidecar injection would fail. The only related failure logs was in the kube-apiserver log:
+
+```plain
+W0227 21:51:03.156818 1 admission.go:257] Failed calling webhook, failing open sidecar-injector.istio.io: failed calling admission webhook "sidecar-injector.istio.io": Post https://istio-sidecar-injector.istio-system.svc:443/inject: Service Unavailable
+```
+
+Make sure both pod and service CIDRs are not proxied according to *_proxy variables. Check the kube-apiserver files and logs to verify the configuration and whether any requests are being proxied.
+
+A workaround is to remove the proxy settings from the kube-apiserver manifest and restart the server or use a later version of kubernetes.
+An issue was filed in kubernetes related to this and has since been closed. [https://github.com/kubernetes/kubeadm/issues/666](https://github.com/kubernetes/kubeadm/issues/666)
+[https://github.com/kubernetes/kubernetes/pull/58698#discussion_r163879443](https://github.com/kubernetes/kubernetes/pull/58698#discussion_r163879443)
diff --git a/_includes/collection_nav.html b/_includes/collection_nav.html
new file mode 100644
index 0000000000000..84ed782b3af87
--- /dev/null
+++ b/_includes/collection_nav.html
@@ -0,0 +1,59 @@
+{% comment %}
+Assigns the next_page_url, next_page_title, next_page_description, prev_page_url, prev_page_title, and prev_page_description variables the
+URLs, titles and descriptions of the next and previous pages within the current collection, relative to the current page.
+{% endcomment %}
+
+{% comment %}
+ Find the current page in the sorted set of blog posts we generated for the sidebar
+{% endcomment %}
+
+{% assign index = 0 %}
+{% for u in urls %}
+ {% if u == page.url %}
+ {% assign index = forloop.index0 %}
+ {% break %}
+ {% endif %}
+{% endfor %}
+
+{% assign next_index = index %}
+{% for i in (0..10) %}
+ {% assign next_index = next_index | minus: 1 %}
+
+ {% if next_index < 0 %}
+ {% break %}
+ {% endif
+
+ {% assign YY = "THIS IS NEEDED, OTHERWISE THE URL ASSIGNMENT BELOW DOESN'T HAVE ANY EFFECT!" %}
+
+ {% assign url = urls[next_index] %}
+ {% assign components = url | split: "/" %}
+ {% assign name = components | last %}
+
+ {% if name != "index.html" %}
+ {% assign next_page_url = urls[next_index] %}
+ {% assign next_page_title = titles[next_index] %}
+ {% assign next_page_description = descriptions[next_index] %}
+ {% break %}
+ {% endif %}
+{% endfor %}
+
+{% assign max = urls | size %}
+{% assign prev_index = index %}
+{% for i in (0..10) %}
+ {% assign prev_index = prev_index | plus: 1 %}
+ {% if prev_index >= max %}
+ {% break %}
+ {% endif
+
+ {% assign YY = "THIS IS NEEDED, OTHERWISE THE URL ASSIGNMENT BELOW DOESN'T HAVE ANY EFFECT!" %}
+
+ {% assign url = urls[prev_index] %}
+ {% assign components = url | split: "/" %}
+ {% assign name = components | last %}
+ {% if name != "index.html" %}
+ {% assign prev_page_url = urls[prev_index] %}
+ {% assign prev_page_title = titles[prev_index] %}
+ {% assign prev_page_description = descriptions[prev_index] %}
+ {% break %}
+ {% endif %}
+{% endfor %}
diff --git a/_includes/faq.html b/_includes/faq.html
index ad6ef8741574b..491d0402932a8 100644
--- a/_includes/faq.html
+++ b/_includes/faq.html
@@ -1,5 +1,5 @@
-{% assign faqs = site.faq | sort: "order" %}
+{% assign faqs = site.faq | sort: "weight" %}
{% for q in faqs %}
{% assign comp = q.path | split: '/' %}
{% assign qcat = comp[1] %}
diff --git a/_includes/figure.html b/_includes/figure.html
deleted file mode 100644
index 55a03bce2be30..0000000000000
--- a/_includes/figure.html
+++ /dev/null
@@ -1,28 +0,0 @@
-{% comment %}
-Purpose:
- Inserts a figure into a page. The user of this template specifies the
- relative width of the figure in percentage, and an aspect ratio value in
- lieu of the Y coordinate. Through CSS trickery, these two values let us
- calculate the actual width and height of the image at render time in such
- a way that it avoids the typical 'shifting text' problem as images are
- loaded asynchronously.
-
-Usage:
- {% include figure.html width='%' ratio='%'
- img=''
- alt=''
- title=''
- caption=''
- %}
-{% endcomment %}
-
-