From 3baec8f7654f9be2865d6c58befd730f42d971dc Mon Sep 17 00:00:00 2001
From: artembo <artembo@me.com>
Date: Tue, 6 Jul 2021 00:15:03 +0300
Subject: [PATCH] Add pull/push workflows for translations

- add cleanup.py for re-arranging po files
- add push-translation.yml for pushing sources to Crowdin when rst files
  change in a pull request
- add pull-translation.yml for getting translations from Crowdin on demand
  when a translation is ready

Closes #104
---
 .github/workflows/destroy-deployment.yml  |  24 +
 .github/workflows/pull-translation.yml    |  51 ++
 .github/workflows/push-translation.yml    |  57 ++
 .github/workflows/upload-translations.yml |  40 +
 .gitignore                                |   3 +
 doc/README.md                             |  20 +
 doc/cleanup.py                            |  46 +
 doc/conf.py                               |  23 +
 doc/crowdin.yaml                          |  24 +
 .../cartridge_kubernetes_guide/index.po   | 807 ++++++++++++++++++
 doc/requirements.txt                      |   3 +
 11 files changed, 1098 insertions(+)
 create mode 100644 .github/workflows/destroy-deployment.yml
 create mode 100644 .github/workflows/pull-translation.yml
 create mode 100644 .github/workflows/push-translation.yml
 create mode 100644 .github/workflows/upload-translations.yml
 create mode 100644 doc/README.md
 create mode 100755 doc/cleanup.py
 create mode 100644 doc/conf.py
 create mode 100644 doc/crowdin.yaml
 create mode 100644 doc/locale/ru/LC_MESSAGES/cartridge_kubernetes_guide/index.po
 create mode 100644 doc/requirements.txt

diff --git a/.github/workflows/destroy-deployment.yml b/.github/workflows/destroy-deployment.yml
new file mode 100644
index 00000000..5e7ec3e0
--- /dev/null
+++ b/.github/workflows/destroy-deployment.yml
@@ -0,0 +1,24 @@
+name: Destroy-deployments
+
+on:
+  pull_request:
+    paths:
+      - 'doc/**/*'
+    types:
+      - closed
+jobs:
+  destroy-deployment:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          token: "${{ secrets.GITHUB_TOKEN }}"
+
+      - name: Set branch name from source branch
+        run: echo "BRANCH_NAME=${GITHUB_HEAD_REF##*/}" >> $GITHUB_ENV
+
+      - name: Remove dev server deployment at translation-${{env.BRANCH_NAME}}
+        uses: strumwolf/delete-deployment-environment@v2
+        with:
+          token: "${{ secrets.TARANTOOLBOT_TOKEN }}"
+          environment: "translation-${{env.BRANCH_NAME}}"
diff --git a/.github/workflows/pull-translation.yml b/.github/workflows/pull-translation.yml
new file mode 100644
index 00000000..f61fa598
--- /dev/null
+++ b/.github/workflows/pull-translation.yml
@@ -0,0 +1,51 @@
+name: Pull translations
+
+on:
+  workflow_dispatch:
+    branches:
+      - '!master'
+jobs:
+  pull-translations:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          token: ${{secrets.TARANTOOLBOT_TOKEN}}
+
+      - name: Set branch name from source branch
+        run: echo "BRANCH_NAME=${GITHUB_REF##*/}" >> $GITHUB_ENV
+
+      - name: Setup Python environment
+        uses: actions/setup-python@v2
+
+      - name: Setup Python requirements
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r doc/requirements.txt
+
+      - name: Pull translations from Crowdin
+        uses: crowdin/github-action@1.0.21
+        with:
+          config: 'doc/crowdin.yaml'
+          upload_sources: false
+          upload_translations: false
+          push_translations: false
+          download_translations: true
+          download_language: 'ru'
+          crowdin_branch_name: ${{env.BRANCH_NAME}}
+          debug_mode: true
+        env:
+          GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
+          CROWDIN_PERSONAL_TOKEN: ${{secrets.CROWDIN_PERSONAL_TOKEN}}
+
+      - name: Cleanup translation files
+        run: |
+          sudo chown -R runner:docker doc/locale/ru/LC_MESSAGES
+          python doc/cleanup.py po
+
+      - name: Commit translation files
+        uses: stefanzweifel/git-auto-commit-action@v4.1.2
+        with:
+          commit_message: "Update translations"
+          file_pattern: "*.po"
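The pull workflow above runs on manual dispatch. A minimal sketch of triggering it with the GitHub CLI, assuming `gh` is installed and authenticated for this repository:

```bash
# Dispatch the translation pull on the branch you are working on;
# the workflow derives BRANCH_NAME from the ref it runs on.
gh workflow run pull-translation.yml --ref "$(git branch --show-current)"
```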
diff --git a/.github/workflows/push-translation.yml b/.github/workflows/push-translation.yml
new file mode 100644
index 00000000..113e5691
--- /dev/null
+++ b/.github/workflows/push-translation.yml
@@ -0,0 +1,57 @@
+name: Push translation sources
+
+on:
+  pull_request:
+    paths:
+      - 'doc/**/*.rst'
+      - 'doc/conf.py'
+      - '.github/workflows/push-translation.yml'
+jobs:
+  push-translation-sources:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Set branch name from source branch
+        run: echo "BRANCH_NAME=${GITHUB_HEAD_REF##*/}" >> $GITHUB_ENV
+
+      - name: Start translation service deployment
+        uses: bobheadxi/deployments@v0.5.2
+        id: translation
+        with:
+          step: start
+          token: ${{secrets.GITHUB_TOKEN}}
+          env: translation-${{env.BRANCH_NAME}}
+          ref: ${{github.head_ref}}
+
+      - name: Setup Python environment
+        uses: actions/setup-python@v2
+
+      - name: Setup Python requirements
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r doc/requirements.txt
+
+      - name: Build pot files
+        run: python -m sphinx . doc/locale/en -c doc -b gettext
+
+      - name: Push POT files to Crowdin
+        uses: crowdin/github-action@1.0.21
+        with:
+          upload_sources: true
+          upload_translations: false
+          crowdin_branch_name: ${{env.BRANCH_NAME}}
+          config: 'doc/crowdin.yaml'
+        env:
+          GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
+          CROWDIN_PERSONAL_TOKEN: ${{secrets.CROWDIN_PERSONAL_TOKEN}}
+
+      - name: Update deployment status
+        uses: bobheadxi/deployments@v0.5.2
+        with:
+          step: finish
+          token: ${{secrets.GITHUB_TOKEN}}
+          status: ${{job.status}}
+          deployment_id: ${{steps.translation.outputs.deployment_id}}
+          env_url: https://crowdin.com/project/tarantool-operator/ru#/${{env.BRANCH_NAME}}
diff --git a/.github/workflows/upload-translations.yml b/.github/workflows/upload-translations.yml
new file mode 100644
index 00000000..75a88c4e
--- /dev/null
+++ b/.github/workflows/upload-translations.yml
@@ -0,0 +1,40 @@
+name: Update translations on the main branch
+
+on:
+  push:
+    paths:
+      - 'doc/**/*.rst'
+      - 'doc/locale/**/*.po'
+      - '.github/workflows/upload-translations.yml'
+    branches:
+      - master
+jobs:
+  autocommit-pot-files:
+    runs-on: ubuntu-latest
+
+    steps:
+
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Setup Python environment
+        uses: actions/setup-python@v2
+
+      - name: Setup Python requirements
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r doc/requirements.txt
+
+      - name: Build pot files
+        run: python -m sphinx . doc/locale/en -c doc -b gettext
+
+      - name: Push POT files to Crowdin
+        uses: crowdin/github-action@1.1.0
+        with:
+          config: 'doc/crowdin.yaml'
+          upload_sources: true
+          upload_translations: true
+          import_eq_suggestions: true
+        env:
+          GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
+          CROWDIN_PERSONAL_TOKEN: ${{secrets.CROWDIN_PERSONAL_TOKEN}}
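For reference, a local equivalent of this workflow's push step, assuming the Crowdin CLI v3 is installed and CROWDIN_PERSONAL_TOKEN is exported:

```bash
# Rebuild the POT files, then upload them as Crowdin sources.
python -m sphinx . doc/locale/en -c doc -b gettext
crowdin upload sources --config doc/crowdin.yaml
```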
diff --git a/.gitignore b/.gitignore
index 88230f48..e1c51707 100644
--- a/.gitignore
+++ b/.gitignore
@@ -85,3 +85,6 @@ deploy/*
 
 ci/helm-chart/templates/crds/tarantool.io_*s_crd.yaml
 ci/helm-chart/templates/crds/tarantool_*_cr.yaml
+
+doc/locale/en/
+doc/output/
diff --git a/doc/README.md b/doc/README.md
new file mode 100644
index 00000000..4787c892
--- /dev/null
+++ b/doc/README.md
@@ -0,0 +1,20 @@
+[![Crowdin](https://badges.crowdin.net/tarantool-operator/localized.svg)](https://crowdin.com/project/tarantool-operator)
+
+# Tarantool Kubernetes Operator documentation
+Part of the Tarantool documentation, published at
+https://www.tarantool.io/en/doc/latest/book/cartridge/cartridge_kubernetes_guide/
+
+## Create pot files from rst
+```bash
+python -m sphinx doc doc/locale/en -c doc -b gettext
+```
+
+## Create/update po from pot files
+```bash
+sphinx-intl update -p doc/locale/en -d doc/locale -l ru
+```
+
+## Build documentation to doc/output
+```bash
+python -m sphinx doc doc/output -c doc
+```
diff --git a/doc/cleanup.py b/doc/cleanup.py
new file mode 100755
index 00000000..ed45c18d
--- /dev/null
+++ b/doc/cleanup.py
@@ -0,0 +1,46 @@
+#! /usr/bin/env python3
+import argparse
+from glob import glob
+from polib import pofile, POFile, _BaseFile
+
+parser = argparse.ArgumentParser(description='Cleanup PO and POT files')
+parser.add_argument('extension', type=str, choices=['po', 'pot', 'both'],
+                    help='cleanup files with extension: po, pot or both')
+
+
+class PoFile(POFile):  # renders a catalog without the metadata header block
+
+    def __unicode__(self):
+        return _BaseFile.__unicode__(self)  # bypass POFile's header handling
+
+    def metadata_as_entry(self):
+        class M:  # stub metadata entry that serializes to nothing
+            def __unicode__(self, _):
+                return ''
+        return M()
+
+
+def cleanup_files(extension):
+    mask = f'**/*.{extension}'
+    for file_path in glob(mask, recursive=True):
+        print(f'cleanup {file_path}')
+        po_file: POFile = pofile(file_path, klass=PoFile)
+        po_file.header = ''
+        po_file.metadata = {}
+        po_file.metadata_is_fuzzy = False
+
+        for item in po_file:
+            item.occurrences = None  # drop "#: path:line" location comments
+
+        po_file.save()
+
+
+if __name__ == "__main__":
+
+    args = parser.parse_args()
+
+    if args.extension in ['po', 'both']:
+        cleanup_files('po')
+
+    if args.extension in ['pot', 'both']:
+        cleanup_files('pot')
diff --git a/doc/conf.py b/doc/conf.py
new file mode 100644
index 00000000..b2587732
--- /dev/null
+++ b/doc/conf.py
@@ -0,0 +1,23 @@
+import sys
+import os
+
+sys.path.insert(0, os.path.abspath(''))
+
+master_doc = 'doc/cartridge_kubernetes_guide/index'
+
+source_suffix = '.rst'
+
+project = u'Tarantool-operator'
+
+exclude_patterns = [
+    'doc/locale',
+    'doc/output',
+    'doc/README.md',
+    'doc/cleanup.py',
+    'doc/requirements.txt',
+]
+
+language = 'en'
+locale_dirs = ['./doc/locale']
+gettext_compact = False
+gettext_location = True
diff --git a/doc/crowdin.yaml b/doc/crowdin.yaml
new file mode 100644
index 00000000..81b756fc
--- /dev/null
+++ b/doc/crowdin.yaml
@@ -0,0 +1,24 @@
+# https://support.crowdin.com/configuration-file/
+# https://support.crowdin.com/cli-tool-v3/#configuration
+
+"project_id" : "463364"
+"base_path" : "doc/locale"
+"base_url": "https://crowdin.com"
+"api_token_env": "CROWDIN_PERSONAL_TOKEN"
+
+
+"preserve_hierarchy": true
+
+files: [
+  {
+    "source" : "/en/**/*.pot",
+    "translation" : "/%locale_with_underscore%/LC_MESSAGES/**/%file_name%.po",
+    "update_option" : "update_as_unapproved",
+
+    "languages_mapping" : {
+      "locale_with_underscore" : {
+        "ru" : "ru"
+      }
+    }
+  }
+]
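With the configuration above, translations can also be fetched by hand. A sketch, assuming the Crowdin CLI v3 and a branch name that was pushed to Crowdin earlier:

```bash
# Download ru translations for the given Crowdin branch into doc/locale/.
crowdin download --config doc/crowdin.yaml --branch my-branch --language ru
```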
diff --git a/doc/locale/ru/LC_MESSAGES/cartridge_kubernetes_guide/index.po b/doc/locale/ru/LC_MESSAGES/cartridge_kubernetes_guide/index.po
new file mode 100644
index 00000000..5822ffe6
--- /dev/null
+++ b/doc/locale/ru/LC_MESSAGES/cartridge_kubernetes_guide/index.po
@@ -0,0 +1,807 @@
+
+msgid "Tarantool Cartridge on Kubernetes"
+msgstr ""
+
+msgid ""
+"This guide covers the full life cycle of a Tarantool Cartridge app--from "
+"developing the app to operating it on Kubernetes."
+msgstr ""
+
+msgid "Contents"
+msgstr ""
+
+msgid "Installation tools"
+msgstr ""
+
+msgid "The following tools are needed:"
+msgstr ""
+
+msgid ""
+"**cartridge-cli** is a utility for managing Cartridge applications. We "
+"need version 2.3.0 or higher. Installation instructions are available `here "
+"`__. If the "
+"installation is successful, the *cartridge* utility will be available in "
+"the system."
+msgstr ""
+
+msgid ""
+"**kubectl** is a Kubernetes cluster management tool. We need version 1.16 "
+"or higher. Installation instructions can be found `here "
+"`__."
+msgstr ""
+
+msgid ""
+"**helm** is a package manager for Kubernetes apps. We need version "
+"3.3.x. Installation instructions can be found `here "
+"`__."
+msgstr ""
+
+msgid ""
+"**minikube** is a tool for creating a local Kubernetes cluster. We need "
+"version 1.12 or higher. Installation instructions can be found `here "
+"`__."
+msgstr ""
+
+msgid ""
+"**kind** (optional) is another tool for creating a local cluster. It can "
+"be used instead of minikube. We need version 0.6.0 or higher. "
+"Installation instructions can be found `here "
+"`__."
+msgstr ""
+
+msgid "Creating an application"
+msgstr ""
+
+msgid ""
+"Let's create a Cartridge application named ``test-app`` using ``cartridge-"
+"cli``:"
+msgstr ""
+
+msgid "In the ``test-app`` directory, we get the app created from a template:"
+msgstr ""
+
+msgid ""
+"The app is fully functional and can respond to the HTTP GET request "
+"``/hello``."
+msgstr ""
+
+msgid "Check the cartridge version in *test-app-scm-1.rockspec*:"
+msgstr ""
+
+msgid ""
+"The version of Cartridge must be **>= 2.3.0**. Starting from this version, "
+"Cartridge waits for an instance to become available on its DNS address "
+"during the instance start. This is required for correct operation on "
+"Kubernetes. For versions below 2.3.0, an application must be customized "
+"manually. See the `example `_ of how "
+"to do this."
+msgstr ""
+
+msgid "Building the application"
+msgstr ""
+
+msgid "Let's create a Docker image using ``cartridge-cli``:"
+msgstr ""
+
+msgid "Upload the image to the Docker registry:"
+msgstr ""
+
+msgid ""
+"You must be logged in via ``docker login`` and have access rights to the "
+"target registry."
+msgstr ""
+
+msgid "Creating a Kubernetes cluster"
+msgstr ""
+
+msgid ""
+"If you have a ready-made cluster in the cloud, you can use it. If not, we "
+"suggest two ways to create a local cluster:"
+msgstr ""
+
+msgid "using :ref:`minikube `"
+msgstr ""
+
+msgid "using :ref:`kind `."
+msgstr ""
+
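The concrete commands live in the rst sources, not in this po file; a sketch of what the two options above describe, with flags assumed from the versions named in the guide:

```bash
# Local cluster with minikube (Kubernetes 1.16.4, 4GB of RAM recommended):
minikube start --kubernetes-version v1.16.4 --memory 4096
# ...or with kind:
kind create cluster --image kindest/node:v1.16.4
```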
+msgid "Using *minikube*"
+msgstr ""
+
+msgid ""
+"Create a Kubernetes cluster of version 1.16.4 with 4GB of RAM (recommended):"
+msgstr ""
+
+msgid "Wait for the cluster state to be *Ready*:"
+msgstr ""
+
+msgid "Using *kind*"
+msgstr ""
+
+msgid ""
+"Create a Kubernetes cluster of version 1.16.4 by using the *kind* utility as"
+" an alternative to *minikube*:"
+msgstr ""
+
+msgid "Let's check the cluster status:"
+msgstr ""
+
+msgid "Launch the application"
+msgstr ""
+
+msgid ""
+"To install the Tarantool Kubernetes operator and deploy the cluster, we will"
+" use the ``helm`` utility. Charts are published in our repository. Let’s add"
+" it:"
+msgstr ""
+
+msgid "Two charts are available in the repository:"
+msgstr ""
+
+msgid ""
+"The ``tarantool/tarantool-operator`` chart installs and configures the "
+"operator that manages Tarantool Cartridge clusters."
+msgstr ""
+
+msgid ""
+"The ``tarantool/cartridge`` chart is a template for creating Tarantool "
+"Cartridge clusters. With the default settings, this chart deploys an example"
+" application consisting of 3 instances. The chart works only in conjunction "
+"with the Tarantool Kubernetes operator."
+msgstr ""
+
+msgid ""
+"Use the same version with both charts. If you set the ``tarantool-operator``"
+" chart to version 0.0.8, set the ``cartridge`` chart to the same version "
+"0.0.8."
+msgstr ""
+
+msgid "Install *tarantool-operator* in the *tarantool* namespace:"
+msgstr ""
+
+msgid "Let's wait until the pod with the operator is ready:"
+msgstr ""
+
+msgid ""
+"In the meantime, let’s talk about what the Tarantool operator is and why it "
+"is needed."
+msgstr ""
+
+msgid "Tarantool Kubernetes operator"
+msgstr ""
+
+msgid ""
+"This is a Kubernetes application that can manage Tarantool Cartridge "
+"resources."
+msgstr ""
+
+msgid "What does this mean for us?"
+msgstr ""
+
+msgid ""
+"We don't need to know how to perform administrative actions such as joining "
+"a node or creating a replica set. The operator knows how to do this better, "
+"and if you give it the desired system configuration, it will bring the "
+"cluster to the desired state."
+msgstr ""
+
+msgid ""
+"The Tarantool Kubernetes operator itself is an implementation of the "
+"Kubernetes Operator design pattern. It automates work with custom resources "
+"using controllers that respond to various events and changes."
+msgstr ""
+
+msgid "The following links can help you understand this pattern:"
+msgstr ""
+
+msgid ""
+"`Official description on kubernetes.io "
+"`_;"
+msgstr ""
+
+msgid ""
+"`Overview from the creators of the pattern (CoreOS) "
+"`_;"
+msgstr ""
+
+msgid ""
+"`Post on Habr from Lamoda about the development of the operator "
+"`_."
+msgstr ""
+
+msgid ""
+"In the meantime, our pod with ``tarantool-operator`` went into a *Running* "
+"state. The next step is to install the app using the ``tarantool/cartridge``"
+" helm chart. To do this, prepare a description of the desired system."
+msgstr ""
+
+msgid "Deploying a Tarantool Cartridge application"
+msgstr ""
+
+msgid ""
+"After you have deployed the cluster and installed the operator, you can move"
+" to the next step--launching the app."
+msgstr ""
+
+msgid ""
+"We will deploy the app using the ``tarantool/cartridge`` chart. This is a "
+"template. Run it with the default settings to get our example application "
+"with 3 instances. If you define your own settings, you can deploy an "
+"application of any topology using Tarantool Cartridge."
+msgstr ""
+
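A sketch of the install sequence these strings describe; the repo URL, namespace, and release names are assumptions:

```bash
helm repo add tarantool https://tarantool.github.io/tarantool-operator
helm install tarantool-operator tarantool/tarantool-operator \
    --namespace tarantool --create-namespace --version 0.0.8
helm install test-app tarantool/cartridge \
    --namespace tarantool --version 0.0.8 -f values.yaml
```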
+msgid ""
+"Let's have a look at the settings in the ``values.yaml`` file. Comments "
+"provide a description of each parameter:"
+msgstr ""
+
+msgid "With this configuration, we will get the following:"
+msgstr ""
+
+msgid "A Tarantool Cartridge cluster called ``test-app``."
+msgstr ""
+
+msgid "Two replica sets in the cluster: ``routers`` and ``storages``."
+msgstr ""
+
+msgid "One Tarantool instance in the ``routers`` replica set."
+msgstr ""
+
+msgid "Two instances, master and replica, in the ``storages`` replica set."
+msgstr ""
+
+msgid ""
+"Each replica set performs the roles listed in the ``RolesToAssign`` "
+"parameter."
+msgstr ""
+
+msgid "Install the app:"
+msgstr ""
+
+msgid "Let's wait for all the pods to launch:"
+msgstr ""
+
+msgid ""
+"To check the cluster, we forward ports from one of the pods and go to the "
+"Cartridge dashboard:"
+msgstr ""
+
+msgid ""
+"Now the Tarantool Cartridge Web UI is available at "
+"``http://localhost:8081``."
+msgstr ""
+
+msgid "Cluster management"
+msgstr ""
+
+msgid "Adding a new replica"
+msgstr ""
+
+msgid "To increase the number of replicas in a replica set:"
+msgstr ""
+
+msgid "Change the configuration in the ``values.yaml`` file."
+msgstr ""
+
+msgid "Update the app using the ``helm upgrade`` command."
+msgstr ""
+
+msgid ""
+"The ``ReplicaCount`` parameter is responsible for the number of instances in"
+" a replica set. Set it to ``3`` for the ``storages`` replica set:"
+msgstr ""
+
+msgid "Update the app:"
+msgstr ""
+
+msgid ""
+"Let's wait until all the new pods go into the **Running** state and are "
+"displayed in the Cartridge Web UI."
+msgstr ""
+
+msgid "The ``storages`` replica set has 3 instances: 1 master and 2 replicas."
+msgstr ""
+
+msgid "Adding a shard (replica set)"
+msgstr ""
+
+msgid ""
+"The ``ReplicaSetCount`` parameter defines the number of replica sets of "
+"the same type."
+msgstr ""
+
+msgid "Let's increase the number of ``routers`` replica sets to ``2``:"
+msgstr ""
+
+msgid "Let's wait for the new pod to start:"
+msgstr ""
+
+msgid "Updating application version"
+msgstr ""
+
+msgid ""
+"Currently, the app logic contains one HTTP endpoint ``/hello`` that returns "
+"the string ``Hello world!`` in response to a GET request."
+msgstr ""
+
+msgid "To check this, let's forward the ports to the desired node:"
+msgstr ""
+
+msgid "And then execute the request:"
+msgstr ""
+
+msgid ""
+"Let's add another endpoint that will return the string \"Hello world, new "
+"version of the app!\". To do this, add another ``httpd:route`` in the "
+"``init`` function in the ``app/roles/custom.lua`` role:"
+msgstr ""
+
+msgid "Pack the new version of the app:"
+msgstr ""
+
+msgid "Upload the new image version to the Docker registry:"
+msgstr ""
+
+msgid ""
+"Update the ``values.yaml`` configuration file by specifying a new "
+"``image.tag``:"
+msgstr ""
+
+msgid "Update the app on Kubernetes:"
+msgstr ""
+
+msgid ""
+"The Tarantool Kubernetes operator uses the **OnDelete** update policy. "
+"This means that the update has reached the cluster, but the pods will "
+"update the app image only after a restart:"
+msgstr ""
+
+msgid "Let's wait for the pods to start again and check the update:"
+msgstr ""
+
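Because of the **OnDelete** policy described above, pods pick up a new image only when they are recreated. A sketch, with release, namespace, and pod names assumed:

```bash
helm upgrade test-app tarantool/cartridge -f values.yaml -n tarantool
# Restart instances one by one so they pull the new image:
kubectl delete pod storage-0-0 -n tarantool
```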
+msgid "Running multiple Tarantool Cartridge clusters in different namespaces"
+msgstr ""
+
+msgid ""
+"The Tarantool Kubernetes operator can manage Tarantool Cartridge clusters "
+"only in its own namespace. Therefore, to deploy multiple Cartridge clusters "
+"in different namespaces, you need to deploy an operator in each of them."
+msgstr ""
+
+msgid ""
+"To install an operator in several namespaces, just specify the required "
+"namespace during installation:"
+msgstr ""
+
+msgid ""
+"These commands install the operator into the namespaces ``NS_1`` and "
+"``NS_2``. Then, in each of them, you can run a Tarantool Cartridge cluster."
+msgstr ""
+
+msgid ""
+"As a result, we have two namespaces. Each has an operator and a Tarantool "
+"Cartridge cluster."
+msgstr ""
+
+msgid "Deleting a cluster"
+msgstr ""
+
+msgid "To remove a cluster, execute the following command:"
+msgstr ""
+
+msgid ""
+"After a while, all the pods of our application will disappear. Among the "
+"pods in the ``tarantool`` namespace, only the Tarantool Kubernetes operator "
+"will remain."
+msgstr ""
+
+msgid "If you need to remove the Tarantool Kubernetes operator, execute:"
+msgstr ""
+
+msgid ""
+"``helm uninstall`` does not remove persistent volumes. To remove them, you "
+"need to additionally perform the following:"
+msgstr ""
+
+msgid "Failover"
+msgstr ""
+
+msgid ""
+"Failover is the replica set leader election mechanism. You can read more "
+"about it `here "
+"`__."
+msgstr ""
+
+msgid ""
+"The ability to configure failover via Kubernetes manifests will appear later."
+msgstr ""
+
+msgid "Eventual mode"
+msgstr ""
+
+msgid "The default mode. It uses the SWIM protocol to detect failures."
+msgstr ""
+
+msgid "Stateful mode"
+msgstr ""
+
+msgid ""
+"Uses external storage for coordination. For it to work, you need to enable "
+"the ``failover-coordinator`` role on several instances."
+msgstr ""
+
+msgid ""
+"To do this, add the role to the description of the replica sets in "
+"*values.yml*:"
+msgstr ""
+
+msgid ""
+"Updating roles is possible in Tarantool operator versions later than "
+"0.0.8."
+msgstr ""
+
+msgid "And run the upgrade:"
+msgstr ""
+
+msgid ""
+"Once at least one ``failover-coordinator`` role is active, we can enable "
+"stateful mode. It has two state providers: etcd and stateboard."
+msgstr ""
+
+msgid "etcd"
+msgstr ""
+
+msgid ""
+"The etcd cluster is deployed independently. If you don't have one, the "
+"easiest way to install etcd is with the `etcd-operator "
+"`_ "
+"helm chart."
+msgstr ""
+
+msgid ""
+"We'll need a list of available etcd cluster IPs, the prefix for storage "
+"keys, and credentials (user name and password)."
+msgstr ""
+
+msgid ""
+"How to set up stateful failover is described on the documentation `page "
+"`__."
+msgstr ""
+
+msgid "Stateboard"
+msgstr ""
+
+msgid ""
+"How to install a stateboard is described on this documentation `page "
+"`__."
+msgstr ""
+
+msgid "Troubleshooting"
+msgstr ""
+
+msgid ""
+"When creating, updating, or scaling a cluster, errors may occur due to lack "
+"of physical resources."
+msgstr ""
+
+msgid "Let's examine possible error indications, root causes, and solutions."
+msgstr ""
+
+msgid "Insufficient CPU"
+msgstr ""
+
+msgid ""
+"After executing ``helm install/upgrade``, the pods remain in the "
+"**Pending** state."
+msgstr ""
+
+msgid "It looks like this:"
+msgstr ""
+
+msgid "Let's take a look at the events of one of the pending pods:"
+msgstr ""
+
+msgid ""
+"It is now clear that we don't have enough CPU. You can reduce the allocated "
+"CPU size in the ``values.yaml`` configuration file--the ``CPUallocation`` "
+"parameter."
+msgstr ""
+
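A sketch of inspecting the Pending case described above; pod name and namespace are assumed:

```bash
kubectl get pods -n tarantool
# The Events section reports FailedScheduling with "Insufficient cpu":
kubectl describe pod storage-0-0 -n tarantool
```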
+msgid "Insufficient disk space"
+msgstr ""
+
+msgid ""
+"After executing ``helm install/upgrade``, the pods remain in the "
+"**ContainerCreating** state. Let's take a look at the events:"
+msgstr ""
+
+msgid ""
+"Such events indicate that there is not enough disk space to create the "
+"volumes. You can change the size of the allocated storage using the "
+"``DiskSize`` parameter in the *values.yaml* file for replica sets. The error "
+"can also be resolved by increasing the size of the physical cluster disk."
+msgstr ""
+
+msgid "CrashLoopBackOff status"
+msgstr ""
+
+msgid ""
+"Pods do not start and have the status ``CrashLoopBackOff``. In short, this "
+"means that the container starts and crashes soon after due to an error in "
+"the code."
+msgstr ""
+
+msgid ""
+"Running ``kubectl describe pod`` will give us more information on that pod:"
+msgstr ""
+
+msgid ""
+"We see that the container cannot start. More precisely, the container "
+"starts but stops soon after due to an internal error. To understand what "
+"is happening to it, let's look at its logs:"
+msgstr ""
+
+msgid ""
+"We see that the application crashes with an error: ``unhandled error``. "
+"This is just an example; in reality, any other error could crash the "
+"Tarantool instance. Fix the bug in the application and update the "
+"application to the new version."
+msgstr ""
+
+msgid "Recreating replicas"
+msgstr ""
+
+msgid ""
+"You may need to recreate the replicas: delete existing replicas, create new "
+"ones, and join them back to the replica set. Recreating replicas may be "
+"necessary when, for example, replication breaks down."
+msgstr ""
+
+msgid "Let's see how to do this. For example, you have a ``storage`` role:"
+msgstr ""
+
+msgid ""
+"Based on this description, after installation you will have the following "
+"pods:"
+msgstr ""
+
+msgid ""
+"Let's try to reduce the number of replicas in the storage replica set. To "
+"do so, change the ``ReplicaCount`` number for the ``storage`` role from "
+"``3`` to ``2`` and run ``upgrade``:"
+msgstr ""
+
+msgid ""
+"You will see that ``storage-0-2`` and ``storage-1-2`` become \"Terminating\""
+" and then disappear from the pods list:"
+msgstr ""
+
+msgid "Let's check what the cluster looks like in the web UI:"
+msgstr ""
+
+msgid ""
+"Replicas storage-0-2 and storage-1-2 have a note \"Server status is 'dead'\""
+" next to them."
+msgstr ""
+
+msgid ""
+"Here we have turned off the third replica of each ``storage`` replica set. "
+"Note that we did not expel these replicas from the cluster. If we want to "
+"return them and not lose data, return the required number of replicas of "
+"the storage role and run ``upgrade`` again."
+msgstr ""
+
+msgid ""
+"However, if you need to delete some replicas' data, you can delete the "
+"corresponding :abbr:`PVC (persistent volume claim)` before upgrading."
+msgstr ""
+
+msgid ""
+"You can see that the PVCs of the pods we deleted still exist. Let's remove "
+"the data of ``storage-1-2``:"
+msgstr ""
+
+msgid ""
+"Now you need to return the value ``3`` in the ``ReplicaCount`` field of the "
+"storage role and run ``upgrade``:"
+msgstr ""
+
+msgid ""
+"After a while, new pods will be up and configured. The pod whose data was "
+"deleted may get stuck in the ``unconfigured`` state. If this happens, try to"
+" restart it:"
+msgstr ""
+
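A sketch of the PVC cleanup and restart steps these strings describe; the claim and pod names are hypothetical, check the real ones with kubectl get:

```bash
kubectl get pvc -n tarantool
# Hypothetical claim name for the storage-1-2 instance:
kubectl delete pvc www-storage-1-2 -n tarantool
# Restart a pod stuck in the unconfigured state:
kubectl delete pod storage-1-2 -n tarantool
```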
+msgid ""
+"Why does it work? The Tarantool operator does not expel nodes from the "
+"cluster, but only \"shuts them down\". Therefore, it is impossible to "
+"reduce the number of replicas this way. But you can recreate a replica, "
+"since the UID of each instance is generated based on its name, for example "
+"``storage-1-2``. This ensures that the new instance with the given name "
+"replaces the old one."
+msgstr ""
+
+msgid ""
+"This method is recommended only when there is no other way. It has its own "
+"limitations:"
+msgstr ""
+
+msgid ""
+"Restarting nodes is possible only in descending order of the number in the "
+"replica set. If you have a replica set with ``node-0-0``, ``node-0-1``, "
+"``node-0-2``, and ``node-0-3``, and you want to recreate only "
+"``node-0-1``, then the nodes ``node-0-2`` and ``node-0-3`` will also "
+"restart along with it."
+msgstr ""
+
+msgid ""
+"All nodes that belong to the selected role will be restarted. It isn't "
+"possible to select a specific replica set and only restart its instances."
+msgstr ""
+
+msgid ""
+"If the replica set leader's number is higher than the number of the "
+"restarted replica, the restart will stop the leader as well. This will make "
+"the replica set unable to receive new write requests. Please be very "
+"careful when recreating replicas."
+msgstr ""
+
+msgid "Customization"
+msgstr ""
+
+msgid ""
+"For most cases, the ``tarantool/cartridge`` helm chart is enough for you. "
+"However, if customization is required, you can continue to use the chart "
+"by making your own changes. You can also use ``deployment.yaml`` and "
+"``kubectl`` instead of ``helm``."
+msgstr ""
+
+msgid "Sidecar containers"
+msgstr ""
+
+msgid ""
+"What are they? With Kubernetes, it is possible to create several containers "
+"inside one pod that share common resources such as disk storage and network "
+"interfaces. Such containers are called sidecar containers."
+msgstr ""
+
+msgid ""
+"Learn more about this architectural pattern `here "
+"`__."
+msgstr ""
+
+msgid ""
+"To implement this on Kubernetes, you need to add more containers to the "
+"description of the required resource. Let's try to add another service "
+"container with ``nginx`` to each pod that runs a Tarantool instance "
+"container, based on `this `_ "
+"article."
+msgstr ""
+
+msgid ""
+"To do this, you will need to change the ``tarantool/cartridge`` chart. You "
+"can find it `here `__. Add a new container with "
+"``nginx`` to the ``ReplicasetTemplate``, which can be found in the "
+"``templates/deployment.yaml`` file."
+msgstr ""
+
+msgid ""
+"It is important to describe additional containers strictly after the pim-"
+"storage container. Otherwise, problems may occur when updating the version "
+"of the application."
+msgstr ""
+
+msgid ""
+"By default, the Tarantool Kubernetes operator chooses the first container "
+"in the list as the application container."
+msgstr ""
+
+msgid ""
+"Now, let's start the installation specifying the path to the directory with "
+"the customized chart:"
+msgstr ""
+
+msgid "If everything goes well, it will be visible in the pod list:"
+msgstr ""
+
+msgid "``READY 2/2`` means that 2 containers are ready inside the pod."
+msgstr ""
+
+msgid "Installation in an internal network"
+msgstr ""
+
+msgid "Delivery of tools"
+msgstr ""
+
+msgid ""
+"We need to deliver the ``tarantool-cartridge`` and ``tarantool-operator`` "
+"charts and the image of your application into the internal network."
+msgstr ""
+
+msgid "You can download the charts from the following links:"
+msgstr ""
+
+msgid ""
+"`tarantool-operator v0.0.8 `_"
+msgstr ""
+
+msgid ""
+"`cartridge v0.0.8 `_."
+msgstr ""
+
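The same chart archives can also be pulled on any machine with repository access before carrying them inside; the public repo URL is an assumption:

```bash
helm repo add tarantool https://tarantool.github.io/tarantool-operator
helm pull tarantool/tarantool-operator --version 0.0.8
helm pull tarantool/cartridge --version 0.0.8
```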
+msgid ""
+"Next, you need to pack a Docker image with the ``tarantool-operator``. "
+"First, let's pull the required version from Docker Hub:"
+msgstr ""
+
+msgid "And pack it into an archive:"
+msgstr ""
+
+msgid ""
+"After delivering the archive with the container to the target location, you "
+"need to load the image into your local Docker:"
+msgstr ""
+
+msgid ""
+"All that remains is to push the image to the internal Docker registry. We "
+"will use an example Docker registry hosted on ``localhost:5000``:"
+msgstr ""
+
+msgid ""
+"You can deliver the image with the application using the method described "
+"above."
+msgstr ""
+
+msgid "Installing the Tarantool Kubernetes operator"
+msgstr ""
+
+msgid ""
+"Let's describe the custom operator values in the ``operator_values.yaml`` "
+"file:"
+msgstr ""
+
+msgid ""
+"And install the operator specifying the path to the archive with the chart:"
+msgstr ""
+
+msgid "Check the installation:"
+msgstr ""
+
+msgid "Installing the Tarantool Cartridge app"
+msgstr ""
+
+msgid ""
+"We have pushed the app image to the local Docker registry beforehand. What "
+"remains is to customize the ``values.yaml`` file by specifying the available"
+" repository:"
+msgstr ""
+
+msgid ""
+"The complete configuration of the ``values.yaml`` can be found in the "
+"instructions for installing the Tarantool Cartridge application described"
+" earlier in this guide."
+msgstr ""
+
+msgid "It remains to unpack the Cartridge chart:"
+msgstr ""
+
+msgid "And run the installation by specifying the path to the chart:"
+msgstr ""
+
+msgid ""
+"Let's take a look at the pods to make sure the installation is successful:"
+msgstr ""
+
+msgid "image2"
+msgstr ""
+
+msgid "image3"
+msgstr ""
diff --git a/doc/requirements.txt b/doc/requirements.txt
new file mode 100644
index 00000000..2fe89f07
--- /dev/null
+++ b/doc/requirements.txt
@@ -0,0 +1,3 @@
+Sphinx==4.0.2
+sphinx-intl==2.0.1
+polib==1.1.1
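For reference, the pinned requirements above back the local workflow from doc/README.md:

```bash
python -m pip install -r doc/requirements.txt
python -m sphinx doc doc/output -c doc
```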