diff --git a/src/reference/.gitignore b/src/reference/.gitignore index fcdcaced..7b5b5c78 100644 --- a/src/reference/.gitignore +++ b/src/reference/.gitignore @@ -1,3 +1,3 @@ -modules/ROOT/nav.adoc -modules/ROOT/pages/_partials/dyn/ -modules/ROOT/assets/images/graphviz/ +modules/*/nav.adoc +modules/*/pages/_partials/dyn/ +modules/*/assets/images/graphviz/ diff --git a/src/reference/Makefile b/src/reference/Makefile index 5a204793..c0b8dab7 100644 --- a/src/reference/Makefile +++ b/src/reference/Makefile @@ -1,13 +1,16 @@ ## Rudder User Documentation Makefile -.PHONY: clean $(ADOC_DYN_FILES) +.PHONY: clean $(ADOC_DYN_FILES) modules/ROOT/nav.adoc -ADOC_SRC_FILES = $(shell find modules/ROOT/ -type f -name '*.adoc') +ADOC_SRC_FILES = $(shell find modules/ROOT/pages -type f -name '*.adoc') ADOC_DYN_FILES = generic_methods.adoc hooks.adoc rudder.adoc GRAPHVIZ_FILES = $(shell find modules/ROOT/assets/graphviz -type f -name '*.dot') GRAPHVIZ_IMAGES = $(addprefix modules/ROOT/assets/images/graphviz/, $(notdir $(GRAPHVIZ_FILES:dot=png))) +MODULES = ROOT installation + content: $(GRAPHVIZ_IMAGES) $(ADOC_DYN_FILES) modules/ROOT/nav.adoc +## nav.adoc ## Dynamic content @@ -17,7 +20,10 @@ content: $(GRAPHVIZ_IMAGES) $(ADOC_DYN_FILES) modules/ROOT/nav.adoc cp dependencies/$@ modules/ROOT/pages/_partials/dyn/$@ modules/ROOT/nav.adoc: $(ADOC_SRC_FILES) - ./tools/generate-nav.py > modules/ROOT/nav.adoc + ./tools/generate-nav.py ROOT "Introduction" > modules/ROOT/nav.adoc + ./tools/generate-nav.py installation "Installation" > modules/installation/nav.adoc + ./tools/generate-nav.py usage "Usage" > modules/usage/nav.adoc + ./tools/generate-nav.py administration "Administration" > modules/administration/nav.adoc modules/ROOT/assets/images/graphviz/%.png: modules/ROOT/assets/graphviz/%.dot mkdir -p modules/ROOT/assets/images/graphviz diff --git a/src/reference/antora.yml b/src/reference/antora.yml index 3237f2bd..8a18a93d 100644 --- a/src/reference/antora.yml +++ 
b/src/reference/antora.yml @@ -3,4 +3,9 @@ title: Reference manual version: "5.0" nav: - modules/ROOT/nav.adoc +- modules/installation/nav.adoc +- modules/usage/nav.adoc +- modules/administration/nav.adoc +- modules/plugins/nav.adoc +- modules/reference/nav.adoc diff --git a/src/reference/modules/ROOT/assets/images/cover.png b/src/reference/modules/ROOT/assets/images/cover.png deleted file mode 100644 index 4c33d7fe..00000000 Binary files a/src/reference/modules/ROOT/assets/images/cover.png and /dev/null differ diff --git a/src/reference/modules/ROOT/assets/images/cover.svg b/src/reference/modules/ROOT/assets/images/cover.svg deleted file mode 100644 index 8deda090..00000000 --- a/src/reference/modules/ROOT/assets/images/cover.svg +++ /dev/null @@ -1,123 +0,0 @@ - - - - - - - - - - - - image/svg+xml - - - - - - - - - Rudder User Documentation - Normation SASJonathan ClarkeNicolas CharlesFabrice Flore-Thébault - - diff --git a/src/reference/modules/ROOT/assets/images/root-server-components.png b/src/reference/modules/ROOT/assets/images/root-server-components.png deleted file mode 100644 index a6f385db..00000000 Binary files a/src/reference/modules/ROOT/assets/images/root-server-components.png and /dev/null differ diff --git a/src/reference/modules/ROOT/assets/images/rudder-configurationpolicy-rules.png b/src/reference/modules/ROOT/assets/images/rudder-configurationpolicy-rules.png deleted file mode 100644 index 7a79bc29..00000000 Binary files a/src/reference/modules/ROOT/assets/images/rudder-configurationpolicy-rules.png and /dev/null differ diff --git a/src/reference/modules/ROOT/assets/images/rudder-nodemanagement-acceptnewnodes.png b/src/reference/modules/ROOT/assets/images/rudder-nodemanagement-acceptnewnodes.png deleted file mode 100644 index 2bd9e5a1..00000000 Binary files a/src/reference/modules/ROOT/assets/images/rudder-nodemanagement-acceptnewnodes.png and /dev/null differ diff --git a/src/reference/modules/ROOT/assets/images/rudder-packages.png 
b/src/reference/modules/ROOT/assets/images/rudder-packages.png deleted file mode 100644 index 3aa439a7..00000000 Binary files a/src/reference/modules/ROOT/assets/images/rudder-packages.png and /dev/null differ diff --git a/src/reference/modules/ROOT/nav.list b/src/reference/modules/ROOT/nav.list new file mode 100644 index 00000000..ad72aba2 --- /dev/null +++ b/src/reference/modules/ROOT/nav.list @@ -0,0 +1,3 @@ +index.adoc +key_features.adoc +architecture_and_dependencies.adoc diff --git a/src/reference/modules/ROOT/pages/00_introduction/01_presentation.adoc b/src/reference/modules/ROOT/pages/00_introduction/01_presentation.adoc deleted file mode 100644 index f9bb0ae6..00000000 --- a/src/reference/modules/ROOT/pages/00_introduction/01_presentation.adoc +++ /dev/null @@ -1,90 +0,0 @@ -== Introduction - -[[what-is-rudder]] -=== What is Rudder? - -image::big-rudder-logo.png["Rudder logo", align="center"] - -Rudder is an easy to use, web-driven, role-based solution for IT Infrastructure -Automation and Compliance. With a focus on continuously checking configurations -and centralising real-time status data, Rudder can show a high-level summary -(_ISO 27001 rules are at 100%!_) and break down noncompliance issues to a deep -technical level (_Host prod-web-03: SSH server configuration allows root logins_). - -A few things that make Rudder stand out: - -* A *simple framework* allows you to *extend the built-in rules* to implement - specific low-level configuration patterns, however complex they may be, using - simple building blocks (_ensure package installed in version X_, _ensure file content_, - _ensure line in file_, etc.). A graphical builder lowers the technical level required to use this. -* Each policy can be independently set to be automatically *checked or enforced* - on a policy or host level. In Enforce mode, each remediation action is recorded, - showing the value of these invisible fixes. 
-* Rudder works on almost *every kind of device*, so you’ll be managing physical - and virtual servers in the data center, cloud instances, and embedded IoT devices - in the same way. -* Rudder is designed for *critical environments* where a *security* breach can mean - more than a blip in the sales stats. Built-in features include change requests, - audit logs, and strong authentication. -* Rudder relies on an agent that needs to be installed on all hosts to audit. - The *agent is very lightweight* (10 to 20 MB of RAM at peak) and *blazingly fast* - (it’s written in C and takes less than 10 seconds to verify 100 rules). Installation - is self-contained, via a single package, and can auto-update to limit agent - management burden. -* Rudder is a *true and professional open source* solution—the team behind Rudder - doesn’t believe in the dual-speed licensing approach that makes you reinstall - everything and promotes open source as little more than a “demo version.” - -Rudder is an established project with *several 10000s of node managed*, in companies -from small to biggest-in-their-field. Typical deployments manage 100s to 1000s of nodes. -The biggest known deployment in 2017 is about 7000 nodes. - -image::dashboard-overview.png["Rudder dashboard", align="center"] - -==== Made for production environments - -We believe that there is a growing impedence mismatch between the Short Time of -application development and deployement, and the Long Time of the infrastructure. -The latter need rationalisation, stability and conformity before catching the hyped -techno of the day, to be able to deliver reliable technical platform, continuously -working with a minimum of risks. - -Rudder was made for the Long Time, to help team deliver efficient infrastructures with -simplicity, giving them feedback where needed, keeping them alert of possible -incoming problem, continously checking conformity to their rules, and all of that -whatever the infrastructure they choose to build. 
- -image::introduction/build_run_devops.png["Modern IT production of services and Open Source automation tools stack", align="center"] - -To achieve these goals, Rudder goes beyond simple automation of commands or -configurations. Rudder continuously maintains your infrastructure to keep it -conform with your configurations and security rules. - -At each level (global, by configuration policy, by node, etc), you can choose to -either *Audit* the component - and no modification at all will made on it -, or to -*Enforce* the policy, automatically correcting a drift if needed. - -==== Different roles for a better accessibility - -Rudder was thought from the start for plug&play-ability: easy to install and to -upgrade, easy to start with and growth with. - -Rudder comes with a graphical interface, a standard library of configuration -policy ready to use, and a graphical rule editor. - -image::introduction/web_api_cli.png["Use what best feets your need: Web interface, API, or console", align="center"] - -Developers can script Rudder through its APIs and security teams can check -conformity level to their policies or inventory (both software and hardware) of a -server at any time. - - -==== Universality - -Rudder agent is extremely fast, light, and versatile. It works on a wide variety -of OS or hardware, from physical server to cloud instance, user laptops or even -Digital Cities and IoT objects. 
- -image::introduction/agent_output.png["Versatile agent", align="center"] - - diff --git a/src/reference/modules/ROOT/pages/10_installation/00_introduction.adoc b/src/reference/modules/ROOT/pages/10_installation/00_introduction.adoc deleted file mode 100644 index fee0e7fb..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/00_introduction.adoc +++ /dev/null @@ -1,2 +0,0 @@ -== Installation - diff --git a/src/reference/modules/ROOT/pages/10_installation/05_requirements/00_intro.adoc b/src/reference/modules/ROOT/pages/10_installation/05_requirements/00_intro.adoc deleted file mode 100644 index 53aa9f4f..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/05_requirements/00_intro.adoc +++ /dev/null @@ -1,4 +0,0 @@ - -[[rudder-installation-requirements]] -=== Requirements - diff --git a/src/reference/modules/ROOT/pages/10_installation/05_requirements/05_requirements.adoc b/src/reference/modules/ROOT/pages/10_installation/05_requirements/05_requirements.adoc deleted file mode 100644 index 89cb9fbb..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/05_requirements/05_requirements.adoc +++ /dev/null @@ -1,28 +0,0 @@ - -[[configure-the-network]] -==== Networking - -.Network Flows -[options="header"] -|======================= -|To|From|Port|Usage -|Root Server|User or API client| *tcp/443* (https) | Access Web interface/API -.6+|Policy Server |Any Node|*udp/514* (or tcp/514) |Send reports -.3+|Linux or AIX Node | *tcp/443* (https/WebDAV) | Send inventories -| *tcp/5309* |Fetch policies -| _tcp/5310 (optional)_ |Debug policy copy -|AIX Node | *tcp/80* (http/WebDAV) | Send inventories -|Windows DSC Node | *tcp/443* (https/WebDAV) | Send inventories and fetch policies -|Linux or AIX Node | Policy Server | _tcp/5309 (optional)_ | Trigger remote agent run -|======================= - -Note: The Policy Server is the server configured to manage the node, and can be -either a Root Server or a Relay Server. 
- -===== DNS - Name resolution - -If you want to be able to trigger agent runs from the Root Server (without -having to wait for regular automated run), -you will need your Root Server (or Relay Server) to be able to resolve your nodes -using the provided hostname. - diff --git a/src/reference/modules/ROOT/pages/10_installation/05_requirements/10_jvm_requirements.adoc b/src/reference/modules/ROOT/pages/10_installation/05_requirements/10_jvm_requirements.adoc deleted file mode 100644 index e8cfcd83..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/05_requirements/10_jvm_requirements.adoc +++ /dev/null @@ -1,25 +0,0 @@ - -[[jvm-requirements]] -==== JVM Security Policy - -Rudder needs `unlimited strength` security policy because it uses a variety of advanced -hashing and cryptographic algorithms only available in that mode. - -Any recent JVM (JDK 8 > 8u161, all JDK 9 and more recent) is configured by default with this policy. - -You can check your case by running the following command on your server: - ----- - -jrunscript -e 'exit (javax.crypto.Cipher.getMaxAllowedKeyLength("RC5") >= 256 ? 0 : 1);'; echo $? - ----- - -If it returns 0, you have the correct policy. In other cases, you will need to change it. - -For that, you can download the -http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html[`unlimited strength` policy for JDK 8 here]. - - -Then, simply copy the `java.policy` file into `$JAVA_HOME/jre/lib/security/java.policy`. 
- diff --git a/src/reference/modules/ROOT/pages/10_installation/05_requirements/21_supported_architecture.adoc b/src/reference/modules/ROOT/pages/10_installation/05_requirements/21_supported_architecture.adoc deleted file mode 100644 index 8ed0c48b..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/05_requirements/21_supported_architecture.adoc +++ /dev/null @@ -1,65 +0,0 @@ -[[node-supported-os, the list of supported Operating Systems for Nodes]] - -==== Fully supported Operating Systems - -Fully supported Operating Systems are systems that are frequently built and tested on our servers. -Partially suported Operating Systems are systems that have been built and tested at least once but that have not seen continuous flow of fixes. - -===== For Rudder Nodes - -The following operating systems are supported for Rudder Nodes and packages are -available for these platforms: - -GNU/Linux: - -* Debian 5 to 9 - -* RedHat Enterprise Linux (RHEL) / RHEL-like 3 and 5 to 7 - -* SuSE Linux Enterprise Server (SLES) 10 SP3, 11 and 12 - -* Ubuntu 10.04 LTS (Lucid), 12.04 LTS (Precise), 14.04 LTS (Trusty), 16.04 LTS (Xenial), 18.04 LTS (Bionic) - -Other Unix systems: - -* IBM AIX 5.3, 6.1 and 7.1 - -Windows: - -* xref:10_installation/05_requirements/21_supported_architecture.adoc#install-on-windows[Microsoft Windows] Server 2008 R2, 2012, 2012 R2, 2016 - -[TIP] - -[[install-on-windows, Install on Microsoft Windows]] - -.Windows and AIX Nodes - -==== - -* On Windows, installing Rudder requires the DSC (Desired State Configuration) plugin and Powershell 4.0 or more -* For IBM AIX, pre-built RPM packages are distributed by Normation only - -Hence, as a starting point, we suggest that you only use Linux machines. Once -you are accustomed to Rudder, contact Normation to obtain a demo version for -these platforms. 
- -==== - - -[[server-supported-os, the list of supported Operating Systems for Root server]] - -===== For Rudder Root Server - -The following operating systems are supported as a Root server: - -GNU/Linux: - -* Debian 8 and 9 - -* RedHat Enterprise Linux (RHEL) / RHEL-like 6 and 7 - -* SuSE Linux Enterprise Server (SLES) 11 SP1 and SP3, 12 SP1, 12 SP2 - -* Ubuntu 14.04 LTS (Trusty), 16.04 LTS (Xenial) - - diff --git a/src/reference/modules/ROOT/pages/10_installation/05_requirements/22_unsupported_architecture.adoc b/src/reference/modules/ROOT/pages/10_installation/05_requirements/22_unsupported_architecture.adoc deleted file mode 100644 index 7b659e90..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/05_requirements/22_unsupported_architecture.adoc +++ /dev/null @@ -1,48 +0,0 @@ -==== Partially supported Operating Systems - -Fully supported Operating Systems are systems that are frequently built and tested on our servers. -Partially suported Operating Systems are systems that have been built and tested at least once but that have not seen continuous flow of fixes. - -[WARNING] - -.Partially supported Operating Systems - -==== - -It is possible to use Rudder on other platforms than the fully supported ones. -However, we haven't tested the application on them, and can't currently supply -any packages for them. Moreover, some Techniques may not work properly. If you -wish to get Rudder support on those systems, please get in touch with us! 
- -A reference about how to manually build a Rudder agent is available on Rudder's -documentation here: xref:90_reference/40_build_agent.adoc#_building_the_rudder_agent[Building the Rudder Agent] - -==== - -===== For Rudder Nodes - -The following operating systems have had an agent built using xref:90_reference/40_build_agent.adoc#_building_the_rudder_agent[Building the Rudder Agent]: - -* FreeBSD - -* Slackware - -* Solaris 10 and 11 - -* Raspbian, based on jessie (via dpkg) - -* Debian 8 on ARM (armhf version) (via dpkg) - -* OpenSUSE (via rpm) - - -You can also follow the documentation instructions to build and install Rudder Agent locally on your favorite linux distribution. -Even if this distribution has not been tested by us, it has a reasonable chance of success. - - -===== For Rudder Root Server - -We advise against using an unsupported OS for Rudder server because the server contains -much more code than the agent. This code is tailored against specific OS versions -to work around many system limitations and specificities. - diff --git a/src/reference/modules/ROOT/pages/10_installation/05_requirements/23_cloud.adoc b/src/reference/modules/ROOT/pages/10_installation/05_requirements/23_cloud.adoc deleted file mode 100644 index 985f43a1..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/05_requirements/23_cloud.adoc +++ /dev/null @@ -1,10 +0,0 @@ - -[[rudder-cloud-compatibility]] -==== Cloud compatibility - -The agent provides an abstraction that permits a high level management of the infrastructure. -This abstraction is independant of the underlying hardware. This also works for the cloud - -we can define configuration rules in Rudder that will be applied as well inside a cloud instance as in a virtual server or in a physical machine of a datacenter. - -Any cloud instance based on one of the supported operating system is automatically supported. 
- diff --git a/src/reference/modules/ROOT/pages/10_installation/05_requirements/30_hardware_specifications.adoc b/src/reference/modules/ROOT/pages/10_installation/05_requirements/30_hardware_specifications.adoc deleted file mode 100644 index 257fce6e..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/05_requirements/30_hardware_specifications.adoc +++ /dev/null @@ -1,95 +0,0 @@ - -[[node-hardware-requirements]] -==== Hardware specifications for Rudder Agent - -Rudder agent has a very small footprint, and only consumes: - -* 10 to 20 MB of RAM during an agent run -* a few kB on the network to check or update its policies -* a few kB on the network to report -* around 100 MB of disk space for the installed files and the workspace - -These figures will vary depending on your configuration (backup retention, -number of configured components to check, etc...). - - -[[server-hardware-requirements]] -==== Hardware specifications and sizing for Rudder Root Server - -A dedicated server is strongly recommended, either physical or virtual with at least one dedicated core. -Rudder Server runs on both 32 (if available) and 64 bit versions of every supported Operating System. - -[NOTE] - -==== - -Rudder does not fear big infrastructures. It is currently used in production in -infrastructure with more than *7000* nodes. - -==== - -===== Memory - -The required amount of RAM mainly depends on the number of managed nodes. A general rule for the minimal value on a stand-alone server is: - -* less than 50 nodes: 2 GB -* between 50 and 1000 nodes: 4 GB -* more than 1000 nodes: 4 GB + 1 GB of RAM by 500 nodes above 1000. - -When managing more than 1000 nodes, we also recommend you to use a multiserver -installation for Rudder as described in chapter xref:43_advanced_administration/77_distributed_rudder.adoc#multiserver-rudder[Multiserver Rudder]. 
- -When your server has more than 2 GB of RAM, you have to configure the RAM allocated -to the Java Virtual Machine as explained in the section -xref:43_advanced_administration/20_application_tuning.adoc#_configure_ram_allocated_to_jetty[about webapplication RAM configuration]. - -When your server has more than 4 GB, you may need to also tune the PostgresSQL -server, as explained in the xref:43_advanced_administration/20_application_tuning.adoc#_optimize_postgresql_server[Optimize PostgreSQL Server] -section. - -[TIP] - -==== - -As an example, a Rudder server which manages 2600 nodes (with a lot of policies -checked) will need: - -* A server with 8 GB of RAM, -* 4 GB of RAM will be allocated to the JVM. - -In our load-tests, with such a configuration, the server is not stressed and -the user experience is good. - -==== - -===== Disk - -The PostgreSQL database will take up most disk space needed by Rudder. The storage -necessary for the database can be estimated by counting around -150 to 400 kB by Directive, by Node and by day of retention of node's -execution reports (the default is 4 days): - ----- -max_space = number of Directives * number of Nodes * retention duration in days * 400 kB ----- - -For example, a default installation with 500 nodes and an average of -50 Directives by node, should require between *14 GB and 38 GB* of disk space -for PostgreSQL. - -Follow the xref:43_advanced_administration/20_application_tuning.adoc#_reports_retention[Reports Retention] section to configure the -retention duration. - - -[WARNING] - -==== - -Be careful to correctly size your */var* partition. Compliance data are growing -fast, and PostgreSQL doesn't like at all to encounter a write error because -the disk is full. It is also adviced to set-up your monitoring to check for -available space on that partition. 
- -==== - - diff --git a/src/reference/modules/ROOT/pages/10_installation/10_install_server/00_install_intro.adoc b/src/reference/modules/ROOT/pages/10_installation/10_install_server/00_install_intro.adoc deleted file mode 100644 index 470c0707..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/10_install_server/00_install_intro.adoc +++ /dev/null @@ -1,63 +0,0 @@ - -[[install-server]] -=== Install Rudder Server - -This chapter covers the installation of a Rudder Root Server, from the -specification of the underlying server, to the initial setup of the application. - -Before all, you need to setup a server according to -xref:10_installation/05_requirements/21_supported_architecture.adoc#server-supported-os[the server specifications]. You should also -xref:10_installation/05_requirements/05_requirements.adoc#configure-the-network[configure the network]. These topics are covered in the -Architecture chapter. - -Ideally, this machine should have Internet access, but this is not a strict requirement. - -As Rudder data can grow really fast depending on your number of managed nodes and number of rules, it is advised to separate partitions to prevent /var getting full and break your system. -Special attention should be given to: - -======= - -/var/lib/pgsql:: -(OS dependent). -Please see the xref:10_installation/05_requirements/05_requirements.adoc#_database_maintenance[database maintenance] chapter for more details about the -PostgreSQL database size estimation. - -/var/rudder:: -Contains most of your server information, the configuration-repository, LDAP database, etc... -Rudder application-related files should stay under 1GB, but the size of the configuration-repository will -depend of the amount of data you store in it, especially in the shared-files folder (files that will get -distributed to the agents using the "Download a file for the shared folder" Technique). 
- -/var/log/rudder:: -Report logs (/var/log/rudder/reports) size will depend on the amount of nodes you manage. -It is possible to reduce this drastically by unticking "Log all reports received to /var/log/rudder/reports/all.log" -under the Administration - Settings tab in the Rudder web interface. This will prevent Rudder from recording this logs -in a text file on disk, and will only store them in the SQL database. This saves on space, and doesn't remove any -functionality, but does however make debugging harder. - -======= - -[NOTE] - -.Files installed by the application - -===== - -+/etc+:: System-wide configuration files are stored here: init scripts, -configuration for apache, logrotate and rsyslog. - -+/opt/rudder+:: Non variable application files are stored here. - -+/opt/rudder/etc+:: Configuration files for Rudder services are stored here. - -+/var/log/rudder+:: Log files for Rudder services are stored here. - -+/var/rudder+:: Variable data for Rudder services are stored here. - -+/var/rudder/configuration-repository/techniques+:: Techniques are stored here. - -+/var/rudder/cfengine-community+:: Data for CFEngine Community is stored here. - -+/usr/share/doc/rudder*+:: Documentation about Rudder packages. - -===== diff --git a/src/reference/modules/ROOT/pages/10_installation/10_install_server/11_install_root_server_debian.adoc b/src/reference/modules/ROOT/pages/10_installation/10_install_server/11_install_root_server_debian.adoc deleted file mode 100644 index f4b0c9ed..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/10_install_server/11_install_root_server_debian.adoc +++ /dev/null @@ -1,34 +0,0 @@ -==== Install Rudder Root server on Debian or Ubuntu - -===== Add the Rudder packages repository - -Rudder requires Java RE (version 8 at least) which is not packaged by default on Debian 8 nor Ubuntu 14.04. 
- -The Java RE 8 for Debian or Ubuntu can be found through Oracle's website: https://www.java.com - -include::{partialsdir}/apt_key.adoc[] - - -Then run the following commands as root: - ----- - -echo "deb http://www.rudder-project.org/apt-4.3/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/rudder.list -apt-get update - ----- - -This will add the package repository and finally update the local package cache. - -===== Install your Rudder Root Server - -To begin the installation, you should simply install the rudder-server-root -metapackage, which will install the required components: - ----- - -apt-get install rudder-server-root - ----- - -include::{partialsdir}/initial_config.adoc[] diff --git a/src/reference/modules/ROOT/pages/10_installation/10_install_server/12_install_root_server_sles.adoc b/src/reference/modules/ROOT/pages/10_installation/10_install_server/12_install_root_server_sles.adoc deleted file mode 100644 index aa05ba80..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/10_install_server/12_install_root_server_sles.adoc +++ /dev/null @@ -1,73 +0,0 @@ -[[install-server-sles, Install Rudder Root server on SLES]] -==== Install Rudder Root server on SLES - -===== Configure the package manager - -Rudder requires Java RE (version 7 at least) that is not always packaged by SuSE on all versions - -* PostgreSQL 9 -* Java RE (version 8 at least). - -It is also recommended to use PostgreSQL >= 9.2 for optimal performances. - -PostgreSQL 9.4 can be installed through the OpenSuSE build service: https://build.opensuse.org/project/show/server:database:postgresql -or through the system repositories, on SLES 11 SP4 and later systems. - -The Java RE 8 for SLES11 can be found through Oracle's website: https://www.java.com - -Also, Rudder server requires the +git+ software, that can be found on SLES SDK DVD under the name +git-core+. 
- -[WARNING] - -==== - -SLES 11 pre SP4 will try to install PostgreSQL 8.x by default, which is not recommended for Rudder and will cause serious performance degradation, and requires much more disk space in the long run. - -It is really recommended to either add the OpenSuSE build service repository, or install postgresql9x-server (if available) beforehand to prevent the system from choosing the default PostgreSQL version. - -==== - -include::{partialsdir}/zypper_segfault.adoc[] - - -[WARNING] - -==== - -Zypper seems to be quite tolerant to missing dependencies and will let you install rudder-server-root even if you are missing -something like +git-core+ for example, if nothing provides it or you did not install it beforehand. - -Special care should be taken during initial installation not to say "Continue anyway" if Zypper does complain a dependency can -not be resolved and asks what to do. - -==== - -===== Add the Rudder packages repository - -include::{partialsdir}/rpm_key.adoc[] - - -Then run the following commands as root: - ----- - -zypper ar -n "Rudder SLES repository" http://www.rudder-project.org/rpm-4.3/SLES_11/ Rudder -zypper refresh - ----- - -This will add the Rudder package repository, then update the local package -cache. 
- -===== Install your Rudder Root Server - -To begin the installation, you should simply install the rudder-server-root -metapackage, which will install the required components: - ----- - -zypper in rudder-server-root - ----- - -include::{partialsdir}/initial_config.adoc[] diff --git a/src/reference/modules/ROOT/pages/10_installation/10_install_server/13_install_root_server_centos_rhel.adoc b/src/reference/modules/ROOT/pages/10_installation/10_install_server/13_install_root_server_centos_rhel.adoc deleted file mode 100644 index 83a51bed..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/10_install_server/13_install_root_server_centos_rhel.adoc +++ /dev/null @@ -1,86 +0,0 @@ -==== Install Rudder Root server on RHEL-like systems - -===== Add the Rudder packages repository - -include::{partialsdir}/rpm_key.adoc[] - - -Then run the following command as root: - ----- - -echo '[Rudder_4.3] -name=Rudder 4.3 EL repository -baseurl=http://www.rudder-project.org/rpm-4.3/RHEL_$releasever/ -gpgcheck=1 -gpgkey=https://www.rudder-project.org/rpm-repos/rudder_rpm_key.pub' > /etc/yum.repos.d/rudder.repo - ----- - -===== Install your Rudder Root Server - -To begin the installation, you should simply install the rudder-server-root -metapackage, which will install the required components: - ----- - -yum install rudder-server-root - ----- - -On Red Hat-like systems, a firewall setup is enabled by default, and would need to be adjusted -for Rudder to operate properly. You have to allow all the flows described in the -xref:10_installation/05_requirements/05_requirements.adoc#configure-the-network[network] section. 
- - -[TIP] - -==== - -On EL6, the /etc/sysconfig/iptables file configures the firewall: - ----- - -*filter -:INPUT ACCEPT [0:0] -:FORWARD ACCEPT [0:0] -:OUTPUT ACCEPT [0:0] --A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT --A INPUT -p icmp -j ACCEPT --A INPUT -i lo -j ACCEPT -# Allow SSH access (Maintenance) --A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT -# Allow HTTPS access (Rudder) --A INPUT -m state --state NEW -m tcp -p tcp --dport 443 -j ACCEPT --A INPUT -j REJECT --reject-with icmp-host-prohibited --A FORWARD -j REJECT --reject-with icmp-host-prohibited -COMMIT - ----- - -The important line to have access to the Web interface being: - ----- - -# Allow HTTPS access (Rudder) --A INPUT -m state --state NEW -m tcp -p tcp --dport 443 -j ACCEPT - ----- - -==== - -[TIP] - -==== - -On EL7, the default firewall is firewalld, and you can enable HTTP/S access by running - ----- - -firewall-cmd --permanent --zone=public --add-port=443/tcp - ----- - -==== - -include::{partialsdir}/initial_config.adoc[] diff --git a/src/reference/modules/ROOT/pages/10_installation/11_install_agent/00_install_agent.adoc b/src/reference/modules/ROOT/pages/10_installation/11_install_agent/00_install_agent.adoc deleted file mode 100644 index 71847055..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/11_install_agent/00_install_agent.adoc +++ /dev/null @@ -1,43 +0,0 @@ - -[[install-agent]] -=== Install Rudder Agent - -This chapter gives a general presentation of the Rudder Agent, and describes -the different configuration steps to deploy the Rudder agent on the Nodes you -wish to manage. Each Operating System has its own set of installation procedures. - -The machines managed by Rudder are called Nodes, and can either be physical or virtual. -For a machine to become a managed Node, you have to install the Rudder Agent on it. -The Node will afterwards register itself on the server. 
And finally, the Node should -be acknowledged in the Rudder Server interface to become a managed Node. For a more detailed -description of the workflow, please refer to the xref:21_node_management/20_node_management.adoc#_node_management_2[Node Management] -documentation. - -[NOTE] - -.Components - -===== - -This agent contains the following tools: - -. The community version of http://www.cfengine.com[CFEngine], a powerful open -source configuration management tool. - -. http://fusioninventory.org/[FusionInventory], an inventory software. - -. An initial configuration set for the agent, to bootstrap the Rudder Root Server -access. - -These components are recognized for their reliability and minimal impact on -performances. Our tests showed their memory consumption is usually under 10 MB -of RAM during their execution. So you can safely install them on your servers. - -We grouped all these tools in one package, to ease the Rudder Agent -installation. - -To get the list of supported Operating systems, please refer to -xref:10_installation/05_requirements/21_supported_architecture.adoc#node-supported-os[the list of supported Operating Systems for the Nodes]. 
- -===== - diff --git a/src/reference/modules/ROOT/pages/10_installation/11_install_agent/10_install_agent_debian.adoc b/src/reference/modules/ROOT/pages/10_installation/11_install_agent/10_install_agent_debian.adoc deleted file mode 100644 index 32984bc3..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/11_install_agent/10_install_agent_debian.adoc +++ /dev/null @@ -1,32 +0,0 @@ -==== Install Rudder Agent on Debian or Ubuntu - -include::{partialsdir}/syslog.adoc[] - -include::{partialsdir}/apt_key.adoc[] - -Then add Rudder's package repository: - ----- - -echo "deb http://www.rudder-project.org/apt-4.3/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/rudder.list - ----- - -Update your local package database to retrieve the list of packages available on our repository: - ----- - -sudo apt-get update - ----- - -Install the +rudder-agent+ package: - ----- - -sudo apt-get install rudder-agent - ----- - -You can now xref:10_installation/11_install_agent/80_agent_configuration.adoc#_configure_and_validate[configure the agent]. 
- diff --git a/src/reference/modules/ROOT/pages/10_installation/11_install_agent/20_install_agent_centos_rhel.adoc b/src/reference/modules/ROOT/pages/10_installation/11_install_agent/20_install_agent_centos_rhel.adoc deleted file mode 100644 index 63d1b2c1..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/11_install_agent/20_install_agent_centos_rhel.adoc +++ /dev/null @@ -1,46 +0,0 @@ -==== Install Rudder Agent on RHEL-like systems - -include::{partialsdir}/syslog.adoc[] - -include::{partialsdir}/rpm_key.adoc[] - -Then define a yum repository for Rudder: - ----- - -echo '[Rudder_4.3] -name=Rudder 4.3 EL repository -baseurl=http://www.rudder-project.org/rpm-4.3/RHEL_$releasever/ -gpgcheck=1 -gpgkey=https://www.rudder-project.org/rpm-repos/rudder_rpm_key.pub' > /etc/yum.repos.d/rudder.repo - ----- - -[TIP] - -==== - -The RPM can be directly downloaded for a standalone installation, -from the following URL: http://www.rudder-project.org/rpm-4.1/RHEL_7/ -(or RHEL_6, RHEL_5, etc, depending on your host's OS version) - -==== - -Install the package: - ----- - -yum install rudder-agent - ----- - -Or: - ----- - -yum install rudder-agent-4.2.0-1.EL.7.x86_64.rpm - ----- - -You can now xref:10_installation/11_install_agent/80_agent_configuration.adoc#_configure_and_validate[configure the agent]. - diff --git a/src/reference/modules/ROOT/pages/10_installation/11_install_agent/30_install_agent_suse.adoc b/src/reference/modules/ROOT/pages/10_installation/11_install_agent/30_install_agent_suse.adoc deleted file mode 100644 index 128bd1ea..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/11_install_agent/30_install_agent_suse.adoc +++ /dev/null @@ -1,63 +0,0 @@ -==== Install Rudder Agent on SLES - -include::{partialsdir}/syslog.adoc[] - -Following commands are executed as the +root+ user. 
- -include::{partialsdir}/zypper_segfault.adoc[] - -include::{partialsdir}/rpm_key.adoc[] - -Then add the Rudder packages repository: - -* on SLES 12: - ----- - -zypper ar -n 'Rudder SLES 12 repository' http://www.rudder-project.org/rpm-4.3/SLES_12/ Rudder - ----- - -* on SLES 11: - ----- - -zypper ar -n 'Rudder SLES repository' http://www.rudder-project.org/rpm-4.3/SLES_11_SP1/ Rudder - ----- - -* on SLES 10: - ----- - -zypper sa 'http://www.rudder-project.org/rpm-4.3/SLES_10_SP3/' Rudder - ----- - -Update your local package database to retrieve the list of packages available on our repository: - ----- - -zypper ref - ----- - -Install the +rudder-agent+ package: - ----- - -zypper install rudder-agent - ----- - -[TIP] - -==== - -The use the the +rug+ package manager on SLES 10 is strongly discouraged, due to poor performance -and possible stability issues. - -==== - -You can now xref:10_installation/11_install_agent/80_agent_configuration.adoc#_configure_and_validate[configure the agent]. - diff --git a/src/reference/modules/ROOT/pages/10_installation/11_install_agent/80_agent_configuration.adoc b/src/reference/modules/ROOT/pages/10_installation/11_install_agent/80_agent_configuration.adoc deleted file mode 100644 index dc00a57c..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/11_install_agent/80_agent_configuration.adoc +++ /dev/null @@ -1,31 +0,0 @@ -[[_configure_and_validate]] -==== Configure and validate - -===== Configure Rudder Agent - -Configure the IP address or hostname of the Rudder Root Server in the following file - ----- - -echo '' > /var/rudder/cfengine-community/policy_server.dat - ----- - -[TIP] - -===== - -We advise you to use the +IP address+ of the Rudder Root Server. The DNS name of -this server can also be accepted if you have a trusted DNS infrastructure -with proper reverse resolutions. 
- -===== - -You can now start the Rudder service with: - ----- - -service rudder-agent start - ----- - diff --git a/src/reference/modules/ROOT/pages/10_installation/11_install_agent/90_validation.adoc b/src/reference/modules/ROOT/pages/10_installation/11_install_agent/90_validation.adoc deleted file mode 100644 index 220a6e4d..00000000 --- a/src/reference/modules/ROOT/pages/10_installation/11_install_agent/90_validation.adoc +++ /dev/null @@ -1,22 +0,0 @@ -===== Validate new Node - -Several minutes after the start of the agent, a new Node should be pending in -the Rudder web interface. You will be able to browse its inventory, and accept it to manage its -configuration with Rudder. - -You may force the agent to run and send an inventory by issuing the following command: - ----- - -rudder agent inventory - ----- - -You may force the agent execution by issuing the following command: - ----- - -rudder agent run - ----- - diff --git a/src/reference/modules/ROOT/pages/12_upgrade/00_upgrade.adoc b/src/reference/modules/ROOT/pages/12_upgrade/00_upgrade.adoc deleted file mode 100644 index 81a28c7e..00000000 --- a/src/reference/modules/ROOT/pages/12_upgrade/00_upgrade.adoc +++ /dev/null @@ -1,14 +0,0 @@ -== Upgrade - -This short chapter covers the upgrade of the Rudder Server Root and Rudder Agent -from older versions to the latest version. - -The upgrade is quite similar to the installation. - -A big effort has been made to ensure that all upgrade steps are performed -automatically by packaging scripts. Therefore, you shouldn't have to do any -upgrade procedures manually, but you will note that several data migrations -occur during the upgrade process. 
- - - diff --git a/src/reference/modules/ROOT/pages/12_upgrade/05_caution.adoc b/src/reference/modules/ROOT/pages/12_upgrade/05_caution.adoc deleted file mode 100644 index bb726232..00000000 --- a/src/reference/modules/ROOT/pages/12_upgrade/05_caution.adoc +++ /dev/null @@ -1,51 +0,0 @@ -=== Upgrade notes - -[[_upgrade_from_rudder_4_0_or_older]] -==== Upgrade from Rudder 4.0 or older - -Direct upgrades from 4.0.x and older are no longer supported on 4.3. -If you are still running one of those, either on servers or nodes, -please first upgrade to one of the supported versions, and then upgrade to 4.3. - -==== Upgrade from Rudder 4.1 or 4.2 - -Migration from 4.1 or 4.2 are supported, so you can upgrade directly to 4.3. - -==== Compatibility between Rudder agent 4.3 and older server versions - -===== 4.1.x and 4.2.x servers - -Rudder agents 4.3.x are compatible with 4.1 and 4.2 Rudder servers. - -===== Older servers - -Rudder agents 4.3.x are not compatible with Rudder servers older than 4.1. -You need to upgrade your server to a compatible version before the agents. - -==== Compatibility between Rudder server 4.3 and older agent versions - -===== 4.1.x and 4.2.x agents - -Rudder agent 4.1.x and 4.2.x are fully compatible with Rudder server 4.3.x. It is -therefore not strictly necessary to update your agents to 4.3.x. - -===== Older agents - -These agents are not compatible with Rudder 4.3, and you have to upgrade them. -Be careful to follow the upgrade path explained xref:12_upgrade/05_caution.adoc#_upgrade_from_rudder_4_0_or_older[above]. - -==== Protocol for reporting - -Rudder uses syslog messages over UDP by default for reporting (since 3.1), but if you upgraded -your server from a previous version, you will keep the previous setting which uses -syslog messages over TCP. 
- -You should consider switching to UDP (in *Administration* -> *Settings* -> *Protocol*), -as it will prevent breaking your server in -case of networking or load issues, or if you want to manage a lot of nodes. -The only drawback is that you can lose reports in these situations. It does not -affects the reliability of policy enforcement, but may only temporarily affects -reporting on the server. -Read xref:43_advanced_administration/20_application_tuning.adoc#_rsyslog[perfomance notes] about rsyslog for detailed information. - - diff --git a/src/reference/modules/ROOT/pages/12_upgrade/10_upgrade_debian.adoc b/src/reference/modules/ROOT/pages/12_upgrade/10_upgrade_debian.adoc deleted file mode 100644 index c227970c..00000000 --- a/src/reference/modules/ROOT/pages/12_upgrade/10_upgrade_debian.adoc +++ /dev/null @@ -1,69 +0,0 @@ -=== On Debian or Ubuntu - -Following commands are executed as the +root+ user. - -Add the Rudder project repository: - ----- - -echo "deb http://www.rudder-project.org/apt-4.3/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/rudder.list - ----- - -Update your local package database to retrieve the list of packages available on our repository: - ----- - -apt-get update - ----- - -For Rudder Server, upgrade all the packages associated to +rudder-server-root+: - -* With apt-get: - ----- - -apt-get install rudder-server-root ncf ncf-api-virtualenv - ----- - -and after the upgrade of these packages, restart jetty to apply the changes on the Web application: - ----- - -service rudder-jetty restart - ----- - -For Rudder Agent, upgrade the +rudder-agent+ package: - ----- - -apt-get install rudder-agent - ----- - -[WARNING] - -==== - -Rudder includes a script for upgrading all files, databases, etc... which need -migrating. Therefore, you should not replace your old files by the new ones -when apt-get/aptitude asks about this, unless you want to reset all your parameters. 
- -==== - -[WARNING] - -==== - -Rudder 4.1 requires Java RE version 8 or more, which is not packaged be default on Debian 7 nor Ubuntu 14.04 -On these platforms, prior to upgrade Rudder, you will need to install Java RE 8, either from Oracle site https://www.java.com -or through any other means of your choice - -==== - - -You can now xref:12_upgrade/60_technique_upgrade.adoc#_technique_upgrade[upgrade your local techniques]. - diff --git a/src/reference/modules/ROOT/pages/12_upgrade/20_upgrade_rhel.adoc b/src/reference/modules/ROOT/pages/12_upgrade/20_upgrade_rhel.adoc deleted file mode 100644 index b9e7fbf2..00000000 --- a/src/reference/modules/ROOT/pages/12_upgrade/20_upgrade_rhel.adoc +++ /dev/null @@ -1,64 +0,0 @@ -=== On RHEL or CentOS - -Following commands are executed as the +root+ user. - -Update your yum repository: - ----- - -echo '[Rudder_4.3] -name=Rudder 4.3 Repository -baseurl=http://www.rudder-project.org/rpm-4.3/RHEL_$releasever/ -gpgcheck=1 -gpgkey=https://www.rudder-project.org/rpm-repos/rudder_rpm_key.pub' > /etc/yum.repos.d/rudder.repo - ----- - -[TIP] - -==== - -Replace RHEL_7 with your Enterprise Linux version if necessary. - -==== - -==== Rudder server - -For Rudder server, upgrade the +rudder-*+ and +ncf+-related packages: - ----- - -yum update "rudder-*" ncf ncf-api-virtualenv - ----- - -and after the upgrade of these packages, restart jetty to apply the changes on the Web application: - ----- - -service rudder-jetty restart - ----- - -From version 3.1, Rudder provides an SELinux policy. You can enable it after upgrading your server with: - ----- - -sed -i "s%^\s*SELINUX=.*%SELINUX=enabled%" /etc/sysconfig/selinux -setenforce 1 - ----- - -==== Rudder agent - -For Rudder agent, upgrade the +rudder-agent+ package: - ----- - -yum update rudder-agent - ----- - -You can now xref:12_upgrade/60_technique_upgrade.adoc#_technique_upgrade[upgrade your local techniques]. 
- - diff --git a/src/reference/modules/ROOT/pages/12_upgrade/30_upgrade_suse.adoc b/src/reference/modules/ROOT/pages/12_upgrade/30_upgrade_suse.adoc deleted file mode 100644 index 080d05d2..00000000 --- a/src/reference/modules/ROOT/pages/12_upgrade/30_upgrade_suse.adoc +++ /dev/null @@ -1,80 +0,0 @@ -=== On SLES - -Following commands are executed as the +root+ user. - -Add the Rudder packages repository: - -* On a SLES 11 system: - ----- - -zypper ar -n "Rudder SLES repository" http://www.rudder-project.org/rpm-4.3/SLES_11_SP1/ Rudder - ----- - -* On a SLES 10 system: - ----- - -zypper sa "http://www.rudder-project.org/rpm-4.3/SLES_10_SP3/" Rudder - ----- - - -Update your local package database to retrieve the list of packages available on our repository: - ----- - -zypper ref - ----- - - -For Rudder Server, upgrade all the packages associated to +rudder-server-root+: - ----- - -zypper update "rudder-*" "ncf*" - ----- - -[WARNING] - -==== - -SLES 11 pre SP4 uses PostgreSQL 8.x by default, which is not recommended for Rudder and will cause serious performance degradation, and requires much more disk space in the long run. - -Rudder 4.0 is tested for PostgreSQL 9.2 and higher. It still works with version 8.4 or 9.1, but not warranties are made that this will hold in the future. It is really recommanded to migrate to PostgreSQL 9.2 at least. - -Please look at xref:10_installation/10_install_server/12_install_root_server_sles.adoc#install-server-sles[Install Rudder Root server on SLES] for details. 
- -==== - -[WARNING] - -==== - -Rudder 4.1 requires Java RE version 8 or more, which is not packaged be default on SLES 11 -On this platform, prior to upgrade Rudder, you will need to install Java RE 8, either from Oracle site https://www.java.com -or through any other means of your choice - -==== - -and after the upgrade of these packages, restart jetty to apply the changes on the Web application: - ----- - -service rudder-jetty restart - ----- - -For Rudder Agent, upgrade the +rudder-agent+ package: - ----- - -zypper update rudder-agent - ----- - -You can now xref:12_upgrade/60_technique_upgrade.adoc#_technique_upgrade[upgrade your local techniques]. - diff --git a/src/reference/modules/ROOT/pages/12_upgrade/60_technique_upgrade.adoc b/src/reference/modules/ROOT/pages/12_upgrade/60_technique_upgrade.adoc deleted file mode 100644 index 2d4ed0d9..00000000 --- a/src/reference/modules/ROOT/pages/12_upgrade/60_technique_upgrade.adoc +++ /dev/null @@ -1,40 +0,0 @@ -[[_technique_upgrade]] -=== Technique upgrade - -At the first installation, Rudder will automatically deploy a Technique library in the -`/var/rudder/configuration-repository/techniques` directory. - -When upgrading Rudder to another version, a new (updated) Technique library will be deployed -in `/opt/rudder/share/techniques`, and Rudder will automatically take care of updating the system -Techniques in the configuration-repository directory. - -However, the other Techniques will not be updated automatically (yet), so you will have to do -it yourself. - -[CAUTION] - -==== - -Please keep in mind that if you did manual modifications on the Techniques in existing directories, -or created new versions of them, you will have some merging work to do. - -==== - -To upgrade you local techniques, run the following commands on the Rudder Root Server: - ----- - -cd /var/rudder/configuration-repository -cp -a /opt/rudder/share/techniques/* techniques/ -git status -# Now, inspect the differences. 
If no conflict is noticeable, then go ahead. -git add techniques/ -git commit -m "Technique upgrade" # Here, put a meaningful message about why you are updating. -rudder server reload-techniques - ----- - -This last command will reload the Technique library and trigger a full redeployment on nodes. - -Please check that the deployment is successful in the Rudder web interface. - diff --git a/src/reference/modules/ROOT/pages/20_usage/00_usage_intro.adoc b/src/reference/modules/ROOT/pages/20_usage/00_usage_intro.adoc deleted file mode 100644 index cfeac64d..00000000 --- a/src/reference/modules/ROOT/pages/20_usage/00_usage_intro.adoc +++ /dev/null @@ -1,7 +0,0 @@ -== Web interface usage - -This chapter is a general presentation of the Rudder Web Interface. You will -find how to authenticate in the application, a description of the design of the -screen, and some explanations about usage of common user interface items like -the search fields and the reporting screens. - diff --git a/src/reference/modules/ROOT/pages/20_usage/10_web_interface.adoc b/src/reference/modules/ROOT/pages/20_usage/10_web_interface.adoc deleted file mode 100644 index a38ba2e4..00000000 --- a/src/reference/modules/ROOT/pages/20_usage/10_web_interface.adoc +++ /dev/null @@ -1,90 +0,0 @@ -=== Authentication - -When accessing the Rudder web interface, a login / password is required. The -default account is "admin" (Password: admin). - -You can change the user accounts by following the xref:30_basic_administration/80_user_management.adoc#user-management[User management] -procedure. - -=== Presentation of Rudder Web Interface - -The web interface is organised according to the concepts described earlier. It -is divided in three logical parts: Node Management, Configuration Management -and Administration. - -==== Rudder Home - -The home page summarizes the content of the other parts and provides quick links -for the most common actions. 
- -.Rudder Homepage - -image::rudder-home.png[Home menu] - -==== Node Management - -In the Node Management section, you will find the list of all Nodes, the validation tool for new -Nodes, a search engine for validated Nodes, and the management tool for groups -of Nodes. - -.List of Nodes - -image::nodes.png[Nodes] - -.Node compliance - -image::node-compliance.png[Node Compliance] - -.Groups - -image::groups.png[Groups] - -==== Configuration Management - -In the Configuration Management section, you can select the Techniques, -configure the Directives and manage the Rules and check their compliance. - -.Rules screen - -image::Rule_config.png[Rules list] - -.Rule compliance - -image::Rule_compliance.png[Rule compliance] - -.Directive list - -image::Directive_management.png[Directives] - -==== Utilities - -This section contains tools useful for your everyday usage of Rudder. -This is where you will find the technique editor, the event logs table -or the change requests if you have enabled that feature. - -.Event Logs - -image::event_log.png[Event logs] - -.Technique Editor - -image::technique_editor/1-rudder-technique-editor.png[Technique editor] - -.Technique details - -image::technique_editor/5-configure-generic-method.png[Technique details] - -==== Settings - -The Settings section provides you a way to modify your Rudder setup: you can setup the -available networks for the Policy Server, configure agent run and policy mode, -enable web interface options and manage installed plugins. 
- -.Settings screen - -image::rudder-admin-settings.png[Settings] - -.Changing global agent run - -image::Global_run_settings.png[Global run settings] - diff --git a/src/reference/modules/ROOT/pages/21_node_management/20_node_management.adoc b/src/reference/modules/ROOT/pages/21_node_management/20_node_management.adoc deleted file mode 100644 index a30eb3f4..00000000 --- a/src/reference/modules/ROOT/pages/21_node_management/20_node_management.adoc +++ /dev/null @@ -1,48 +0,0 @@ -[[_node_management_2]] -== Node management - -[[inventory, Node Inventory]] - -image::node_workflow.svg[] - -=== Node inventory - -image::nodes.png[] - -Rudder integrates a node inventory tool which harvest useful information -about the nodes. This information is used by Rudder to handle the nodes, and -you can use the inventory information for Configuration Management purposes: -search Nodes, create Groups of Nodes, determine some configuration management -variables. - -In the Rudder Web Interface, each time you see a Node name, you can click on it -and display the collection of information about this Node. The inventory is -organized as following: first tab is a 'summary' of administrative information -about the Node; other tabs are specialized for 'hardware', 'network' interfaces, -and 'software' for every Node; tabs for 'reports' and 'logs' are added on -Rudder managed Nodes. - -The 'Node Summary' presents administrative information like the Node -'Hostname', 'Operating System', 'Rudder Client name', 'Rudder ID' and 'Date' -when the inventory was 'last received'. When the Node has been validated, some -more information is displayed like the Node 'Name' and the 'Date first -accepted in Rudder'. - -The 'hardware' information is organized as following: 'General', 'File -systems', 'Bios', 'Controllers', 'Memory', 'Port', 'Processor', 'Slot', 'Sound', -'Storage', 'Video'. 
- -'Network' connections are detailed as following: 'Name' of the interface on the -system, 'IP address', 'Network Mask', usage of 'DHCP' or static configuration, -'MAC address', 'Type' of connection, 'Speed' of the connection and 'Status'. - -And finally, you get the list of every 'software' package present on the -system, including version and description. - -On Nodes managed by Rudder, the 'Compliance Reports' tab displays information about the -status of the latest run of Rudder Agent, whereas the 'Technical Logs' tab displays -information about changes for the Node. - -image::node-compliance.png[] - - diff --git a/src/reference/modules/ROOT/pages/21_node_management/21_accept_new_nodes.adoc b/src/reference/modules/ROOT/pages/21_node_management/21_accept_new_nodes.adoc deleted file mode 100644 index 3d777e90..00000000 --- a/src/reference/modules/ROOT/pages/21_node_management/21_accept_new_nodes.adoc +++ /dev/null @@ -1,35 +0,0 @@ - -[[accept-new-nodes, Accept new Nodes]] - -=== Accept new Nodes - -At the starting point, the Rudder Server doesn't know anything about the Nodes. -After the installation of the Rudder Agent, each Node registers itself to the -Rudder Server, and sends a first inventory. Every new Node must be manually -validated in the Rudder Web Interface to become part of Rudder Managed Nodes. -This task is performed in the *Node Management > Accept new Nodes* section of -the application. You can select Nodes waiting for an approval, and determine -whether you consider them as valid or not. Click on each Node name to display -the extended inventory. Click on the magnifying glass icon to display the -policies which will be applied after the validation. - -.Accept the new Node +debian-node.rudder-project.org+ - -==== - -. Install and configure the Rudder Agent on the new Node -+debian-node.rudder-project.org+ - -. Wait a few minutes for the first run of the Rudder Agent. - -. Navigate to *Node Management > Accept new Nodes*. - -. 
Select the new Node in the list. - -. Validate the Node. - -. The Node is now integrated in Rudder, you can search it using the search -tools. - -==== - diff --git a/src/reference/modules/ROOT/pages/21_node_management/23_groups_of_nodes.adoc b/src/reference/modules/ROOT/pages/21_node_management/23_groups_of_nodes.adoc deleted file mode 100644 index a9212750..00000000 --- a/src/reference/modules/ROOT/pages/21_node_management/23_groups_of_nodes.adoc +++ /dev/null @@ -1,35 +0,0 @@ - -[[groups, Groups]] - -=== Group of Nodes - -You can create Group of Nodes based on search criteria to ease attribution of -Rules in Configuration Management. The creation of groups can be done from the -'Node Management > Search Nodes' page, or directly from the Groups list in -'Node Management > Groups'. A group can be either Dynamic or Static. - -include::{partialsdir}/glossary/dynamic-group.adoc[] - -include::{partialsdir}/glossary/static-group.adoc[] - -image::groups.png[] - -.Create a dynamic group for Linux Nodes with +ssh+ having an ip address in 192.18.42.x. - -==== - -To create that dynamic group like described above, You first have to create a new group -with group type set to +Dynamic+. Then you have to set its search request to: - -. Operator: +AND+. - -. First search line: 'Node', 'Operating System', +=+, 'Linux'. - -. Second search line: 'Software', 'Name', +=+, +ssh+. - -. Third search line: 'Node summary', 'Ip address', +Regex+, '192\ .168\ .\d\ . .*' . - -Finally, you have to click on Search to populate the group and click on Save to actually save it. 
- -==== - diff --git a/src/reference/modules/ROOT/pages/23_configuration_management/30_configuration_management.adoc b/src/reference/modules/ROOT/pages/23_configuration_management/30_configuration_management.adoc deleted file mode 100644 index 2e2a689e..00000000 --- a/src/reference/modules/ROOT/pages/23_configuration_management/30_configuration_management.adoc +++ /dev/null @@ -1,23 +0,0 @@ -== Configuration concepts - -We adopted the following terms to describe the configurations in Rudder: - -==== - -include::{partialsdir}/glossary/technique.adoc[] - -include::{partialsdir}/glossary/directive.adoc[] - -include::{partialsdir}/glossary/rule.adoc[] - -include::{partialsdir}/glossary/applied-policy.adoc[] - -==== - -As illustrated in this summary diagram, the rules are linking the -functions of inventory management and configuration management. - -.Concepts diagram - -image::configuration_concepts.svg[] - diff --git a/src/reference/modules/ROOT/pages/23_configuration_management/31_techniques.adoc b/src/reference/modules/ROOT/pages/23_configuration_management/31_techniques.adoc deleted file mode 100644 index 7a87ae0d..00000000 --- a/src/reference/modules/ROOT/pages/23_configuration_management/31_techniques.adoc +++ /dev/null @@ -1,33 +0,0 @@ -=== Techniques - -==== Concepts - -A Technique defines a set of operations and configurations to reach the -desired behaviour. This includes the initial set-up, but also a regular check on -the parameters, and automatic repairs (when possible). - -All the Techniques are built with the possibility to change only part of a -service configuration: each parameter may be either active, either set on the -"Don't change" value, that will let the default values or in place. This allows -for a progressive deployment of the configuration management. 
- -Finally, the Techniques will generate a set of reports which are sent to -the Rudder Root Server, which will let you analyse the percentage of compliance -of your policies, and soon, detailed reports on their application. - -==== Manage the Techniques - -The Techniques shipped with Rudder are presented in a library that you can -reorganize in *Configuration > Techniques*. The library -is organized in two parts: the available Techniques, and the selection -made by the user. - -include::{partialsdir}/glossary/technique-library.adoc[] - -include::{partialsdir}/glossary/active-techniques.adoc[] - -==== Create new Techniques - -The standard library only provides the most common Techniques. You can create -new Technique with the xref:23_configuration_management/41_technique_editor.adoc#technique-editor[Technique Editor]. - diff --git a/src/reference/modules/ROOT/pages/23_configuration_management/33_directives.adoc b/src/reference/modules/ROOT/pages/23_configuration_management/33_directives.adoc deleted file mode 100644 index eb981458..00000000 --- a/src/reference/modules/ROOT/pages/23_configuration_management/33_directives.adoc +++ /dev/null @@ -1,34 +0,0 @@ -=== Directives - -Once you have selected and organized your Techniques, you can create your -configurations in the *Configuration Management > Directives* section. - -include::{partialsdir}/glossary/directive.adoc[] - -The screen is divided in three parts: - -- on the left, The list of Directives, grouped by Technique - -- on the right, The selected Directive form. - -Click on the name of a Technique to show its description, and how to Create a Directive base on it. - -Click on the name of a Directive to see the Directive Summary containing the -description of the Technique its derived from, and the configuration items -of the Directive. 
- - -image::Directive_management.png[] - -.Create a Directive for Name resolution - -==== - -Use the Technique 'Name resolution' to create a new Directive called -+Google DNS Servers+, and shortly described as 'Use Google DNS Server'. Check in -the options 'Set nameservers' and 'Set DNS search suffix'. Set the value of the -variable 'DNS resolver' to +8.8.8.8+ and of 'Domain search suffix' according to -your organization, like +rudder-project.org+. - -==== - diff --git a/src/reference/modules/ROOT/pages/23_configuration_management/34_rules.adoc b/src/reference/modules/ROOT/pages/23_configuration_management/34_rules.adoc deleted file mode 100644 index 0628f4ec..00000000 --- a/src/reference/modules/ROOT/pages/23_configuration_management/34_rules.adoc +++ /dev/null @@ -1,11 +0,0 @@ -=== Rules - -include::{partialsdir}/glossary/rule.adoc[] - -image::Rule_management.png[] - -When a Rule is created or modified, the promises for the target nodes are generated. Rudder computes all the promises each nodes must have, and makes them available for the nodes. This process can take up to several minutes, depending on the number of managed nodes and the Policy Server configuration. During this time, The status icon on the top of the page turns to grey, with moving arrows. 
-if you feel the generated promises should be modified (for instance, if you changed the configuration of Rudder), you can click on the status menu in the top bar and click on "Regenerate policies" - -image::Rule_config.png[] - diff --git a/src/reference/modules/ROOT/pages/23_configuration_management/35_parameters.adoc b/src/reference/modules/ROOT/pages/23_configuration_management/35_parameters.adoc deleted file mode 100644 index bd580273..00000000 --- a/src/reference/modules/ROOT/pages/23_configuration_management/35_parameters.adoc +++ /dev/null @@ -1,33 +0,0 @@ -=== Variables - -==== User defined parameters - -Rudder provides a simple way to add common and reusable variables in either plain Directives, or techniques created using the Technique editor: the parameters. - -image::rudder-parameters.png[Parameters] - -The parameters enable the user to specify a content that can be put anywhere, using the following syntax: - -* In Directives: '${rudder.param.name}' will expand the content of the "name" parameter. -* In the Technique Editor: '${rudder_parameters.name}' will do the same. - -Using this, you can specify common file headers (this is the default parameter, "rudder_file_edit_header"), common DNS or domain names, backup servers, -site-specific elements... - -==== System variables - -Rudder also provides system variables that contain information about nodes -and their policy server. You can use them like user defined parameters. - -The information about a Node: - -* '${rudder.node.id}' returns the Rudder generated id of the Node -* '${rudder.node.hostname}' returns the hostname of the Node -* '${rudder.node.admin}' returns the administrator login of the Node - -The information about a Node's policy server. 
- -* '${rudder.node.policyserver.id}' returns the Rudder generated id of the Policy Server -* '${rudder.node.policyserver.hostname}' returns the hostname of the Policy Server -* '${rudder.node.policyserver.admin}' returns the administrator login of the Policy Server - diff --git a/src/reference/modules/ROOT/pages/23_configuration_management/36_compliance.adoc b/src/reference/modules/ROOT/pages/23_configuration_management/36_compliance.adoc deleted file mode 100644 index 68979aca..00000000 --- a/src/reference/modules/ROOT/pages/23_configuration_management/36_compliance.adoc +++ /dev/null @@ -1,125 +0,0 @@ -[[compliance-and-drift-assessment]] -=== Compliance and Drift Assessment - - -==== Overview in Rudder - - -Rudder is built to continuously assess drift compared to defined policies, with or without auto-healing. - -By auto-healing, we mean that optionally, Rudder can continuously enforce correct configuration over time, correcting the assessed drift so that -your configuration converges towards desired states. This behavior is optionnal, and Rudder can only report drift without changing configuration. -That policy enforce or audit mode can be configured by node, rule or directive (see xref:23_configuration_management/40_policy_mode.adoc#_policy_mode_audit_enforce[policy mode documentation] for more details). - -Rudder is able to adapt to complex process and only do the minimal required work so that the server converges to the desired state, -and so whatever was the starting state point. Rudder works as a GPS would, adapting the path to your destination depending of the path -you actually took. This process is much more resilient to changes than a step by step, procedural description of the commands to execute. - -Compliance and drift from expected configurations are then reported with possibility to drill down in non-compliance issues to identify the root problem. 
- -Of course, one can always correct a drift error by hand by updating coniguration target and changing policy mode from "audit" to "enforce" mode. - -===== Compliance and drift reporting - -Compliance drifts (non compliances, enforcement errors, repaires) are reported in Rudder by several means: - -- Compliance are reported in aggregated format globally in the dashboard, and by rules or nodes (example for Rule below) -- they are stored in Rudder compliance database, and each Rule displays an history of changes as depicted in "Changes history on a Rule" below. -- each drifts fires an event which is logged in file /var/log/rudder/compliance/non-compliant-reports.log and can be used - to integrates with log aggregation engine like Logstash, or hooks (typically to send notification to IRC or Slack, send email, etc) - - see for example the Slack connector here: https://github.com/Normation/rudder-tools/blob/master/scripts/rudder-notification/forward-non-compliance-to-slack.sh -- compliance and drift are also available from Rudder API to provide deeper integration with your IT Infrastructure. - - - -.Compliance on a Rule - -image::Rule_compliance.png[Rule compliance] - -The Rule detailed compliance screen will also graph compliance deviations on -a recent period as well as display a deviation log history for this period. - - - -.Changes history on a Rule - -image::rudder-rule-compliance-history.png[Changes compliance history] - - - - -==== How compliance is calculated ? - -As previously seen, in Rudder you define Rules which target groups of Nodes, and are composed of configuration Directives. - -A Directive contains one or multiple sub-configuration elements which generates reports. -For example, for a Sudoers Directive, each user can be such an element. - -Reports have states explaining what is the drift between the expected configuration and the actual configuration. 
-Some states depends if the user choose to auto-matically enforce drift correction -or if he chose to only reports on drift). - -Finaly, a node can get a global state if reports don't come at expected frequency or for expected policy configuration version. - -Below you will find all details about the possible states and their meaning with the actual compliance calculus method. - -*Checking that the node is correctly reporting, at correct frequency* - -At the node level, we are checking that the node is sending reports according to the -expected frequency, and for the currently defined version of the configuration for it. - -Based on this information, we get a - -Applying:: - -When a new set of policies are defined for a node (or any update to existing one), Rudder waits during a grace period -for reports so that the node has time to apply the new policies. -During this period, the configuration is said 'Applying'. - -No report:: - -The system didn't send any reports since a time incompatible with the agent frequency run interval. Most -likelly, the node is not online or there is an ongoing network issue between the node and Rudder server. - - - -*At directive level: checking for drift and auto-healing* - - -Success or Compliant:: - -The system is already in the desired state. No change is needed. Conformity is reached. - -Repaired:: - -When a configuration policy is "enforced", that state means that the system was not in the desired state. -Rudder applied some change and repaired what was not correct. Now the system is in the desired state. - -Error:: - -When configuration is enforced, it means that the system is not in the desired state and Rudder wasn't able to repair the system. - -Non compliant:: - -When configuration is not enforced, it means that the systemn is not in the desired state. A drift is reported. - -Not applicable:: - -A specific configuration may not be applicable on a given node because some precondition -are not met. 
For example, the specified configuration is only relevant for Linux nodes, and -thus is Not applicable on a Windows server. - -Unexpected:: - -We have a special kind of report for unexpected states (both for enforce and audit mode). These -reports generally mean that the node is sending reports for unexpected configuration components. It -may be due to bad parameters for the configuration, or an error in the Technique. - - -*Compliance calculus* - -Based on these facts, the compliance of a Rule is calculated like this: - -Number of Nodes for which conformity is reached for every Directive of the -Rule / Total number of Nodes on which the Rule has been applied - diff --git a/src/reference/modules/ROOT/pages/23_configuration_management/37_validation_workflow.adoc b/src/reference/modules/ROOT/pages/23_configuration_management/37_validation_workflow.adoc deleted file mode 100644 index 83e88ec4..00000000 --- a/src/reference/modules/ROOT/pages/23_configuration_management/37_validation_workflow.adoc +++ /dev/null @@ -1,169 +0,0 @@ -=== Validation workflow in Rudder - -The validation workflow is a feature whose purpose is to hold any change (Rule, Directive, Group) made by users in the web interface, -to be reviewed first by other users with the adequate privileges before actual deployment. - -The goal is to improve safety and knowledge sharing in the team that is using Rudder. - -To enable it, you only have to tick "Enable Change Requests" in the Administration - Settings tab of the web interface. (This feature -is optional and can be disabled at any time without any problem, besides risking the invalidation of yet-unapproved changes) - -image::workflows/Enabling.png[] - -==== What is a Change request ? - -A Change request represents a modification of a Rule/Directive/Group from an old state to a new one. -The Change is not saved and applied by the configuration, before that, it needs to be reviewed and approved by other members of the team. 
- -A Change request has: - -- An Id (an integer > 0) -- A title. -- A description. -- A creator. -- A status. -- Its own history. - -This information can be updated on the change request detail page. -For now, a Change request is linked to one change at a time. - -===== Change request status - -There is 4 Change request status: - -Pending validation:: -- The change has to be reviewed and validated. -- Can be send to: Pending deployment, Deployed, Cancelled. - -Pending deployment:: -- The change was validated, but now require to be deployed. -- Can be send to: Deployed, Cancelled. - -Deployed:: -- The change is deployed. -- This is a final state, it can't be moved anymore. - -Cancelled:: -- The change was not approved. -- This is a final state, it can't be moved anymore. - -Here is a diagram about all those states and transitions: - -image::workflows/States.png[] - -==== Change request management page - -All Change requests can be seen on the /secure/utilities/changeRequests page. -There is a table containing all requests, you can access to each of them by clicking on their id. -You can filter change requests by status and only display what you need. - -image::workflows/Management.png[] - -===== Change request detail page - -Each Change request is reachable on the /secure/utilities/changeRequest/id. - -image::workflows/Details.png[] - -The page is divided into two sections: - -Change request information:: - -display common information (title, description, status, id) and a form to edit them. - -image::workflows/Informations.png[] - -Change request content:: - -In this section, there is two tabs: -- History about that change request - -image:workflows/History.png[] - -- Display the change proposed - -image:workflows/Rule_Update_Diff.png[] - - -==== How to create a Change request ? - -If they are enabled in Rudder, every change in Rudder will make you create a Change request. -You will have a popup to enter the name of your change request and a change message. 
- -The change message will be used as description for you Change Request, so we advise to fill it anyway to keep an explanation ab out your change. - -image::workflows/Popup.png[] - -Change request are not available for Rule/Directive/Groups creation, they are only active if the Rule/Directive/Groups existed before: - -Here is a small table about all possibilities: - -image::workflows/Table.png[] - -==== How to validate a Change request ? - -===== Roles - -Not every user can validate or deploy change in Rudder. -Only those with one of the following roles can act on Change request: - -Validator:: -Can validate Change request - -Deployer:: -To deploy Change Request - -Both of those roles: - -- Give you access to pending Change requests -- Allow you to perform actions on them (validate or cancel) - -You have to change users in */opt/rudder/etc/rudder-users.xml* and include those rights. -Without one of those roles, you can only access Change Request in 'Deployed' or 'Cancelled' and those you opened before. - -You can deploy directly if you have both the validator and deployer roles. -The *administrator* Role gives you both the deployer and valdiator role. - -There is also the possibility to access Change requests in Read only mode by using the role 'validator_read' or 'deployer_read'. - -image::workflows/Validation.png[] - -===== Self Validations - -Using Change requests means that you want your team to share knowledge, and validate each other change. -So by default: - -- *Self validation* is disabled. -- *Self deployment* is enabled. - -Those two behaviours can be changed in the property file */opt/rudder/etc/rudder-web.properties*. -'rudder.workflow.self.validation' and 'rudder.workflow.self.deployment' are the properties that define this behaviour. 
- -==== Change request and conflicts - -When the initial state of a Change request has changed (i.e.: you want to modify a Directive, but someone else changes about that Directive has been accepted before yours), your change can't be validated anymore. - -image::workflows/Conflict.png[] - -For now, we decided to reduce to the possibility of an error or inconsistency when there are concurrent changes. -In a future version of Rudder, there will be a system to handle those conflicts, and make sure actual changes are not overwritten. - -==== Notifications: - -In several parts of Rudder webapp there are some Notifications about Change requests. - -===== Pending change requests - -This notification is displayed only if the validator/deployer role is active on your user account. -It shows you how many Change requests are waiting to be reviewed/deployed. -Clicking on it will lead you to the Change request management page, with a filter already applied. - -image::workflows/Notification.png[] - -===== Change already proposed on Rule/Directive/Group - -When there is a change about the Rule/Directive/Group already proposed but not deployed/cancelled, you will be notified that there are some pending Change requests about that element. -You will be provided a Link to those change request, So you can check if the change is already proposed. 
- -image::workflows/Warning.png[] - diff --git a/src/reference/modules/ROOT/pages/23_configuration_management/40_policy_mode.adoc b/src/reference/modules/ROOT/pages/23_configuration_management/40_policy_mode.adoc deleted file mode 100644 index 43ebe1a5..00000000 --- a/src/reference/modules/ROOT/pages/23_configuration_management/40_policy_mode.adoc +++ /dev/null @@ -1,50 +0,0 @@ -[[_policy_mode_audit_enforce]] -=== Policy Mode (Audit/Enforce) - -Rudder 4.0 includes a policy mode setting, that allows two distinct behaviors: - -* *Audit*: Test if the system is in the desired state, and report about it -* *Enforce*: Test if the system is in the desired state, if not, try to act to get to this state, and report about actions taken and final state - -This allows for example xref:26_manage_your_it/5_usecases/0_usecases_intro.adoc#_using_rudder_as_an_audit_tooli[to use Rudder as an audit tool] or xref:26_manage_your_it/5_usecases/0_usecases_intro.adoc#_using_audit_mode_to_validate_a_policy_before_applying_it[to test a policy before enforcing it]. - -image:audit_mode_general_overview.png[] - -This mode can be set: - -* Globally on the Rudder root server. In this can case there are two options: allow to override this mode on specific items, or use the global configuration everywhere. -* On a directive. -* On a node. - -A lot of attention and several safeguards have been put in place to ensure that if you choose to use "Audit" -for a target, nothing will be changed on the node for that target (except Rudder's own configuration under `/var/rudder`), and only some harmless -commands will be run (like listing installed packages or refreshing package lists). 
- -Nodes are fully aware of exactly what directives need to be executed in Audit or in Enforce mode, and the "rudder agent" command line has been enhanced to let you see the result with a glimpse: the first column in `rudder agent run` output is now the mode (*A* for *Audit* and *E* for *Enforce*), and the compliance summary is split by audit mode. -In addition to pre-existing technical reports, new ones have been added to report on "audit-compliant" (the check was OK), "audit-non-compliant" (the check was done, but the result is not the one expected), "audit-not-applicable" (the check is not applicable for that node, for example because of a limitation on the OS type), "audit-error" (the check wasn't able to finish correctly) status. - -==== How is the effective mode computed? - -We will here explain what is the computation made during generation to -decide which mode to apply to a directive on a node, based on the current settings. - -The short rule is: *Override wins, then Audit wins* - -For a given directive on a given node at a given time, we have three different policy mode -settings: - -* The global mode, called *G*, which can be *Audit* or *Enforce* -* The node mode called *N*, which can be *Global* (if not overridden), *Audit, or *Enforce* -* The directive mode, called *D*, which can be *Global* (if not overridden), *Audit, or *Enforce* - -The result is: - -* If override is not allowed, the policy mode is *always* the global mode *G*. -* If override is allowed: - -** If *N* and *D* are set to use the *Global* default value (i.e. no override), the policy mode is the global mode *G*. -** If *N* uses the *global* value and *D* is overriden to *Audit* or *Enforce*, the *D* value is used. -** If *D* uses the *global* value and *N* is overriden to *Audit* or *Enforce*, the *N* value is used. 
-** If *N* and *D* are overriden to *Audit* or *Enforce*, the value is *Audit* if at least one of *N* or *D* is *Audit*, *Enforce* if both are in *Enforce* mode - - diff --git a/src/reference/modules/ROOT/pages/23_configuration_management/41_technique_editor.adoc b/src/reference/modules/ROOT/pages/23_configuration_management/41_technique_editor.adoc deleted file mode 100644 index 44d5f0ac..00000000 --- a/src/reference/modules/ROOT/pages/23_configuration_management/41_technique_editor.adoc +++ /dev/null @@ -1,109 +0,0 @@ -[[technique-editor]] -=== Technique editor - -==== Introduction - -===== First, what is a Technique ? - -A technique is a description in code form of what the agent has to do on the node. -This code is actually composed of a series of Generic method calls. -These different Generic method calls are conditional. - -===== What is a Generic method? - -A generic method is a description of an elementary state independent of the operating system (ex: a package is installed, a file contains such line, etc...). -Generic methods are independent of the operating system (It has to work on any operating system). -Generic methods calls are conditioned by condition expressions, which are boolean expression combining basic conditions with classic boolean operators (ex : operating system is Debian, such generic method produced a modification, did not produce any modification, produced an error, etc…) - - -==== Technique Editor - -===== Utility - -Rudder provides a set of pre-defined Techniques that cover some basic configuration and system administration needs. Of course,this set of techniques cannot responds to all of the specific needs of each client. That’s why Rudder integrate the *Technique _editor_*, a tool to create advanced Techniques. -Directly accessible from Ruder menu (_Utilities > Technique editor_), this tool has an easy-to-use interface, which doesn’t require any programming skills but nevertheless allows to create complex Techniques. 
- -===== Interface - -Here is an overview of its interface : - -image::technique_editor/1-rudder-technique-editor.png[] - -The interface is divided into 3 columns: - - -- A column listing custom Techniques - -image::technique_editor/2-list-techniques.png[] - -Here, we can see our previously created Techniques. We can click on them to see their details/edit them, or create a new one by clicking on the “New” button. Theses Techniques are visible in the *ncf techniques* category in the *Directives _tree_*, so can be used to create new Directives. - -- A column with the Technique content - -When we create a new Technique, or when we edit an existing one, the configuration form appears at the center of the interface, instead of the title and the description of the tool. - -image::technique_editor/3-ntp-configuration.png[] - -Then we can see the name, the description, the Bundle name, the version and the Generic methods list of the current Technique. Only the name and the description are editable, the Bundle name and the version are automatically defined during the Technique creation. - -- A column listing Generic methods / displaying generic method details - -To the right of the interface is the list of Generic methods available for Technique configuration. -This list is made up of about a hundred Generic methods, grouped according to their category to make them easier to use. (An exhaustive list of them available at any time in the online product documentation can be found on the following link: http://www.rudder-project.org/doc/_generic_methods.html) - -image::technique_editor/4-list-generics-method.png[] - -You just need to click on a Generic method or drag'n drop it in the area provided for such purpose to add it to the current Technique. Once it's done, you can configure it by clicking on it. 
Then a new display containing the method details appears instead of the Generic methods list: - -image::technique_editor/5-configure-generic-method.png[] - -The Generic method details are divided into 3 blocks : - -. Conditions - - Conditions allow user to restrict the execution of the method. -. Parameters - - Parameters are in mono or multi line text format. They can contains variables which will be extended at the time of the execution. -. Result conditions - - One result condition of three will be defined following the execution of a generic method: - * Success, when the configuration is correct and no action are needed - * Repaired, when the configuration is wrong and actions to fix it were executed with success - * Error, when the configuration is wrong but actions to fix it failed - -Theses conditions can be used in another Generic methods conditions. ie, you can execute a command if a previous one failed or was repaired. - - -==== Create your first Technique - -Now we are going to see how to create a simple technique to configure a ntp server, step by step. - -===== 1. General information - -Let's start from the beginning. Click on the "_New_ Technique" button and start filling in the General information fields (only name is required). - -In our case: - -- *Name*: _Configure NTP_ -- *Description*: _Install, configure and ensure the ntpd is running. Uses a template file to configuration._ - -===== 2. Add and configure generic methods - -Now, we have to find and add the generic methods which correspond to the actions we want to execute. In our case, we want to add the following methods: - -* Package install (You can find it in the *Package category*) - - This method only take one parameter, the name of the package to install. So here, fill in the *package_name* field with the value _ntp_. - -* File from template (You can find it in the *File category*) - - This method take two parameters. 
The first one corresponds to the absolute path of the source file containing a template to be expanded. We are going to use a Rudder variable here to get the correct path. Fill in the *source_template* field with the value _$\{path_technique\}/templates/ntp.conf_. - - The second corresponds to the absolute path of the destination file. Fill in with the value _/etc/ntp.conf_. - -* Service restart (You can find it in the *Service category*) - - This method only take one parameter, the name of the service we want to restart. So here, fill in the *service_name* field with the value _ntp_. - - Also, we want to restart the service only if it has just been installed, so only if the result conditions defined following the execution of *Package install* method is *Repaired* (package_install_ntp_repaired). So here, fill in the *Other conditions* field in the Conditions panel with the value _package_install_ntp_repaired_. - -* Service ensure running (You can find it in the *Service category*) - - This method only take one parameter, the name of the service we want to check. Again, here, fill in the *service_name* field with the value _ntp_. - -===== 3. Save and apply your technique - -And… It’s already done. Rather fast, right? Don't forget to save. Now you can see it in the *Directives _tree_*, and use it to create a Directive that will be applied on your _Nodes_ thanks to a _Rule_. 
- diff --git a/src/reference/modules/ROOT/pages/26_manage_your_it/00_manage_your_it.adoc b/src/reference/modules/ROOT/pages/26_manage_your_it/00_manage_your_it.adoc deleted file mode 100644 index bacd715c..00000000 --- a/src/reference/modules/ROOT/pages/26_manage_your_it/00_manage_your_it.adoc +++ /dev/null @@ -1,3 +0,0 @@ -[[_manage_your_it]] -== Configuration policies - diff --git a/src/reference/modules/ROOT/pages/26_manage_your_it/20_how_to/00_how_to.adoc b/src/reference/modules/ROOT/pages/26_manage_your_it/20_how_to/00_how_to.adoc deleted file mode 100644 index 6d9d9d14..00000000 --- a/src/reference/modules/ROOT/pages/26_manage_your_it/20_how_to/00_how_to.adoc +++ /dev/null @@ -1,3 +0,0 @@ -=== How to - - diff --git a/src/reference/modules/ROOT/pages/26_manage_your_it/20_how_to/20_share_files_between_nodes.adoc b/src/reference/modules/ROOT/pages/26_manage_your_it/20_how_to/20_share_files_between_nodes.adoc deleted file mode 100644 index a37cf719..00000000 --- a/src/reference/modules/ROOT/pages/26_manage_your_it/20_how_to/20_share_files_between_nodes.adoc +++ /dev/null @@ -1,21 +0,0 @@ -==== Share files between nodes - -Rudder 4.1 introduced a way to share files from one node to another. -It allows a node to send a file to its relay, which will make it available -for another target node, that has to to specifically download it. - -This file sharing method is secured by: - -* The control of uploaded file signature by the server, to check it matches the source node's private key. -* The same mechanism as standard file copy in Rudder to download the shared file from the server. - -It also includes a ttl mechanism that allows sharing a file for a limited amount of time. - -To use this feature, two generic methods are available in the technique editor: - -* xref:90_reference/50_generic_methods.adoc#sharedfile_from_node[sharedfile_from_node]: To download a file shared from another node. 
-* xref:90_reference/50_generic_methods.adoc#sharedfile_to_node[sharedfile_to_node]: To make a file available to another node. - -See the documentation of these methods for details about the required parameters, -and especially xref:90_reference/50_generic_methods.adoc#sharedfile_to_node[sharedfile_to_node] for a complete usage example. - diff --git a/src/reference/modules/ROOT/pages/26_manage_your_it/30_security/00_intro.adoc b/src/reference/modules/ROOT/pages/26_manage_your_it/30_security/00_intro.adoc deleted file mode 100644 index 46c1ca50..00000000 --- a/src/reference/modules/ROOT/pages/26_manage_your_it/30_security/00_intro.adoc +++ /dev/null @@ -1,2 +0,0 @@ -=== Security considerations - diff --git a/src/reference/modules/ROOT/pages/26_manage_your_it/5_usecases/0_usecases_intro.adoc b/src/reference/modules/ROOT/pages/26_manage_your_it/5_usecases/0_usecases_intro.adoc deleted file mode 100644 index 04237d10..00000000 --- a/src/reference/modules/ROOT/pages/26_manage_your_it/5_usecases/0_usecases_intro.adoc +++ /dev/null @@ -1,51 +0,0 @@ -=== Usecases - -This chapter gives a few examples for using Rudder. We have no doubt that you'll -have your own ideas, that we're impatient to hear about... - -==== Dynamic groups by operating system - -Create dynamic groups for each operating system you administer, so that you can -apply specific policies to each type of OS. When new nodes are added to Rudder, -these policies will automatically be enforced upon them. - -==== Library of preventive policies - -Why not create policies for emergency situations in advance? You can then put -your IT infrastructure in "panic" mode in just a few clicks. - -For example, using the provided Techniques, you could create a Name -resolution Directive to use your own internal DNS servers for normal situations, -and a second, alternative Directive, to use Google's public DNS servers, in case -your internal DNS servers are no longer available. 
- -==== Standardizing configurations - -You certainly have your own best practices (let's call them good habits) for -setting up your SSH servers. - -But is that configuration the same on all your servers? Enforce the settings -your really want using an OpenSSH server policy and apply it to all your Linux -servers. SSH servers can then be stopped or reconfigured manually many times, -Rudder will always restore your preferred settings and restart the SSH server in -less than 5 minutes. - -[[_using_rudder_as_an_audit_tool]] -==== Using Rudder as an Audit tool - -Using Rudder as an Audit tool is useful if you do not want to make any changes on the system, -temporarily (freeze period, etc.) or permanently. - -To use Rudder as an Audit tool without modifying any configuration on your systems, -set the Policy Mode to *Audit* in the Settings, and do not allow overriding. - -==== Using Audit mode to validate a policy before applying it - -Before applying a configuration policy to some systems (a new policy or a new system), -you can switch the policy mode of the directive defining this policy or of the nodes -it is applied to to *Audit*. - -This is particularly useful when adding rules to enforce policies that are supposed to be already applied: -you can measure the gap between expected and actual state, and check what changes would be made before applying them. - - diff --git a/src/reference/modules/ROOT/pages/30_basic_administration/00_administration_intro.adoc b/src/reference/modules/ROOT/pages/30_basic_administration/00_administration_intro.adoc deleted file mode 100644 index e402b359..00000000 --- a/src/reference/modules/ROOT/pages/30_basic_administration/00_administration_intro.adoc +++ /dev/null @@ -1,7 +0,0 @@ -== Basic administration - -This chapter covers basic administration task of Rudder services like -configuring some parameters of the Rudder policy server, reading the services -log, and starting, stopping or restarting Rudder services. 
- - diff --git a/src/reference/modules/ROOT/pages/30_basic_administration/10_archives.adoc b/src/reference/modules/ROOT/pages/30_basic_administration/10_archives.adoc deleted file mode 100644 index 621581ec..00000000 --- a/src/reference/modules/ROOT/pages/30_basic_administration/10_archives.adoc +++ /dev/null @@ -1,113 +0,0 @@ -[[archives, Archives]] -=== Archives - -==== Archive usecases - -The archive feature of Rudder allows to: - -* Exchange configuration between multiple Rudder instances, in particular when -having distinct environments; - -* Keep an history of major changes. - -===== Changes testing - -Export the current configuration of Rudder before you begin to make any change -you have to test: if anything goes wrong, you can return to this archived state. - -===== Changes qualification - -Assuming you have multiple Rudder instances, each on dedicated for the -development, qualification and production environment. You can prepare the -changes on the development instance, export an archive, deploy this archive on -the qualification environment, then on the production environment. - - - -.Versions of the Rudder servers -[WARNING] -=========== - -If you want to export and import configurations between environments, the version -of the source and target Rudder server must be exactly the same. If the versions -don't match (even if only the minor versions are different), there is a risk that -the import will break the configuration on the target Rudder server. - -=========== - - - -==== Concepts - -In the 'Administration > Archives' section of the Rudder Server web interface, you -can export and import the configuration of Rudder Groups, Directives and Rules. -You can either archive the complete configuration, or only the subset dedicated -to Groups, Directives or Rules. - -When archiving configuration, a 'git tag' is created into +/var/rudder/configuration-repository+. 
-This tag is then referenced in the Rudder web interface, and available for download -as a zip file. Please note that each change in the Rudder web interface is also -committed in the repository. - -The content of this repository can be imported into any Rudder server (with the same version). - -==== Archiving - -To archive Rudder Rules, Groups, Directives, or make a global archive, you need to go to -the 'Administration > Archives' section of the Rudder Server web interface. - -To perform a global archive, the steps are: - -. Click on 'Archive everything' - it will update the drop down list 'Choose an archive' with -the latest data -. In the drop down list 'Choose an archive', select the newly created archive (archives are sorted -by date), for example 2015-01-08 16:39 -. Click on 'Download as zip' to download an archive that will contains all elements. - -==== Importing configuration - -On the target server, importing the configuration will "merge" them with the existing configuration: -every groups, rules, directives or techniques with the same identifier will be replaced by the import, -and all others will remain untouched. - -To import the archive on the target Rudder server, you can follow the following steps: - -. Uncompress the zip archive in /var/rudder/configuration-repository -. If necessary, correct all files permissions: +chown -R root:rudder directives groups parameters ruleCategories rules techniques+ and +chown -R ncf-api-venv:rudder ncf/50_techniques techniques/ncf_techniques+ -. Add all files in the git repository: +git add . && git commit -am "Importing configuration"+ -. Finally, in the Web interface, go to the 'Administration > Archives' section, and select -'Latest Git commit' in the drop down list in the Global archive section, and click on 'Restore -everything' to restore the configuration. 
- -[TIP] - -==== - -You can also perform the synchronisation from on environment to another by -using git, through a unique git repository referenced on both environment. - -For instance, using one unique git repository you can follow this workflow: - -. On Rudder test: - -.. Use Rudder web interface to prepare your policy; - -.. Create an archive; - -.. +git push+ to the central repository; - -. On Rudder production: - -.. +git pull+ from the central repository; - -.. Use Rudder web interface to import the qualified archive. - -==== - -==== Deploy a preconfigured instance - -You can use the procedures of Archiving and Restoring configuration to deploy -preconfigured instance. You would prepare first in your labs the configuration for -Groups, Directives and Rules, create an Archive, and import the Archive on the -new Rudder server installation - diff --git a/src/reference/modules/ROOT/pages/30_basic_administration/10_event_logs.adoc b/src/reference/modules/ROOT/pages/30_basic_administration/10_event_logs.adoc deleted file mode 100644 index 7e174b87..00000000 --- a/src/reference/modules/ROOT/pages/30_basic_administration/10_event_logs.adoc +++ /dev/null @@ -1,24 +0,0 @@ -=== Event Logs - -Every action happening in the Rudder web interface are logged in the -PostgreSQL database. The last 1000 event log entries are displayed in the -*Administration > View Event Logs* section of Rudder web application. Each -log item is described by its 'ID', 'Date', 'Actor', and 'Event' 'Type', -'Category' and 'Description'. For the most complex events, like changes in -nodes, groups, techniques, directives, deployments, more details can be -displayed by clicking on the event log line. 
- -Event Categories:: - -* User Authentication -* Application -* Configuration Rules -* Policy -* Technique -* Policy Deployment -* Node Group -* Nodes -* Rudder Agents -* Policy Node -* Archives - diff --git a/src/reference/modules/ROOT/pages/30_basic_administration/20_policy_server.adoc b/src/reference/modules/ROOT/pages/30_basic_administration/20_policy_server.adoc deleted file mode 100644 index 83effa9a..00000000 --- a/src/reference/modules/ROOT/pages/30_basic_administration/20_policy_server.adoc +++ /dev/null @@ -1,25 +0,0 @@ -=== Policy Server - -The *Administration > Policy Server Management* section sum-up information about -Rudder policy server and its parameters. - -==== Configure allowed networks - -Here you can configure the networks from which nodes are allowed to connect to -Rudder policy server to get their updated rules. - -You can add as many networks as you want, the expected format is: -+networkip/mask+, for example +42.42.0.0/16+. - -==== Clear caches - -Clear cached data, like node configuration. That will trigger a full -redeployment, with regeneration of all promises files. - -==== Reload dynamic groups - -Reload dynamic groups, so that new nodes and their inventories are taken into -account. Normally, dynamic group are automatically reloaded unless that feature -is explicitly disable in Rudder configuration file. - - diff --git a/src/reference/modules/ROOT/pages/30_basic_administration/30_plugins.adoc b/src/reference/modules/ROOT/pages/30_basic_administration/30_plugins.adoc deleted file mode 100644 index 9f31480b..00000000 --- a/src/reference/modules/ROOT/pages/30_basic_administration/30_plugins.adoc +++ /dev/null @@ -1,39 +0,0 @@ - -[[plugins-management]] - -=== Plugins - -Rudder is an extensible software. The *Administration > Plugin Management* -section sum-up information about loaded plugins, their version and their -configuration. - -A plugin is an `.rpkg` file (for "Rudder package"). 
- -==== Install a plugin - -To install a plugin, copy the `.rpkg` file on your server, and run: - ----- -/opt/rudder/bin/rudder-pkg install-file ----- - -You can list currently installed plugins using: - ----- -/opt/rudder/bin/rudder-pkg list ----- - -You can also enable or disable, or remove a plugin with: - ----- -/opt/rudder/bin/rudder-pkg plugin enable -/opt/rudder/bin/rudder-pkg plugin disable -/opt/rudder/bin/rudder-pkg remove ----- - -See all available commands with: - ----- -/opt/rudder/bin/rudder-pkg --help ----- - diff --git a/src/reference/modules/ROOT/pages/30_basic_administration/50_services_administration.adoc b/src/reference/modules/ROOT/pages/30_basic_administration/50_services_administration.adoc deleted file mode 100644 index 80468f52..00000000 --- a/src/reference/modules/ROOT/pages/30_basic_administration/50_services_administration.adoc +++ /dev/null @@ -1,78 +0,0 @@ -=== Basic administration of Rudder services - -==== Restart the agent of the node - -To restart the Rudder Agent, use following command on a node: - ----- - -service rudder-agent restart - ----- - -[TIP] - -==== - -This command can take more than one minute to restart the CFEngine daemon. -This is not a bug, but an internal protection system of CFEngine. 
- -==== - -==== Restart the root rudder service - -===== Restart everything - -You can restart all components of the Rudder Root Server at once: - ----- - -service rudder-server restart - ----- - -===== Restart only one component - -Here is the list of the components of the root server with a brief description -of their role, and the command to restart them: - -include::{partialsdir}/glossary/cfengine-server.adoc[] - ----- - -service rudder-agent restart - ----- - -include::{partialsdir}/glossary/web-server-application.adoc[] - ----- - -service rudder-jetty restart - ----- - -include::{partialsdir}/glossary/web-server-front-end.adoc[] - ----- - -service apache2 restart - ----- - -include::{partialsdir}/glossary/ldap-server.adoc[] - ----- - -service rudder-slapd restart - ----- - -include::{partialsdir}/glossary/sql-server.adoc[] - ----- - -service postgresql* restart - ----- - diff --git a/src/reference/modules/ROOT/pages/30_basic_administration/70_server_rest_api.adoc b/src/reference/modules/ROOT/pages/30_basic_administration/70_server_rest_api.adoc deleted file mode 100644 index e336e730..00000000 --- a/src/reference/modules/ROOT/pages/30_basic_administration/70_server_rest_api.adoc +++ /dev/null @@ -1,213 +0,0 @@ -[[rest-api]] -=== REST API - -Rudder can be used as a web service using a REST API. - -This documentation covers the version 1 of Rudder's API. - -The version 2 has now been implemented, which is much more complete and -has a dedicated documentation available here: http://www.rudder-project.org/rudder-api-doc/ - -[WARNING] - -==== - -The version 1 is to be considered legacy and should not be used anymore. Please migrate to -version 2 to benefit from the new authentication features and more complete existing methods. - -==== - - -==== Default setup - -Access to REST API can be either using Rudder authentication, either -unauthenticated, using authentication mechanisms set elsewhere, for instance at -Apache level. 
- -===== Rudder Authentication - -By default, the access to the REST API is open to users not authenticated in -Rudder. - -The method of authentication can be configured in -+/opt/rudder/etc/rudder-web.properties+ - ----- - -rudder.rest.allowNonAuthenticatedUser=true - ----- - -===== Apache access rules - -By default, the REST API is exposed for localhost only, at +http://localhost/rudder/api+. - -.Example usage of non authenticated REST API - -==== - -Unrestricted access can be granted to local scripts accessing to +localhost+, -whereas remote access to the REST API will be either denied, or restricted -through authentication at apache level. - -==== - -===== User for REST actions - -Actions done using the REST API are logged by default as run by the user -+UnknownRestUser+. - -To change the name of this user, add following header to the HTTP request: - ----- - -X-REST-USERNAME: MyConfiguredRestUser - ----- - -If the REST API is authenticated, the authenticated user name will be used in the -logs. - -==== Status - -+http://localhost/rudder/api/status+:: - -Check if Rudder server is up and return +OK+. -If Rudder server is not responding, an error is displayed. - -==== Promises regeneration - -+http://localhost/rudder/api/deploy/reload+:: - -Regenerate promises (same action as the +Regenerate now+ button). - -==== Dynamic groups regeneration - -+http://localhost/rudder/api/dyngroup/reload+:: - -Check all dynamic groups for changes. If changes have occurred, regenerate the -groups in the LDAP and the CFEngine promises. - -==== Technique library reload - -+http://localhost/rudder/api/techniqueLibrary/reload+:: - -Check the technique library for changes. If changes have occurred, reload the -technique library in memory and regenerate the CFEngine promises. 
- -==== Archives manipulation - -Various methods are available to import and export items: - -===== Archiving: - -+http://localhost/rudder/api/archives/archive/groups+:: - -Export node groups and node groups categories. - -+http://localhost/rudder/api/archives/archive/directives+:: - -Export policy library (categories, active techniques, directives). - -+http://localhost/rudder/api/archives/archive/rules+:: - -Export rules - -+http://localhost/rudder/api/archives/archive/full+:: - -Export everything - -===== Listing: - - -+http://localhost/rudder/api/archives/list/groups+:: - -List available archives datetime for groups (the datetime is in the format -awaited for restoration). - -+http://localhost/rudder/api/archives/list/directives+:: - -List available archives datetime for policy library (the datetime is in the -format awaited for restoration). - -+http://localhost/rudder/api/archives/list/rules+:: - -List available archives datetime for configuration rules (the datetime is in the -format awaited for restoration). - -+http://localhost/rudder/api/archives/list/full+:: - -List available archives datetime for full archives (the datetime is in the -format awaited for restoration). - -===== Restoring a given archive: - -`http://localhost/rudder/api/archives/restore/groups/datetime/[archiveId]`:: - -Restore given groups archive. - -`http://localhost/rudder/api/archives/restore/directives/datetime/[archiveId]`:: - -Restore given directives archive. - -`http://localhost/rudder/api/archives/restore/rules/datetime/[archiveId]`:: - -Restore given rules archive. - -`http://localhost/rudder/api/archives/restore/full/datetime/[archiveId]`:: - -Restore everything. 
- -===== Restoring the latest available archive (from a previously archived action, and so from a Git tag): - ----- - -http://localhost/rudder/api/archives/restore/groups/latestArchive -http://localhost/rudder/api/archives/restore/directives/latestArchive -http://localhost/rudder/api/archives/restore/rules/latestArchive -http://localhost/rudder/api/archives/restore/full/latestArchive - ----- - -===== Restoring the latest available commit (use Git HEAD): - ----- - -http://localhost/rudder/api/archives/restore/groups/latestCommit -http://localhost/rudder/api/archives/restore/directives/latestCommit -http://localhost/rudder/api/archives/restore/rules/latestCommit -http://localhost/rudder/api/archives/restore/full/latestCommit - ----- - -===== Downloading a ZIP archive - -The REST API allows to download a ZIP archive of groups, directives and -rules (as XML files) for a given Git commit ID (the commit HASH). - -It is not designed to query for available Git commit ID, so you will need to get -it directly from a Git tool (for example with Git log) or from the list API. - -Note that that API allows to download ANY Git commit ID as a ZIP archive, -not only the one corresponding to Rudder archives. - -Note 2: you should rename the resulting file with a ".zip" extension as -most zip utilities won't work correctly on a file not having it. - -`http://localhost/rudder/api/archives/zip/groups/[GitCommitId]`:: - -Download groups for the given Commit ID as a ZIP archive. - -`http://localhost/rudder/api/archives/zip/directives/[GitCommitId]`:: - -Download directives for the given Commit ID as a ZIP archive. - -`http://localhost/rudder/api/archives/zip/rules/[archiveId]`:: - -Download rules for the given Commit ID as a ZIP archive. - -`http://localhost/rudder/api/archives/zip/all/[archiveId]`:: - -Download groups, directives and rules for the given Commit ID as a ZIP archive. 
- - diff --git a/src/reference/modules/ROOT/pages/30_basic_administration/80_user_management.adoc b/src/reference/modules/ROOT/pages/30_basic_administration/80_user_management.adoc deleted file mode 100644 index 5644f7a1..00000000 --- a/src/reference/modules/ROOT/pages/30_basic_administration/80_user_management.adoc +++ /dev/null @@ -1,218 +0,0 @@ -[[user-management]] - -=== User management - -Change the users authorized to connect to the application. -You can define authorization level for each user - -==== Configuration of the users using a XML file - -===== Generality - -The credentials of a user are defined in the XML file -+/opt/rudder/etc/rudder-users.xml+. This file expects the following format: - ----- - - - - - - - ----- - -The name and password attributes are mandatory (non empty) for the user tags. -The role attribute can be omitted but the user will have no permission, and -only valid attributes are recognized. - -Every modification of this file should be followed by a restart of the Rudder -web application to be taken into account: - ----- - -service rudder-jetty restart - ----- - -[[_passwords]] -===== Passwords - -The authentication tag should have a "hash" attribute, making "password" attributes -on every user expect hashed passwords. Not specifying a hash attribute will fallback -to plain text passwords, but it is strongly advised not to do so for security reasons. - -The algorithm to be used to create the hash (and verify it during authentication) -depend on the value of the hash attribute. 
The possible values, the -corresponding algorithm and the Linux shell command need to obtain the hash of -the "secret" password for this algorithm are listed here: - -.Hashed passwords algorithms list - -[options="header"] - -|==== -|Value | Algorithm | Linux command to hash the password -|"md5" | MD5 | +read mypass; echo -n $mypass \| md5sum+ -|"sha" or "sha1" | SHA1 | +read mypass; echo -n $mypass \| shasum+ -|"sha256" or "sha-256" | SHA256 | +read mypass; echo -n $mypass \| sha256sum+ -|"sha512" or "sha-512" | SHA512 | +read mypass; echo -n $mypass \| sha512sum+ -|==== - -When using the suggested commands to hash a password, you must enter the -command, then type your password, and hit return. The hash will then be -displayed in your terminal. This avoids storing the password in your shell -history. - -Here is an example of authentication file with hashed password: - ----- - - - - - - - - ----- - -[[ldap-auth-provider, LDAP authentication provider for Rudder]] -==== Configuring an LDAP authentication provider for Rudder - -If you are operating on a corporate network or want to have your users in a -centralized database, you can enable LDAP authentication for Rudder users. - -===== LDAP is only for authentication - -Take care of the following limitation of the current process: only *authentication* -is delegated to LDAP, NOT *authorizations*. So you still have to -declare user's authorizations in the Rudder user file (rudder-users.xml). - -A user whose authentication is accepted by LDAP but not declared in the -rudder-users.xml file is considered to have no rights at all (and so will -only see a reduced version of Rudder homepage, with no action nor tabs available). - - -The credentials of a user are defined in the XML file -+/opt/rudder/etc/rudder-users.xml+. 
It expects the same format as regular file-based -user login, but in this case "name" will be the login used to connect to LDAP and the -'password' field will be ignored and should be set to "LDAP" to make it clear that -this Rudder installation uses LDAP to log users in. - - -Every modification of this file should be followed by a restart of the Rudder -web application to be taken into account: - ----- - -service rudder-jetty restart - ----- - -===== Enable LDAP authentication - -LDAP authentication is enabled by setting the property +rudder.auth.ldap.enable+ to +true+ -in file +/opt/rudder/etc/rudder-web.properties+ - -The LDAP authentication process is a bind/search/rebind in which an application -connection (bind) is used to search (search) for a user entry given some base and -filter parameters, and then, a bind (rebind) is tried on that entry with the -credential provided by the user. - - -So next, you have to set-up the connection parameters to the LDAP directory to use. -There are five properties to change: - -- rudder.auth.ldap.connection.url -- rudder.auth.ldap.connection.bind.dn -- rudder.auth.ldap.connection.bind.password -- rudder.auth.ldap.searchbase -- rudder.auth.ldap.filter - -The search base and filter are used to find the user. The search base may be left empty, and -in the filter, {0} will be replaced by the value provided as user login. - -Here are some usage examples, - -on standard LDAP: - ----- - -rudder.auth.ldap.searchbase=ou=People -rudder.auth.ldap.filter=(&(uid={0})(objectclass=person)) - ----- - -on Active Directory: - ----- - -rudder.auth.ldap.searchbase= -rudder.auth.ldap.filter=(&(sAMAccountName={0})(objectclass=user)) - ----- - - -==== Authorization management - -For every user you can define an access level, allowing it to access different -pages or to perform different actions depending on its level. - -You can also build custom roles with whatever permission you want, using a type -and a level as specified below. 
- -image::workflow_roles.png[Roles can match different types of users] - -In the xml file, the role attribute is a list of permissions/roles, separated by -a comma. Each one adds permissions to the user. If one is wrong, or not correctly -spelled, the user is set to the lowest rights (NoRights), having access only to the -dashboard and nothing else. - -===== Pre-defined roles - -|==== -|Name | Access level -|administrator | All authorizations granted, can access and modify everything -|administration_only | Only access to administration part of rudder, can do everything within it. -|user | Can access and modify everything but the administration part -|configuration | Can only access and act on configuration section -|read_only | Can access to every read only part, can perform no action -|inventory | Access to information about nodes, can see their inventory, but can't act on them -|rule_only | Access to information about rules, but can't modify them -|==== - -For each user you can define more than one role, each role adding its authorization to the user. - -Example: "rule_only,administration_only" will only give access to the "Administration" tab as well as the -Rules. - -===== Custom roles - -You can set a custom set of permissions instead of a pre-defined role. - -A permission is composed of a type and a level: - -* Type: Indicates what kind of data will be displayed and/or can be set/updated by the user -** "configuration", "rule", "directive", "technique", "node", "group", "administration", "deployment". -* Level: Access level to be granted on the related type -** "read", "write", "edit", "all" (Can read, write, and edit) - -Depending on that value(s) you give, the user will have access to different pages and action in Rudder. 
- -Usage example: - -* configuration_read -> Will give read access to the configuration (Rule management, Directives and Parameters) -* rule_write, node_read -> Will give read and write access to the Rules and read access to the Nodes - -==== Going further - -Rudder aims at integrating with your IT system transparently, so it can't force -its own authentication system. - -To meet this need, Rudder relies on the modular authentication system Spring -Security that allows to easily integrate with databases or an -enterprise SSO like CAS, OpenID or SPNEGO. The documentation for this -integration is not yet available, but don't hesitate to reach us on this topic. - - diff --git a/src/reference/modules/ROOT/pages/41_advanced_node_management/00_intro.adoc b/src/reference/modules/ROOT/pages/41_advanced_node_management/00_intro.adoc deleted file mode 100644 index b0d35088..00000000 --- a/src/reference/modules/ROOT/pages/41_advanced_node_management/00_intro.adoc +++ /dev/null @@ -1,2 +0,0 @@ -== Advanced Node management - diff --git a/src/reference/modules/ROOT/pages/41_advanced_node_management/10_node_management.adoc b/src/reference/modules/ROOT/pages/41_advanced_node_management/10_node_management.adoc deleted file mode 100644 index 979a20ad..00000000 --- a/src/reference/modules/ROOT/pages/41_advanced_node_management/10_node_management.adoc +++ /dev/null @@ -1,77 +0,0 @@ -=== Node management - -==== Reinitialize policies for a Node - -To reinitialize the policies for a Node, delete the local copy of the Applied -Policies fetched from the Rudder Server, and create a new local copy of the -initial promises. - ----- - -rudder agent reset - ----- - -At next run of the Rudder Agent (it runs every five minutes), the initial promises will be used. - -[CAUTION] - -==== - -Use this procedure with caution: the Applied Policies of a Node should never get -broken, unless some major change has occurred on the Rudder infrastructure, like -a full reinstallation of the Rudder Server. 
- -==== - -==== Completely reinitialize a Node - -You may want to completely reinitialize a Node to make it seen as a new node -on the server, for example after cloning a VM. - -[WARNING] - -==== - -This command will permanently delete your node uuid and keys, and no configuration will -be applied before re-accepting and configuring the node on the server. - -==== - -The command to reinitialize a Node is: - ----- - -rudder agent reinit - ----- - -This command will delete all local agent data, including its uuid and keys, and -also reset the agent internal state. The only configuration kept is the server -hostname or ip configured in +policy_server.dat+. It will also send an inventory -to the server, which will treat it as a new node inventory. - -[[_change_the_agent_run_schedule]] -==== Change the agent run schedule - -By default, the agent runs on all nodes every 5 minutes. You can modify this value in - *Settings* -> *General* page in *Agent Run Schedule* section, as well as the "splay time" -across nodes (a random delay that alters scheduled run time, intended to spread -load across nodes). - -image::Global_run_settings.png[] - -This settings can also be modified Node by Node, allowing you to customize the agent behavior (Node with little ressource like a Raspberry Pi or with limited bandwith). To do that, go into the Node details in the *Settings* tab - -image::Node_settings.png[] - - -[WARNING] - -==== - -When reducing notably the run interval length, reporting can be in 'No report' state -until the next run of the agent, which can take up to the previous (longer) interval. 
- -==== - diff --git a/src/reference/modules/ROOT/pages/41_advanced_node_management/11_node_install.adoc b/src/reference/modules/ROOT/pages/41_advanced_node_management/11_node_install.adoc deleted file mode 100644 index 025304f4..00000000 --- a/src/reference/modules/ROOT/pages/41_advanced_node_management/11_node_install.adoc +++ /dev/null @@ -1,189 +0,0 @@ -==== Installation of the Rudder Agent - -===== Static files - -At installation of the Rudder Agent, files and directories are created in -following places: - -+/etc+:: Scripts to integrate Rudder Agent in the system (init, cron). - -+/opt/rudder/share/initial-promises+:: Initialization promises for the Rudder -Agent. These promises are used until the Node has been validated in Rudder. They -are kept available at this place afterwards. - -+/opt/rudder/lib/perl5+:: The FusionInventory Inventory tool and its Perl -dependencies. - -+/opt/rudder/bin/run-inventory+:: Wrapper script to launch the inventory. - -+/opt/rudder/sbin+:: Binaries for CFEngine Community. - -+/var/rudder/cfengine-community+:: This is the working directory for CFEngine -Community. - -===== Generated files - -At the end of installation, the CFEngine Community working directory is -populated for first use, and unique identifiers for the Node are generated. - -+/var/rudder/cfengine-community/bin/+:: CFEngine Community binaries are copied -there. - -+/var/rudder/cfengine-community/inputs+:: Contains the actual working CFEngine -Community promises. Initial promises are copied here at installation. After -validation of the Node, Applied Policies, which are the CFEngine promises -generated by Rudder for this particular Node, will be stored here. - -+/var/rudder/cfengine-community/ppkeys+:: An unique SSL key generated for the -Node at installation time. - -+/opt/rudder/etc/uuid.hive+:: An unique identifier for the Node is generated -into this file. 
- -===== Services - -After all of these files are in place, the CFEngine Community daemons are -launched: - -include::{partialsdir}/glossary/cf-execd.adoc[] - -include::{partialsdir}/glossary/cf-serverd.adoc[] - -===== Configuration - -At this point, you should configure the Rudder Agent to actually enable the -contact with the server. Type in the IP address of the Rudder Root Server in the -following file: - ----- - -echo *root_server_IP_address* > /var/rudder/cfengine-community/policy_server.dat - ----- - -==== Rudder Agent interactive - -You can force the Rudder Agent to run from the console and observe what happens. - ----- - -rudder agent run - ----- - -[CAUTION] - -.Error: the name of the Rudder Root Server can't be resolved - -==== - -If the Rudder Root Server name is not resolvable, the Rudder Agent will issue -this error: - ----- - -rudder agent run - -Unable to lookup hostname (rudder-root) or cfengine service: Name or service not known - ----- - -To fix it, either you set up the agent to use the IP address of the Rudder root -server instead of its Domain name, either you set up accurately the name -resolution of your Rudder Root Server, in your DNS server or in the hosts file. - -The Rudder Root Server name is defined in this file - ----- - -echo *IP_of_root_server* > /var/rudder/cfengine-community/policy_server.dat - ----- - -==== - -[CAUTION] - -.Error: the CFEngine service is not responding on the Rudder Root Server - -==== - -If the CFEngine is stopped on the Rudder Root Server you will get this error: - ----- - -# rudder agent run - !! Error connecting to server (timeout) - !!! System error for connect: "Operation now in progress" - !! 
No server is responding on this port -Unable to establish connection with rudder-root - ----- - -Restart the CFEngine service: - ----- - -service rudder-agent restart - ----- - -==== - -==== Processing new inventories on the server - -===== Verify the inventory has been received by the Rudder Root Server - -There is some delay between the time when the first inventory of the Node is -sent, and the time when the Node appears in the New Nodes of the web interface. -For the brave and impatient, you can check if the inventory was sent by listing -incoming Nodes on the server: - ----- - -ls /var/rudder/inventories/incoming/ - ----- - -===== Process incoming inventories - -On the next run of the CFEngine agent on Rudder Root Server, the new inventory -will be detected and sent to the Inventory Endpoint. The inventory will be then -moved in the directory of received inventories. The Inventory Endpoint do -its job and the new Node appears in the interface. - -You can force the execution of CFEngine agent on the console: - ----- - -rudder agent run - ----- - -===== Validate new Nodes - -User interaction is required to validate new Nodes. - -===== Prepare policies for the Node - -Policies are not shared between the Nodes for obvious security and -confidentiality reasons. Each Node has its own set of policies. Policies are -generated for Nodes according in the following states: - -. Node is new; - -. Inventory has changed; - -. Technique has changed; - -. Directive has changed; - -. Group of Node has changed; - -. Rule has changed; - -. Regeneration was forced by the user. 
- -image::graphviz/generate_policy_workflow.png[Generate policy workflow] - - diff --git a/src/reference/modules/ROOT/pages/41_advanced_node_management/15_node_execution_frequency.adoc b/src/reference/modules/ROOT/pages/41_advanced_node_management/15_node_execution_frequency.adoc deleted file mode 100644 index e096fbed..00000000 --- a/src/reference/modules/ROOT/pages/41_advanced_node_management/15_node_execution_frequency.adoc +++ /dev/null @@ -1,26 +0,0 @@ -==== Agent execution frequency on nodes - -===== Checking configuration (CFEngine) - -By default, Rudder is configured to check and repair configurations using the CFEngine -agent every 5 minutes, at 5 minutes past the hour, 10 minutes past the hour, -etc. - -The exact run time on each machine will be delayed by a random interval, in -order to "smooth" the load across your infrastructure (also known as "splay -time"). This reduces simultaneous connections on relay and root servers (both -for the CFEngine server and for sending reports). - -See xref:41_advanced_node_management/10_node_management.adoc#_change_the_agent_run_schedule[Change the agent run schedule] Section to see how to configure it - - -===== Inventory (FusionInventory) - -The FusionInventory agent collects data about the node it's running on such as -machine type, OS details, hardware, software, networks, running virtual -machines, running processes, environment variables... - -This inventory is scheduled once every 24 hours, and will happen in between -0:00 and 5:00 AM. The exact time is randomized across nodes to "smooth" the -load across your infrastructure. 
- diff --git a/src/reference/modules/ROOT/pages/41_advanced_node_management/20_extend_node_inventory.adoc b/src/reference/modules/ROOT/pages/41_advanced_node_management/20_extend_node_inventory.adoc deleted file mode 100644 index 1147af8f..00000000 --- a/src/reference/modules/ROOT/pages/41_advanced_node_management/20_extend_node_inventory.adoc +++ /dev/null @@ -1,112 +0,0 @@ -[[extend-nodes-inventory, Extend node inventory]] - -=== Extend node inventory - - -It is quite common to need to gather information on your nodes that are not present -in the standard Rudder inventory information. - -As of Rudder 4.3.0, you can get more information about a node thanks to -`inventory hooks`. These information will be available as standard *node properties*. - - -==== Overview - -On the node, you create `inventory hooks` executable and place them in `/var/rudder/hooks.d`. -These binaries are executed in the alphanumerical order, only if executable, and their output is checked to -ensure that it is proper JSON. - -For example, one hook can output: - ----- - -{ - "my_prop1": ["a", "json", "array"], - "my_prop2": {"some": "more", "key": "value"} -} - ----- - -When the node inventory is processed server side, the node properties will get new values, one per -first-level key of all hooks. - -These node properties are marked as "provided by inventory" and can not be deleted nor overwritten. -Appart from that characteristic, they are normal node properties that can be used to create group, or as -variables in Directives parameters. - -==== Creating a node inventory hook - -An inventory hook can be any kind of executable that can be called without parameters, from a shell script to a C program. - -Hooks are located in directory `/var/rudder/hooks.d`. You may need to create that directory the first time you want to add hooks: - ----- - -mkdir /var/rudder/hooks.d - ----- - -They need to be executable by rudder agent. 
- -For example, this hook will create a new "hello_inventory" node property: - ----- - -% cd /var/rudder/hooks.d - -% cat < hello-world -#!/bin/sh -echo '{"hello_inventory": "a simple string value from inventory"}' -EOF - -% chmod +x hello-world - -% rudder agent inventory - ----- - -And then, after the server has processed the inventory, the node (here with ID '74d10806-b41d-4575-ab86-8becb419949b') has the corresponding property: - ----- -% curl -k -H "X-API-Token: ......" -H "Content-Type: application/json" -X GET 'https://..../rudder/api/latest/nodes/74d10806-b41d-4575-ab86-8becb419949b?include=minimal,properties' | jq '.' -{ - "action": "nodeDetails", - "id": "74d10806-b41d-4575-ab86-8becb419949b", - "result": "success", - "data": { - "nodes": [ - { - "id": "74d10806-b41d-4575-ab86-8becb419949b", - .... - "properties": [ - { - "name": "hello_inventory", - "value": "a simple string value from inventory", - "provider": "inventory" - } - ] - } - ] - } -} ----- - - -==== Overriding - -If two hooks provide the same first-level key, then the last executed hook values for that key are kept. - -You should always use the first level keys as a namespace for your hooks to avoid unwanted overriding. - - -==== Inventory XML format - -Properties comming from inventory hooks are stored in a tag named ``. The tag contains a -JSON array with all the inventory hook properties merged: - ----- - -[{ "key1" : "values"},{ "key2" : "values"}] - ----- - diff --git a/src/reference/modules/ROOT/pages/41_advanced_node_management/30_node_lifecycle.adoc b/src/reference/modules/ROOT/pages/41_advanced_node_management/30_node_lifecycle.adoc deleted file mode 100644 index 8e45c205..00000000 --- a/src/reference/modules/ROOT/pages/41_advanced_node_management/30_node_lifecycle.adoc +++ /dev/null @@ -1,51 +0,0 @@ -[[node-lifecycle, Node Lifecycle]] - -=== Node Lifecycle - -Imagine you have a node that you must disconnect for a maintenance period. 
-You know what is happening on the node, and during the maintenance period, -you don't want that the Rudder shows up the node as `Not responding` -and trigger alert on global compliance level. - -An other common use case is to be able to set specific policies for nodes -just after acceptation that are used for provisioning, or just before -node end of life to clean it up. - -In Rudder 4.3, we introduced a way to manage the Node lifecycle, for both of theses uses cases: - -* nodes disconnected from Rudder Server can be excluded from policy generation and Compliance with the `Ignored` state, -* the main states of a system life can be applied with the 4 states `Initializing`, `Enabled`, -`Preparing End of Life` and `Empty policies`. - - -image::node-lifecycle-settings.png[] - - -States `Ignored` and `Empty policies` automatically changes the policy generation and compliance: - -* `Ignored` prevents any new policy generation for the Nodes in this states. -* `Empty policies` generates a minimal set of policies, only to manage the Rudder Agent itself. - -Both states remove the nodes from the compliance. - -Nodes with non-default state appears with a label next to their name in the nodes list to show their -states, and their compliance doesn’t show up in `Ignored` nor `Empty policies` mode. You can filter by -node lifecycle state in that list with the common `Filter` input field. - -image::node-lifecycle-nodelist.png[] - - -Node with a given lifecycle state can be searched thanks to the quicksearch tool in Rudder status -bar. That state can also be used to construct groups (`Node state` attribute of `Node summary`) -and they also show up in the API responses concerning node information. - -Finally, the default state for a Node can be configured in the Settings page, to define in which -mode accepted Nodes use. 
- -image::node-lifecycle-settings.png[] - - -In the future, these states will be configurable on a per node basis at acceptation, and the -lifecycle states list will be configurable by users. - - diff --git a/src/reference/modules/ROOT/pages/42_advanced_configuration_management/00_intro.adoc b/src/reference/modules/ROOT/pages/42_advanced_configuration_management/00_intro.adoc deleted file mode 100644 index 3d7a55ae..00000000 --- a/src/reference/modules/ROOT/pages/42_advanced_configuration_management/00_intro.adoc +++ /dev/null @@ -1,2 +0,0 @@ -== Advanced configuration - diff --git a/src/reference/modules/ROOT/pages/42_advanced_configuration_management/30_server_policy_generation.adoc b/src/reference/modules/ROOT/pages/42_advanced_configuration_management/30_server_policy_generation.adoc deleted file mode 100644 index ae4aa9ef..00000000 --- a/src/reference/modules/ROOT/pages/42_advanced_configuration_management/30_server_policy_generation.adoc +++ /dev/null @@ -1,36 +0,0 @@ -=== Policy generation - -Each time a change occurs in the Rudder interface, having an impact on the -policy needed by a node, it is necessary to regenerate the modified -promises for every impacted node. By default this process is launched after each -change. - -The process of policy generation: - -* Use configured policies and information about the nodes to generate - the files defining the policy that reflects the desired state -* Compute and store expected reports that will be produced when executing these policies -* Check the validity of the generated policies -* Replace the old version of the policies by the new one for impacted node -* Restart the policy server on the Rudder central server is authorizations have changed - -image::objects-used-in-generation.png[Objects and parameters used in policy generation] - -You can customize some of these actions and add new ones using the xref:42_advanced_configuration_management/60_server_event_hooks.adoc#_server_event_hooks[Server Event Hooks]. 
- -image::policy_generation.png[Status of policy generation] - -==== +Update policies+ button - -The button +Update policies+ on the top right of the screen, in the +Status+ menu, allows you to force -the regeneration of the policies. As changes in the inventory of the nodes are -not automatically taken into account by Rudder, this feature can be useful -after some changes impacting the inventory information. - -==== +Regenerate all policies+ button - -The button +Regenerate all policies+ on the top right of the screen, in the +Status+ menu, allows you to force -the regeneration of all policies. It will clear all internal caches, and force a complete -computation of the policies. This is generally useful to make sure everything is correct after a problem -on the central server. - diff --git a/src/reference/modules/ROOT/pages/42_advanced_configuration_management/30_technique_creation.adoc b/src/reference/modules/ROOT/pages/42_advanced_configuration_management/30_technique_creation.adoc deleted file mode 100644 index 02252d33..00000000 --- a/src/reference/modules/ROOT/pages/42_advanced_configuration_management/30_technique_creation.adoc +++ /dev/null @@ -1,305 +0,0 @@ -=== Technique creation - -Rudder provides a set of pre-defined Techniques that cover some basic -configuration and system administration needs. You can also create your own -Techniques, to implement new functionalities or configure new services. This -paragraph will walk you through this process. - -There is two ways to configure new Techniques, either thanks to the web -Technique Editor in Rudder or by coding them by hand. - -The use of the Technique Editor (code name: http://www.ncf.io/pages/ncf-builder.html[ncf-builder]) -is the easiest way to create new Techniques and is fully integrated with Rudder. On the other hand, -it does not allow the same level of complexity and expressiveness than coding a Technique by hand. 
-Of course, coding new Techniques by hand is a more involved process that needs to learn how the -Technique description language and Technique reporting works. - -We advice to always start to try to create new Techniques with the Technique Editor and switch to -the hand-coding creation only if you discover specific needs not addressed that way. - -==== Recommended solution: Technique Editor - -The easiest way to create your own Techniques is to use the Technique editor, -a web interface to create and manage Techniques based on the ncf framework. - -Creating a technique in the Technique Editor will generate a Technique for Rudder automatically. -You can then use that Technique to create a Directive that will be applied on your Nodes thanks -to a Rule. - -For more information about ncf and the Technique editor, you can visit: http://www.ncf.io/ - -===== Using the Technique Editor - -The Technique Editor is available in the Directive screen or directly in the Utilities menu. -Once on the Technique Editor, creating a Technique simply consist to add desired "Generic Methods" -building block and configure them. - -When the Technique match your expectations, hitting save will automatically add it to available -Technique in the Directive screen of Rudder (in the "User Technique" category). - - -===== Logs - -In case of any issue with the Technique Editor, the first step should always be to look for its log messages. -These logs are sent to Apache system error logs: - -- On Debian, by default: `/var/log/apache2/error.log` -- On RHEL, by default: `/var/log/httpd/error_log` - - -==== Understanding how Technique Editor works - -In this chapter, we are giving an overview about how the Technique Editor works and how it is -integrated with the main Rudder application. - -===== Directory layout - -As explained in http://www.ncf.io/, ncf uses a structured directory tree composed of several layers of logic, -from internal libraries to Techniques and user services. 
All the files and logic in these folders will be named -"library" for simplicity - -ncf directory structure exists in two root folders: - -* `/usr/share/ncf/tree` -** This is the standard library installation folder. It is created and updated by the - the ncf package. This folder will be completely overwritten when you update ncf - package so you should never modify anything here: it will be lost at some point. - -* `/var/rudder/configuration-repository/ncf` -** This is were you add your own ncf Generic Methods and Techniques. - Techniques created with the Technique Editor will be located here, and both - Generic and Techniques in that place will be accessible in the Technique Editor - alongside what is provided by the standard library. - -==== Sharing ncf code with nodes - -To share those folders to all nodes, Rudder makes a copy of these folders in two -places: - -* `/var/rudder/ncf`, for part common to all nodes - so NOT techniques, - ** `/var/rudder/ncf/local` is a copy of node-independant directories from - `/var/rudder/configuration-repository/ncf`, so almost everything *BUT* - `/var/rudder/configuration-repository/ncf/50_techniques`. - ** `/var/rudder/ncf/common` is a copy `/usr/share/ncf/tree` -* `/var/rudder/share/xxxx-yyyy-node-id-zzzz/rules/cfengine-community/Technique_Name/1.0/Technique_Name.cf` - for techniques, with one directory for each technique applied to the node. -* `/var/rudder/share/xxxx-yyyy-node-id-zzzz/rules/cfengine-community/rudder_expected_reports.csv` - contains information about report expected for all ncf techniques applied to that node. - - - -Files in `/var/rudder/ncf` are synchronized automatically by the "rudder agent update" -command when the agent runs on the server. So any modification done in files -in these directories will be lost at the next synchronization. - -Files under `/var/rudder/share/` are updated during policy generation. 
- -A node updates its ncf local library by copying the content of these two folders -during its promise update phase. - -===== From ncf Technique Editor to Rudder Techniques and back - -Here we will explain how the Technique Editor integration to Rudder is done to -transform ncf techniques into full fledge Rudder one. We will also get the -big picture of the web flow and the resulting events triggered on Rudder servier -side. - -Each action in the Technique Editor interface produces requests to an API defined over ncf. - -All of the requests are authenticated thanks to a token passed in the JSESSIONID header. -The token is generated when an authenticated user is connected to the Rudder interface -(typically thanks to his browser). - -That token is shared to the Technique Editor interface, which itself passes the -JSESSIONID header to all requests. - -If you have authentication issue, check that your Rudder session is not expired. - -Get request:: - -Get request will get all Techniques and Generic Methods in a path passed as parameters of the -request in the "path" javascript variable: - -https://your.rudder.server/ncf-builder/#!?path=/var/rudder/configuration-repository/ncf - -Get requests are triggered when accessing Technique editor. - -The ncf API will parse all files in the parameter path by running "cf-promises -pjson" on all Techniques, -checking that all Techniques are correctly formed. - -The ncf API will also look to all Generic Methods description data to build the catalog of available -Generic Methods. - -The resulting information are sent back to the Technique Editor for displaying. - -Post requests:: - -Post requests are issued when a Technique is created, modified or deleted. -They will only work on Techniques available in the path given in parameter. - -They are triggered when clicking on save/delete button. - -The main difference with get requests is that hooks are launched before and after the action is made. 
- -We will see all hooks behavior in the following dedicated hooks section. - -===== Hooks - -On each POST request, pre- and post- hooks are executed by the Technique Editor. -These hooks are used for the Rudder integration to help transform pure ncf Techniques into Rudder one. - -- pre-hooks are located in: `/var/rudder/configuration-repository/ncf/pre-hooks.d` -- post-hooks are located in: `/var/rudder/configuration-repository/ncf/post-hooks.d` - -As of March 2015, we have two post-hooks defined and no pre-hooks: - -* `post.write_technique.commit.sh` -** It commits the Technique newly created into Rudder Git configuration repository -located in `/var/rudder/configuration-repository`. -* `post.write_technique.rudderify.sh` -** It generates a valid Rudder Technique from a the newly created Technique and reloads Rudder -Technique Library so that updates are taken into account. - -If you want to run post hooks by hand, you can use the following command: - - /var/rudder/configuration-repository/ncf/post-hooks.d/post.write_technique.commit.sh /var/rudder/configuration-repository bundle_name - -==== Create Technique manually - -===== Prerequisite - -To create a Technique, you'll need a few things: - -CFEngine knowledge:: Rudder's Techniques are implemented using CFEngine. -Rudder takes care of a lot of the work of using CFEngine, but you'll need to -have a reasonable understanding of the CFEngine syntax. - -Rudder installation for testing:: To be able to test your new Technique, -you'll need a working Rudder installation (at least a server and a node). - -Text editor:: The only other tool you need is your favorite text editor! - -===== Define your objective - -Before starting to create a new Technique, have you checked that it doesn't -already exist in Rudder? The full list of current Techniques is available from -GitHub, at http://github.com/normation/rudder-techniques/[GitHub rudder-techniques repository]. - -OK, now we've got that over with, let's go on. 
- -A Technique should be an abstract configuration. This means that your Technique -shouldn't just configure something one way, but instead it should implement -*how* to configure something, and offer options for users to choose what way -they want it configured. Before starting, make sure you've thought through what -you want to create. - -Here's a quick checklist to help: - -* Do you need to install packages? -* Do you need to create or edit configuration files? -* Do you need to copy files from a central location? -* Do you need to launch processes or check that they're running? -* Do you need to run commands to get things working? - -Once you've made a list of what needs doing, consider what options could be -presented in the user interface, when you create a Directive from your new -Technique. Intuitively, the more variables there are, the more flexible your -Technique will be. However, experience shows that making the Technique *too* -configurable will actually make it harder to use, so a subtle balance comes in -to play here. - -At this stage, make a list of all the variables that should be presented to -users configuring a Directive from your Technique. - -===== Initialize your new Technique - -The simplest way to create a new Technique and be able to test it as you work is -to start on a Rudder server. Open a terminal and connect to your Rudder server -by ssh, and cd into the directory where Techniques are stored: - ----- - -cd /var/rudder/configuration-repository/techniques - ----- - -Under this directory, you'll find a set of categories, and sub-categories. -Before creating your Technique, choose a category to put it in, and change to -that directory. For example: - ----- - -cd applications - ----- - -You can consult the description of each category by looking at the -+category.xml+ file in each directory. 
For this example: - ----- - -cat category.xml - ----- - -Will output: - ----- - - - Application management - This category contains Techniques designed to install, - configure and manage applications - - ----- - -Once you've decided on a category, it's time to create the basic skeleton of -your Technique. The technical name for your Technique is it's directory name, so -choose wisely: - ----- - -mkdir sampleTechnique - ----- - -All directories under this one are version numbers. Let's start with a simple -1.0 version. From now on, we'll work in this directory. - ----- - -mkdir sampleTechnique/1.0 -cd sampleTechnique/1.0 - ----- - -Now, you need a minimum of two files to get your Technique working: - -metadata.xml:: This file describes the Technique, and configures how it will be -displayed in the web interface. - -st files:: These files are templates for CFEngine configuration files. You need -at least one, but can have as many as you like. Rudder processes them to -generate .cf files ready to be used by CFEngine. 
- -To get started, copy and paste these sample files, or download them from GitHub: - -+metadata.xml+ (original file: -https://github.com/normation/rudder-techniques/blob/master/technique-metadata-sample.xml[+technique-metadata-sample.xml+]) - ----- - -include::technique-metadata-sample.xml - ----- - -+sample_technique.st+ (original file: -https://github.com/normation/rudder-techniques/blob/master/technique-st-sample.xml[+technique-st-sample.xml+]) - ----- - -include::technique-st-sample.xml - ----- - diff --git a/src/reference/modules/ROOT/pages/42_advanced_configuration_management/40_node_properties.adoc b/src/reference/modules/ROOT/pages/42_advanced_configuration_management/40_node_properties.adoc deleted file mode 100644 index 3ddfd545..00000000 --- a/src/reference/modules/ROOT/pages/42_advanced_configuration_management/40_node_properties.adoc +++ /dev/null @@ -1,174 +0,0 @@ -[[_node_properties]] -=== Node properties - -Node properties can be found in the "properties" tab of each node in Rudder. - -Node properties can be modified using Rudder's API, see http://www.rudder-project.org/rudder-api-doc/#api-Nodes-updateNodeProperties - -Properties can also be defined on the node itself, to override locally properties. - -Each property is a key=value pair. The value can be a string or a well-formatted JSON data structure. - -Some examples: -`datacenter=Paris` -`datacenter= { "id": "FRA1", "name": "Colo 1, Paris", "location": "Paris, France", "dns_suffix": "paris.example.com" }` - - -==== Using properties - -You can use node properties almost everywhere in Rudder: - -* in directive parameters -* in the technique editor -* in your own techniques and generic methods - -To use a property, simply use the variable node.properties with the variable call syntax. 
- -Example with a property named 'datacenter': - ----- - -${node.properties[datacenter]} - ----- - -WARNING: Before Rudder 3.1.14 and 3.2.7, node properties could not be used in JavaScript expressions (see following section), since they are evaluated during policy generation and node properties were only made available to agents at runtime. Since Rudder 3.1.14, 3.2.7 and 4.0.0 and later, you can enable a feature switch in "Administration/Settings" to enable node properties expansion in directive parameters. More details are available at xref:42_advanced_configuration_management/45_node_properties_in_directives.adoc#_node_properties_expansion_in_directives[Node properties expansion in directives]. - -In a mustache template, use: - ----- - -{{{vars.node.properties.datacenter}}} - ----- - -==== Local override - -The agent searches for optionnal properties files `/var/rudder/local/properties.d/*.json`, and will override existing properties. - -As a result, if you have node properties defined server side as -`"sysctls_postgresql":{"kernel.shmall":"903330","kernel.shmmax":"3700041320"}` and -`"vm":{"vm.dirty_ratio":"10"}` - -and a local property file `/var/rudder/local/properties.d/postgresql_config.json` as - ----- - -{ - "properties": - { - "sysctls_postgresql": { - "kernel.shmmax":"5368709120" - } - } - -} - ----- - -The resulting properties will be: - -`"sysctls_postgresql":{"kernel.shmmax":"5368709120"}` and -`"vm":{"vm.dirty_ratio":"10"}` - -`sysctls_postgresql` has been overriden by local property, and `vm` has been left untouched. -Note that it is an override, as the semantic of merging is not deterministic with literal values, and it does not allow to unset values. If you need to merge, please refer to the next paragraph. 
- - -==== Merging properties - -If you want to merge server defined properties with local defined properties, rather than override them, you will need to use the generic method variable_dict_merge_tolerant to define which variables you need to merge, and define the local variables in a different namespace than properties. - -For instance, if you have defined in the node properties the following properties - -`"sysctls_postgresql":{"kernel.shmall":"903330","kernel.shmmax":"3700041320"}` - -and you wish to merge these values on a node with locally defined variable, to change the value of kernel.shmmax and set the value of kernel.shmmni, you can define the file /var/rudder/local/properties.d/postgresql_config.json with the following content - ----- - -{ - "local_properties": - { - "sysctls_postgresql": { - "kernel.shmmax":"5368709120", - "kernel.shmmni":"4096" - } - } - -} - ----- - -and use the generic method `variable_dict_merge_tolerant` to merge `node.properties[sysctls_postgresql]` and `node.local_properties[sysctls_postgresql]`, and set the result in merged_properties.sysctls_postgresql (for instance): `variable_dict_merge_tolerant("merged_properties", "sysctls_postgresql", "node.properties[sysctls_postgresql]", "node.local_properties[sysctls_postgresql]")` - -As a result, merged_properties.sysctls_postgresql will contain - ---- - -"sysctls_postgresql": { - "kernel.shmall":"903330", - "kernel.shmmax":"5368709120", - "kernel.shmmni":"4096" -} - - ---- - - - -==== Under the hood - -On the server, one or more properties files are written for each node in the -`/var/rudder/share//rules/cfengine-community/properties.d/` directory. -This directory is then copied to each node by the agent with all other promise files. - -In the agent, properties are made available in the `node.` container that contains the values. -Those values are read from -`/var/rudder/cfengine-community/inputs/properties/*.json`. 
All files are taken -in order and override the previous ones - the last one wins. - -The agent searches for optionnal properties files `/var/rudder/local/properties.d/*.json`, and will define variables -or override existing properties. - -Each file must contain at least 2 levels of JSON content, the first level is the namespace level -and the second level is the key level. - -The namespace name must be an ASCII name that doesn't start with `_` and must -match the following regex: `[a-zA-Z0-9][a-zA-Z0-9_]*` - -For example: - ----- - -{ - "properties": - { - "datacenter": "Paris", - "environment": "production", - "customer": "Normation" - } -} - ----- - -The merge is a first level merge done at the namespace level. This means that: - -* a key in a namespace is fully overridden by the same key in the same namespace in a later file. -* a key in a namespace is never overriden by the same key in a different namespace -* a key that is overriden never retains original data even if it is a data container itself - -The result key is available in the `node.` data variable. A usage -example: - ----- -${node.properties[datacenter]} ----- - -To get the original data (for debug only) there is the -`properties.property_` variable. A usage example: - ----- -${properties.property__var_rudder_cfengine_community_inputs_properties_d_properties_json[properties][datacenter]} ----- - diff --git a/src/reference/modules/ROOT/pages/42_advanced_configuration_management/45_node_properties_in_directives.adoc b/src/reference/modules/ROOT/pages/42_advanced_configuration_management/45_node_properties_in_directives.adoc deleted file mode 100644 index bf4c3f24..00000000 --- a/src/reference/modules/ROOT/pages/42_advanced_configuration_management/45_node_properties_in_directives.adoc +++ /dev/null @@ -1,95 +0,0 @@ -[[_node_properties_expansion_in_directives]] -=== Node properties expansion in directives - -It is possible to use properties defined on nodes to build Directive values. 
The -resulting values will be computed during policy generation, and can therefore -provide unique values for each node or be used in JavaScript expressions. - -Properties on nodes are defined using Rudder's REST API, with the 'Update Node properties' API call. -More details in our http://www.rudder-project.org/rudder-api-doc[API documentation]. - -Properties can also be defined directly on the nodes, by creating properties files -`/var/rudder/local/properties.d/*.json/` - - -==== Feature availability - -This feature was introduced in Rudder 3.1.14, Rudder 3.2.7 and Rudder 4.0.0. - -If you upgraded to 3.1.14 (or a later 3.1.x version) or 3.2.7 (or a later 3.2.x -version) from a previous Rudder version, this feature is disabled by default -in order to mitigate any risk of undesired side effects on existing -installations. You can enable it in the Administration/Settings page, using the -*Enable node properties expansion in Directives* switch. - -Rudder installations from 4.0.0 onwards have this feature enabled by default. - -==== Usage - -In any directive text field, you can access properties defined on nodes using the following syntax: - ----- - -${node.properties[property_name][key_one][key_two]} - ----- - - -where: - -- `property_name` is the name of the property defined via the API -- `key_one` and `key_two` are keys in the JSON structure -- the value obtained is the string representation, in compact mode, of the entire node property or sub-structure of the JSON value -- if the key is not found, an error will be raised that will stop policy generation -- spaces are authorized around separators ([,],|,}..) - -===== Providing a default value - -Most of the time, you will need to provide a default value to node properties expansion to avoid a policy generation -error due to missing node properties. -This is also a good case to allow a simple override mechanism for a parameter where only some nodes have a specific value. 
- -You can also use other node properties, or other Rudder parameters as defaults, using the same syntax as above. - -Some examples: - ----- - -${node.properties[datacenter][id] | default = "LON2" } -${node.properties[datacenter][name] | default = """Co-location with "Hosting Company" in Paris (allows quotes)""" } -${node.properties[datacenter][id] | default = ${rudder.param.default_datacenter} } -${node.properties[netbios_name] | default = ${rudder.node.hostname} } -${node.properties[dns_suffix] | default = ${node.properties[datacenter][dns_suffix] | default = "${rudder.node.hostname}.example.com" } - -#or even use cfengine variables in the default -${node.properties[my_override] | default = "${cfengine.key}"} - ----- - -===== Forcing expansion on the node - -In some cases, you will want to use a `${node.properties[key]}` in a directive parameter, but you don't want to expand it during -policy generation on the Rudder server, but instead let the value be expanded during the agent run on the node. Typically if the value is to be used by a templating -tool, or if the value is known only on the node. - -For these cases, you can add the "node" option to the property expression: - ----- - -${node.properties[datacenter][id] | node } - ----- - -This will be rewritten during policy generation into: - ----- - -${node.properties[datacenter][id]} - ----- - -Which will be considered as a standard variable by the agent, which will replaced this expression by its value if it's defined, or kept as is if it's unknown. - -The variable content is read from `/var/rudder/cfengine-community/inputs/properties.d/properties.json`, and from the optionally defined `/var/rudder/local/properties.d/*.json` files. -You can find more information on node properties in xref:42_advanced_configuration_management/40_node_properties.adoc#_node_properties[node properties documentation]. 
- diff --git a/src/reference/modules/ROOT/pages/42_advanced_configuration_management/50_script_in_directives.adoc b/src/reference/modules/ROOT/pages/42_advanced_configuration_management/50_script_in_directives.adoc deleted file mode 100644 index 5ed44185..00000000 --- a/src/reference/modules/ROOT/pages/42_advanced_configuration_management/50_script_in_directives.adoc +++ /dev/null @@ -1,149 +0,0 @@ -=== JavaScript evaluation in Directives - -It is possible to use JavaScript expressions to build Directive values. The -resulting values will be computed during policy generation, and can therefore -provide unique values for each node. - -==== Feature availability - -This feature was introduced in Rudder 3.1.12, Rudder 3.2.5 for password fields -only, and generalized for all fields in Rudder 3.1.14, Rudder 3.2.7 and Rudder 4.0. - -If you upgraded to 3.1.12 (or a later 3.1.x version) or 3.2.5 (or a later 3.2.x -version) from a previous Rudder version, this feature is disabled by default -in order to mitigate any risk of undesired side effects on existing -installations. You can enable it in the Administration/Settings page, using the -*Enable script evaluation in Directives* parameter. - -Rudder installations from 4.0 onwards have this feature enabled by default. - -==== Usage - -All standard JavaScript methods are available, and a Rudder-specific -library, prefixed with `rudder.` also provides some extra utilities. This -library is documented below. - -For example, to get the first 3 letters of each node's hostname, you can write: ----- -"${rudder.node.hostname}".substring(0,3) ----- - -[TIP] - -[[limits-of-scripts, Limitation of the scripting language]] - -.Limitation of the scripting language - -==== - -JavaScript expressions are evaluated in a sandboxed JavaScript environment. 
It has some -limitations, such as: - -* It cannot write on the filesystem -* Scripts are killed after 5 seconds of execution, to prevent overloading the system - -==== - -==== Rudder utility library - -===== Standard hash methods - -The following methods allow to simply hash a value using standard algorithms: - -* `rudder.hash.md5(string)` -* `rudder.hash.sha256(string)` -* `rudder.hash.sha512(string)` - -These methods do not use a salt for hashing, and as such are not suitable for -distributing passwords for user accounts on UNIX systems. See below for a -preferable approach for this. - -===== UNIX password-compatible hash methods - -The following methods are specially designed to provided hashes that can be -used as user passwords on UNIX systems (in `/etc/shadow`, for example). Use -these if you want to distribute hashes of unique passwords for each of your -nodes, for example. - -Two different cases exist: support for generic Unix-like systems (Linux, BSD, -...) and support for AIX systems (which use a different hash algorithm). - -Available methods are: - -* `rudder.password.auto(algorithm, password [, salt])` -* `rudder.password.unix(algorithm, password [, salt])` -* `rudder.password.aix(algorithm, password [, salt])` - -The parameters are: - -* `algorithm` can be "MD5", "SHA-512", "SHA512", "SHA-256", "SHA256" (case insensitive) -* `password` is the plain text password to hash -* `salt` is the optional salt to use in the password (we *strongly* recommend providing this value - see warning below) - -The `unix` method generates Unix crypt password compatible hashes (for use on -Linux, BSD, etc), while the `aix` method generates AIX password compatible -hashes. The `auto` method automatically uses the appropriate algorithm for -each node type (AIX nodes will have a AIX compatible hash, others will -have a Unix compatible hash). We recommend always using `auto` for simplicity. 
- -For example, to use the first 8 letters of each node's hostname as a password, -you could write: ----- -rudder.password.auto("SHA-256", "${rudder.node.hostname}".substring(0,8), "abcdefg") ----- - -[WARNING] - -.Providing a salt - -==== - -It is strongly recommended to provide a *salt* to the methods above. If no -salt is provided, a random salt is created, and will be recreated at each -policy generation, causing the resulting hashes to change each time. This, in -turn, will generate an unnecessary "repaired" status for the password component on all nodes -at each policy generation. - -==== - -[TIP] - -.JVM requirements - -==== - -This features is tested only on HotSpot 1.7 and 1.8, OpenJDK 1.7 and 1.8, -IBM JVM 1.7 and 1.8. - -==== - -[TIP] - -.JVM requirements for AIX password hashes - -==== - -AIX password generation depends on the availability of *PBKDF2WithHmacSHA256* and -*PBKDF2WithHmacSHA512* in the JVM. These algorithms are included by default on -HotSpot 1.8 and OpenJDK 1.8 and upward. In the case where your JVM does not support these -algorithms, typically on an IBM JDK or a JVM 1.7 version of HotSpot and OpenJDK, the hashing -algorithm falls back to *SHA1* with *PBKDF2WithHmacSHA1*, and an error message will be -logged. You can also check your JVM editor manual to add support for these algorithms. - -==== - -==== Status and future support - -In a future version of Rudder, JavaScript evaluation will be supported in all -fields in Directives. - -In the meantime, you can already test this functionality out by entering a JavaScript -expression in any Directive field, prefixed by "evaljs:". Please be aware that -this is unsupported and untested, so do this at your own risk. - -If you do encounter any issues, please get in touch or open a ticket - we'd -love to hear about them! - -There is currently no plan to extend this support to the fields in the -Technique editor. 
- diff --git a/src/reference/modules/ROOT/pages/42_advanced_configuration_management/60_server_event_hooks.adoc b/src/reference/modules/ROOT/pages/42_advanced_configuration_management/60_server_event_hooks.adoc deleted file mode 100644 index 43cd8895..00000000 --- a/src/reference/modules/ROOT/pages/42_advanced_configuration_management/60_server_event_hooks.adoc +++ /dev/null @@ -1,7 +0,0 @@ -[[_server_event_hooks]] -=== Server Event Hooks - -Rudder 4.1 introduces the possibility to execute files (hooks), typically scripts, -when some predefined event occurs on Rudder. - -include::{partialsdir}/dyn/hooks.adoc[leveloffset=+3] diff --git a/src/reference/modules/ROOT/pages/42_advanced_configuration_management/70_new_directives_default_name.adoc b/src/reference/modules/ROOT/pages/42_advanced_configuration_management/70_new_directives_default_name.adoc deleted file mode 100644 index de535ef9..00000000 --- a/src/reference/modules/ROOT/pages/42_advanced_configuration_management/70_new_directives_default_name.adoc +++ /dev/null @@ -1,55 +0,0 @@ -=== New directives default naming scheme - -When a new directive is created, by default the 'Name' field is filled -with the Technique name. For example, if you create a new Directive from -the 'Users' Technique, the Name field will get the value: "Users". - -This not always what you want, especially for your custom Techniques. So you -have the possibility to define new default values for Name, at Technique or -at Technique and Version granularity. - -This is done by adding or updating the file: -`/var/rudder/configuration-repository/techniques/default-directive-names.conf`. - -That file need to be commited in git, and the Technique library reloaded -to take effect: - ----- -cd /var/rudder/configuration-repository/techniques/ -vi default-directive-names.conf - .... 
-git add default-directive-names.conf -git commit -m "Change default names for new directives" -rudder server reload-techniques ----- - -The file format is a simple `techniqueId[/optionalVersion]: default name to use` format. -The Technique ID is the name of the directory containing the Technique version directory -in `/var/rudder/configuration-repository/techniques`. - -For example, if we imagine that in your company, you have the internal -convention to create one directive by user role with the login in the -name, you would prefer to have a default value to: - ----- -Role : ----- - -And then, for Users Technique version 7, you changed your mind and now -use the scheme: - ----- -Role: [user-role] (with login [login]) ----- - -Then the file will look like: - ----- -# Default pattern for new directive from "userManagement" technique: -userManagement= Role : - -# For userManagement version 2.0, prefer that pattern in new Directives: -userManagement/7.0: Role: [user-role] (with login [login]) ----- - - diff --git a/src/reference/modules/ROOT/pages/43_advanced_administration/00_administration_intro.adoc b/src/reference/modules/ROOT/pages/43_advanced_administration/00_administration_intro.adoc deleted file mode 100644 index 0762e282..00000000 --- a/src/reference/modules/ROOT/pages/43_advanced_administration/00_administration_intro.adoc +++ /dev/null @@ -1,5 +0,0 @@ -== Advanced administration - -This chapter covers more advanced administration task of Rudder services. 
- - diff --git a/src/reference/modules/ROOT/pages/43_advanced_administration/10_database_maintenance.adoc b/src/reference/modules/ROOT/pages/43_advanced_administration/10_database_maintenance.adoc deleted file mode 100644 index 0d7ae3f2..00000000 --- a/src/reference/modules/ROOT/pages/43_advanced_administration/10_database_maintenance.adoc +++ /dev/null @@ -1,98 +0,0 @@ -[[_database_maintenance]] -=== Database maintenance - -Rudder uses two backends to store information as of now: LDAP and SQL - -To achieve this, OpenLDAP and PostgreSQL are installed with Rudder. - -However, like every database, they require a small amount of maintenance -to keep operating well. Thus, this chapter will introduce you to the basic -maintenance procedure you might want to know about these particular database -implementations. - -==== Automatic PostgreSQL table maintenance - -Rudder uses an automatic mechanism to automate the archival and pruning of the reports -database. - -By default, this system will: - -* Archive reports older that 3 days (30 in Rudder 2.6) -* Remove reports older than 90 days - -It thus reduces the work overhead by only making Rudder handle relevant reports (fresh enough) -and putting aside old ones. - -This is obviously configurable in /opt/rudder/etc/rudder-web.properties, by altering the following -configuration elements: - -* rudder.batch.reportscleaner.archive.TTL: Set the maximum report age before archival -* rudder.batch.reportscleaner.delete.TTL: Set the maximum report age before deletion - -The default values are OK for systems under moderate load, and should be adjusted in case of -excessive database bloating. - -The estimated disk space consumption, with a 5 minute agent run frequency, is 150 to 400 kB per Directive, -per day and per node, which is roughly 5 to 10 MB per Directive per month and per node. 
- -Thus, 25 directives on 100 nodes, with a 7 day log retention policy, would take 2.5 to 10 GB, and -25 directives on 1000 nodes with a 1 hour agent execution period and a 30 day log retention policy -would take 9 to 35 GB. - -==== PostgreSQL database vacuum - -In some cases, like a large report archiving or deletion, the Rudder interface -will still display the old database size. This is because even if the database has been -cleaned as requested, the physical storage backend did not reclaim space on the hard drive, -resulting in a "fragmented" database. This is not an issue, as PostgreSQL handles this automatically, -and new reports sent by the nodes to Rudder will fill the blanks in the database, resulting in a -steady growth of the database. This task is handled by the autovacuum process, which periodically -cleans the storage regularly to prevent database bloating. - -However, to force this operation to free storage immediately, you can trigger a "vacuum full" operation -by yourself, however keep in mind that this operation is very disk and memory intensive, -and will lock both the Rudder interface and the reporting system for quite a long time with a big database. - -[source,python] - -.Manual vacuuming using the psql binary - ----- - -# You can either use sudo to change owner to the postgres user, or use the rudder connection credentials. - -# With sudo: -sudo -u postgres psql -d rudder - -# With rudder credentials, it will ask the password in this case: -psql -u rudder -d rudder -W - -# And then, when you are connected to the rudder database in the psql shell, trigger a vacuum: -rudder=# VACUUM FULL; - -# And take a coffee. - ----- - -==== LDAP database reindexing - -In some very rare case, you will encounter some LDAP database entries that are not indexed and used -during searches. In that case, OpenLDAP will output warnings to notify you that they should be. 
- -[source,python] - -.LDAP database reindexing - ----- - -# Stop OpenLDAP -service rudder-slapd stop - -# Reindex the databases -service rudder-slapd reindex - -# Restart OpenLDAP -service rudder-slapd restart - ----- - diff --git a/src/reference/modules/ROOT/pages/43_advanced_administration/15_migration_backup_restore.adoc b/src/reference/modules/ROOT/pages/43_advanced_administration/15_migration_backup_restore.adoc deleted file mode 100644 index 16510298..00000000 --- a/src/reference/modules/ROOT/pages/43_advanced_administration/15_migration_backup_restore.adoc +++ /dev/null @@ -1,105 +0,0 @@ -[[_migration_backups_and_restores]] -=== Migration, backups and restores - -It is advised to backup frequently your Rudder installation in case -of a major outage. - -These procedures will explain how to backup your Rudder installation. - -==== Backup - -This backup procedure will operate on the three principal Rudder data sources: - -* The LDAP database -* The PostgreSQL database -* The configuration-repository folder - -It will also backup the application logs. 
- -[source,python] - -.How to backup a Rudder installation - ----- - -# First, backup the LDAP database: -/opt/rudder/sbin/slapcat -l /tmp/rudder-backup-$(date +%Y%m%d).ldif - -# Second, the PostgreSQL database: -sudo -u postgres pg_dump rudder > /tmp/rudder-backup-$(date +%Y%m%d).sql - -# Or without sudo, use the rudder application password: -pg_dump -U rudder rudder > /tmp/rudder-backup-$(date +%Y%m%d).sql - -# Third, backup the configuration repository: -tar -C /var/rudder -zvcf /tmp/rudder-backup-$(date +%Y%m%d).tar.gz configuration-repository/ cfengine-community/ppkeys/ - -# Finally, backup the logs: -tar -C /var/log -zvcf /tmp/rudder-log-backup-$(date +%Y%m%d).tar.gz rudder/ - -# And put the backups wherever you want, here /root: -cp /tmp/rudder-backup* /root -cp /tmp/rudder-log-backup* /root - ----- - -==== Restore - -Of course, after a total machine crash, you will have your backups at hand, -but what should you do with it ? - -Here is the restoration procedure: - -[source,python] - -.How to restore a Rudder backup - ----- - -# First, follow the standard installation procedure, this one assumes you have a working "blank" -# Rudder on the machine - -# Disable Rudder agent -rudder agent disable - -# Stop Rudder services -service rudder stop - -# Drop the OpenLDAP database -rm -rf /var/rudder/ldap/openldap-data/*.mdb - -# Import your backups - -# Configuration repository -tar -C /var/rudder -zvxf /root/rudder-backup-XXXXXXXX.tar.gz - -# LDAP backup -/opt/rudder/sbin/slapadd -l /root/rudder-backup-XXXXXXXX.ldif - -# Start PostgreSQL -service postgresql start - -# PostgreSQL backup -sudo -u postgres psql -d rudder < /root/rudder-backup-XXXXXXXX.sql -# or -psql -u rudder -d rudder -W < /root/rudder-backup-XXXXXXXX.sql - -# Enable Rudder agent -rudder agent enable - -# And restart the machine or just Rudder: -service rudder restart - ----- - -==== Migration - -To migrate a Rudder installation, just backup and restore your Rudder installation -from one machine to 
another. - -If your server address changed, you will also have to do the following on -every node that is directly connected to it (managed nodes or relays): - -* Remove the server public key +rm /var/rudder/cfengine-community/ppkeys/root-MD5=*.pub+ -* Modify +/var/rudder/cfengine-community/policy_server.dat+ with the new address, then you can force your nodes to send their inventory by running +rudder agent inventory+ - diff --git a/src/reference/modules/ROOT/pages/43_advanced_administration/20_application_tuning.adoc b/src/reference/modules/ROOT/pages/43_advanced_administration/20_application_tuning.adoc deleted file mode 100644 index cc192338..00000000 --- a/src/reference/modules/ROOT/pages/43_advanced_administration/20_application_tuning.adoc +++ /dev/null @@ -1,306 +0,0 @@ -[[_performance_tuning]] -=== Performance tuning - -Rudder and some applications used by Rudder (like the Apache web server, or Jetty) -can be tuned to your needs. - -[[_reports_retention]] -==== Reports retention - -To lower Rudder server's disk usage, you can configure the retention duration -for node's execution reports in -+/opt/rudder/etc/rudder-web.properties+ file with the options: - -+rudder.batch.reportscleaner.archive.TTL=30+ - -+rudder.batch.reportscleaner.delete.TTL=90+ - -==== Apache web server - -The Apache web server is used by Rudder as a proxy, to connect to the Jetty -application server, and to receive inventories using the WebDAV protocol. - -There are tons of documentation about Apache performance tuning available on the -Internet, but the defaults should be enough for most setups. - -==== Jetty - -The Jetty application server is the service that runs Rudder web application and inventory -endpoint. It uses the Java runtime environment (JRE). - -The default settings fit the basic recommendations for minimal Rudder hardware requirements, -but there are some configuration switches that you might need to tune to obtain better -performance with Rudder, or correct e.g. 
timezone issues. - -To look at the available optimization knobs, please take a look at +/etc/default/rudder-jetty+ -on your Rudder server. - -==== Java "Out Of Memory Error" - -It may happen that you get java.lang.OutOfMemoryError. -They can be of several types, -but the most common is: "java.lang.OutOfMemoryError: Java heap space". - -This error means that the web application needs more RAM than what was given. -It may be linked to a bug where some process consumed much more memory than -needed, but most of the time, it simply means that your system has grown and needs -more memory. - -You can follow the configuration steps described in the following paragraph. - -[[_configure_ram_allocated_to_jetty]] -==== Configure RAM allocated to Jetty - -To change the RAM given to Jetty, you have to: - ----- - -# edit +/etc/default/rudder-jetty+ with your preferred text editor, for example vim: -vim /etc/default/rudder-jetty - -Notice: that file is alike to +/opt/rudder/etc/rudder-jetty.conf+, which is the file with -default values. +/opt/rudder/etc/rudder-jetty.conf+ should never be modified directly because -modification would be erased by packaging in the following Rudder versuib update. - -# modify JAVA_XMX to set the value to your need. -# The value is given in MB by default, but you can also use the "G" unit to specify a size in GB. - -JAVA_XMX=2G - -# save your changes, and restart Jetty: -service restart rudder-jetty - ----- - -The amount of memory should be the half of the RAM of the server, rounded up to the nearest GB. -For example, if the server has 5GB of RAM, 3GB should be allocated to Jetty. - -[[_optimize_postgresql_server]] -==== Optimize PostgreSQL server - -The default out-of-the-box configuration of PostgreSQL server is really not -compliant for high end (or normal) servers. It uses a really small amount of -memory. 
- -The location of the PostgreSQL server configuration file is usually: - ----- - -/etc/postgresql/9.x/main/postgresql.conf - ----- - -On a SuSE system: - ----- - -/var/lib/pgsql/data/postgresql.conf - ----- - - - -===== Suggested values on an high end server - ----- -# -# Amount of System V shared memory -# -------------------------------- -# -# A reasonable starting value for shared_buffers is 1/4 of the memory in your -# system: - -shared_buffers = 1GB - -# You may need to set the proper amount of shared memory on the system. -# -# $ sysctl -w kernel.shmmax=1073741824 -# -# Reference: -# http://www.postgresql.org/docs/8.4/interactive/kernel-resources.html#SYSVIPC -# -# Memory for complex operations -# ----------------------------- -# -# Complex query: - -work_mem = 24MB -max_stack_depth = 4MB - -# Complex maintenance: index, vacuum: - -maintenance_work_mem = 240MB - -# Write ahead log -# --------------- -# -# Size of the write ahead log: - -wal_buffers = 4MB - -# Query planner -# ------------- -# -# Gives hint to the query planner about the size of disk cache. -# -# Setting effective_cache_size to 1/2 of total memory would be a normal -# conservative setting: - -effective_cache_size = 1024MB - ----- - -===== Suggested values on a low end server - ----- - -shared_buffers = 128MB -work_mem = 8MB -max_stack_depth = 3MB -maintenance_work_mem = 64MB -wal_buffers = 1MB -effective_cache_size = 128MB - ----- - - -==== CFEngine - -If you are using Rudder on a highly stressed machine, which has especially slow or busy -I/O's, you might experience a sluggish CFEngine agent run everytime the machine -tries to comply with your Rules. - -This is because the CFEngine agent tries to update its internal databases everytime the agent -executes a promise (the .lmdb files in the /var/rudder/cfengine-community/state directory), -which even if the database is very light, takes some time if the machine has a very high iowait. 
- -In this case, here is a workaround you can use to restore CFEngine's full speed: you can use -a RAMdisk to store CFEngine states. - -You might use this solution either temporarily, to examine a slowness problem, or permanently, to mitigate a -known I/O problem on a specific machine. We do not recommend as of now to use this on a whole IT infrastructure. - -Be warned, this solution has a drawback: you should backup and restore the content of this directory -manually in case of a machine reboot because all the persistent states are stored here, so in case you are using, -for example the jobScheduler Technique, you might encounter an unwanted job execution because CFEngine will have -"forgotten" the job state. - -Also, note that the mode=0700 is important as CFEngine will refuse to run correctly if the state directory is -world readable, with an error like: - ----- -error: UNTRUSTED: State directory /var/rudder/cfengine-community (mode 770) was not private! ----- - -Here is the command line to use: - -[source,python] - -.How to mount a RAMdisk on CFEngine state directory - ----- - -# How to mount the RAMdisk manually, for a "one shot" test: -mount -t tmpfs -o size=128M,nr_inodes=2k,mode=0700,noexec,nosuid,noatime,nodiratime tmpfs /var/rudder/cfengine-community/state - -# How to put this entry in the fstab, to make the modification permanent -echo "tmpfs /var/rudder/cfengine-community/state tmpfs defaults,size=128M,nr_inodes=2k,mode=0700,noexec,nosuid,noatime,nodiratime 0 0" >> /etc/fstab -mount /var/rudder/cfengine-community/state - ----- - -[[_rsyslog]] -==== Rsyslog - -If you are using syslog over TCP as reporting protocol (it is set in *Administration* -> *Settings* -> *Protocol*), -you can experience issues with rsyslog on Rudder -policy servers (root or relay) when managing a large number of nodes. -This happens because using TCP implies the system has to keep track of -the connections. 
It can lead to reach some limits, especially: - -* max number of open files for the user running rsyslog -* size of network backlogs -* size of the conntrack table - -You have two options in this situation: - -* Switch to UDP (in *Administration* -> *Settings* -> *Protocol*). It is less reliable - than TCP and you can lose reports in case of networking or load issues, but it will - prevent breaking your server, and allow to manage more Nodes. -* Stay on TCP. Do this only if you need to be sure you will get all your reports - to the server. You will should follow the instructions below to tune your system - to handle more connections. - -All settings needing to modify '/etc/sysctl.conf' require to run 'sysctl -p' -to be applied. - -===== Maximum number of TCP sessions in rsyslog - -You may need to increase the maximum number of TCP sessions that rsyslog will accept. -Add to your '/etc/rsyslog.conf': - ----- -$ModLoad imtcp -# 500 for example, depends on the number of nodes and the agent run frequency -$InputTCPMaxSessions 500 ----- - -Note: You can use 'MaxSessions' instead of 'InputTCPMaxSessions' on rsyslog >= 7. - -===== Maximum number of file descriptors - -If you plan to manage hundreds of Nodes behind a relay or a root server, you should increase -the open file limit (10k is a good starting point, you might have to get to 100k with -thousands of Nodes). - -You can change the system-wide maximum number of file descriptors in '/etc/sysctl.conf' if necessary: - ----- -fs.file-max = 100000 ----- - -Then you have to get the user running rsyslog enough file descriptors. 
To do so, -you have to: - -* Have a high enough hard limit for rsyslog -* Set the limit used by rsyslog - -The first one can be set in '/etc/security/limits.conf': - ----- -username hard nofile 8192 ----- - -For the second one, you have two options: - -* Set the soft limit (which will be used by default) in '/etc/security/limits.conf' (with 'username soft nofile 8192') -* If you want to avoid changing soft limit (particularly if rsyslog is running as root), you - can configure rsyslog to change its limit to a higher value (but not higher than the hard limit) - with the '$MaxOpenFiles' configuration directive in '/etc/rsyslog.conf' - -You have to restart rsyslog for these settings to take effect. - -You can check current soft and hard limits by running the following commands as the user you want to check: - ----- -ulimit -Sn -ulimit -Hn ----- - -===== Network backlog - -You can also have issues with the network queues (which may for example lead to sending SYN cookies): - -* You can increase the maximum number of connection requests awaiting acknowledgment by changing - 'net.ipv4.tcp_max_syn_backlog = 4096' (for example, the default is 1024) in '/etc/sysctl.conf'. -* You may also have to increase the socket listen() backlog in case of bursts, by changing - 'net.core.somaxconn = 1024' (for example, default is 128) in '/etc/sysctl.conf'. - -===== Conntrack table - -You may reach the size of the conntrack table, especially if you have other applications -running on the same server. You can increase its size in '/etc/sysctl.conf', -see http://www.netfilter.org/documentation/FAQ/netfilter-faq.html#toc3.7[the Netfilter FAQ] -for details. 
- - diff --git a/src/reference/modules/ROOT/pages/43_advanced_administration/21_password_management.adoc b/src/reference/modules/ROOT/pages/43_advanced_administration/21_password_management.adoc deleted file mode 100644 index 57a351a7..00000000 --- a/src/reference/modules/ROOT/pages/43_advanced_administration/21_password_management.adoc +++ /dev/null @@ -1,111 +0,0 @@ -[[password-management]] - -=== Password management - -You might want to change the default passwords used in Rudder's managed daemons -for evident security reasons. - -==== Configuration of the postgres database password - -You will have to adjust the postgres database and the rudder-web.properties file. - -Here is a semi-automated procedure: - -* Generate a decently fair password. You can use an arbitrary one too. - ----- - -PASS=`dd if=/dev/urandom count=128 bs=1 2>&1 | md5sum | cut -b-12` - ----- - -* Update the Postgres database user - ----- - -su - postgres -c "psql -q -c \"ALTER USER blah WITH PASSWORD '$PASS'\"" - ----- - -* Insert the password in the rudder-web.properties file - ----- - -sed -i "s%^rudder.jdbc.password.*$%rudder.jdbc.password=$PASS%" /opt/rudder/etc/rudder-web.properties - ----- - -==== Configuration of the OpenLDAP manager password - -You will have to adjust the OpenLDAP and the rudder-web.properties file. - -Here is a semi-automated procedure: - -* Generate a decently fair password. You can use an arbitrary one too. 
- ----- - -PASS=`dd if=/dev/urandom count=128 bs=1 2>&1 | md5sum | cut -b-12` - ----- - -* Update the password in the slapd configuration - ----- - -HASHPASS=`/opt/rudder/sbin/slappasswd -s $PASS` -sed -i "s%^rootpw.*$%rootpw $HASHPASS%" /opt/rudder/etc/openldap/slapd.conf - ----- - -* Update the password in the rudder-web.properties file - ----- - -sed -i "s%^ldap.authpw.*$%ldap.authpw=$PASS%" /opt/rudder/etc/rudder-web.properties - ----- - -==== Configuration of the WebDAV access password - -This time, the procedure is a bit more tricky, as you will have to update -the Technique library as well as a configuration file. - -Here is a semi-automated procedure: - -* Generate a decently fair password. You can use an arbitrary one too. - ----- - -PASS=`dd if=/dev/urandom count=128 bs=1 2>&1 | md5sum | cut -b-12` - ----- - -* Update the password in the apache htaccess file - -[TIP] - -==== - -On some systems, especially SuSE ones, htpasswd is called as "htpasswd2" - -==== - ----- - -htpasswd -b /opt/rudder/etc/htpasswd-webdav rudder $PASS - ----- - -* Update the password in Rudder's system Techniques - ----- - -cd /var/rudder/configuration-repository/techniques/system/common/1.0/ -sed -i "s%^.*davpw.*$% \"davpw\" string => \"$PASS\"\;%" site.st -git commit -m "Updated the rudder WebDAV access password" site.st - ----- - -* Update the Rudder Directives by either reloading them in the web interface (in the "Configuration Management/Techniques" tab) or restarting jetty (NOT recommended) - diff --git a/src/reference/modules/ROOT/pages/43_advanced_administration/70_system_password_management.adoc b/src/reference/modules/ROOT/pages/43_advanced_administration/70_system_password_management.adoc deleted file mode 100644 index 2855656a..00000000 --- a/src/reference/modules/ROOT/pages/43_advanced_administration/70_system_password_management.adoc +++ /dev/null @@ -1,35 +0,0 @@ -=== Password upgrade - -This version of Rudder uses a central file to manage the passwords that will -be 
used by the application: /opt/rudder/etc/rudder-passwords.conf - -When first installing Rudder, this file is initialized with default values, -and when you run rudder-init, it will be updated with randomly generated -passwords. - -On the majority of cases, this is fine, however you might want to adjust the -passwords manually. This is possible, just be cautious when editing the file, -as if you corrupt it Rudder will not be able to operate correctly anymore and -will spit numerous errors in the program logs. - -As of now, this file follows a simple syntax: ELEMENT:password - -You are able to configure three passwords in it: The OpenLDAP one, the -PostgreSQL one and the authenticated WebDAV one. - -If you edit this file, Rudder will take care of applying the new passwords -everywhere it is needed, however it will restart the application automatically -when finished, so take care of notifying users of potential downtime before -editing passwords. - -Here is a sample command to regenerate the WebDAV password with a random -password, that is portable on all supported systems. Just change the -"RUDDER_WEBDAV_PASSWORD" to any password file statement corresponding to -the password you want to change. - ----- - -sed -i s/RUDDER_WEBDAV_PASSWORD.*/RUDDER_WEBDAV_PASSWORD:$(dd if=/dev/urandom count=128 bs=1 2>&1 | md5sum | cut -b-12)/ /opt/rudder/etc/rudder-passwords.conf - ----- - diff --git a/src/reference/modules/ROOT/pages/43_advanced_administration/75_separate_database.adoc b/src/reference/modules/ROOT/pages/43_advanced_administration/75_separate_database.adoc deleted file mode 100644 index 824d8f09..00000000 --- a/src/reference/modules/ROOT/pages/43_advanced_administration/75_separate_database.adoc +++ /dev/null @@ -1,197 +0,0 @@ -=== Use a database on a separate server - -This section allows installing a separate database only without splitting the rest of the server components -like when using the rudder-multiserver-setup script. 
-The setup is done in two places: on the database server and on the Rudder root server. - -It also allows moving an existing database to another server. - -[TIP] - -.Use different user and database names - -==== - -It can be useful, for example if you want to share you database server between several Rudder root servers (see note below), -to use a different database for your Rudder root server. To do so: - -* Create the new database (replace `alternate_user_name`, `alternate_base_name` and specify a password): - ----- - -su - postgres -c "psql -q -c \"CREATE USER alternate_user_name WITH PASSWORD 'GENERATE_A_PASSWORD'\"" -su - postgres -c "psql -q -c \"CREATE DATABASE alternate_base_name WITH OWNER = alternate_user_name\"" - ----- - -* Initialize it. First copy the initialization script: - ----- - - cp /opt/rudder/etc/postgresql/reportsSchema.sql /opt/rudder/etc/postgresql/reportsSchema-alternate.sql - ----- - -* In the copied file, change the: - ----- - -ALTER database rudder SET standard_conforming_strings=true; - ----- - -To: - ----- - -ALTER database alternate_base_name SET standard_conforming_strings=true; - ----- - -* Then apply the script: - ----- - -su - postgres -c "psql -q -U alternate_user_name -h localhost -d alternate_base_name \ - -f /opt/rudder/etc/postgresql/reportsSchema-alternate.sql" - ----- - -* Follow the standard instructions of this section, with two differences: - -** You need to adjust the line added to `pg_hba.conf` to match your user and database name. - -** You need to also change the database name and user in `rudder-web.properties`. - -==== - -[CAUTION] - -.Use the same database server for several Rudder root servers - -==== - -It is possible to share the same database server between several Rudder instances, -by following the preceding tip to use a different database than the default one. -However, there are some important points to know: - -* This database server can only be used with the rudder-db role in case of multiserver setup. 
- -* This database server can only be a node for one of the Rudder servers. This also means that this -root server will have indirect access to the content of the other databases. - -==== - - - -==== On the database server - -* Install and configure the agent on the node, and install the *rudder-reports* package. - -* Change the `postgresql.conf` file (usually in `/var/lib/pgsql` or `/etc/postgresql`), to listen on the right interface to communicate with the server: - ----- - -# you can use '*' to listen on all interfaces -listen_addresses = 'IP_TO_USE' - ----- - -* Also ensure that network policies (i.e. the firewall settings) allow PostgreSQL flows from the root server to the database server. - -* Add an authorization line for the server (in `pg_hba.conf`, in the same directory): - ----- - -host rudder rudder ROOT_SERVER_IP/32 md5 - ----- - -* Restart postgresql to apply the new settings: - ----- - -service postgresql restart - ----- - -* Execute the following command to configure the password (that should be the same as RUDDER_PSQL_PASSWORD in `/opt/rudder/etc/rudder-passwords.conf` on the root server): - ----- - -su - postgres -c "psql -c \"ALTER USER rudder WITH PASSWORD 'RUDDER_SERVER_DATABASE_PASSWORD'\"" - ----- - -* Run an inventory to the server: - ----- - -rudder agent inventory - ----- - -==== On the root server - -In the following section, DATABASE_HOST refers to the hostname of the new database server, and SERVER_HOST to the hostname of -the root server. - -* Remove the rudder-server-root and rudder-reports packages if installed. For example, you can run on Debian: - ----- - -service rudder restart -apt-mark manual rudder-webapp rudder-inventory-endpoint -apt-get remove --purge rudder-reports - ----- - -* You can also remove the postgresql package and database from the server if installed, but keep in mind you will lose all existing data. 
-You can follow the xref:43_advanced_administration/15_migration_backup_restore.adoc#_migration_backups_and_restores[backup and restore] procedure to migrate the data to the new database. - -* Change the hostname in `/opt/rudder/etc/rudder-web.properties`: - ----- - -rudder.jdbc.url=jdbc:postgresql://DATABASE_HOST:5432/rudder - ----- - -* Edit `/var/rudder/cfengine-community/inputs/rudder-server-roles.conf` and set the following line: - ----- - -rudder-db:DATABASE_HOST - ----- - -* Edit the /etc/rsyslog.d/rudder.conf file and change the hostname in: - ----- - -:ompgsql:DATABASE_HOST,rudder,rudder,... - ----- - -* Run an inventory: - ----- - -rudder agent inventory - ----- - -* Restart rudder services: - ----- - -service rsyslog restart -service rudder restart - ----- - -* Clear the cache (in Administration -> Settings) - -You should now have finished configuring the database server. You can check the technical logs to see if reports are correctly -written into the database and read by the web application. - diff --git a/src/reference/modules/ROOT/pages/43_advanced_administration/77_distributed_rudder.adoc b/src/reference/modules/ROOT/pages/43_advanced_administration/77_distributed_rudder.adoc deleted file mode 100644 index 3f9e9c45..00000000 --- a/src/reference/modules/ROOT/pages/43_advanced_administration/77_distributed_rudder.adoc +++ /dev/null @@ -1,111 +0,0 @@ -[[multiserver-rudder]] -=== Multiserver Rudder - -From version 3.0 Rudder can be divided into 4 different components: - -- rudder-web: an instance with the webapp and the central policy server -- rudder-ldap: the inventory endpoint and its ldap backend -- rudder-db: the postgresql storage -- rudder-relay-top: the contact point for nodes - -==== Preliminary steps - - -You need the setup scripts provided at https://github.com/normation/rudder-tools/tree/master/scripts/rudder-multiserver-setup. 
-You can download them with this command: - ----- - -mkdir rudder-multiserver-setup -cd rudder-multiserver-setup -for i in add_repo detect_os.sh rudder-db.sh rudder-ldap.sh rudder-relay-top.sh rudder-web.sh -do - wget --no-check-certificate https://raw.githubusercontent.com/Normation/rudder-tools/master/scripts/rudder-multiserver-setup/$i -done -chmod 755 * -cd .. - - ----- - - -You need 4 instances of supported OS, one for each component. -Only the rudder-web instance need at least 2GB of RAM. - -Register the 4 names in the DNS or add them in /etc/hosts on each instance. - -Add firewall rules: - -- from rudder-web to rudder-db port pgsql TCP -- from rudder-* to rudder-web port rsyslog 514 TCP -- from rudder-relay-top to rudder-ldap port 8080 TCP -- from rudder-web to rudder-ldap port 8080 TCP -- from rudder-web to rudder-ldap port 389 TCP -- from rudder-web to rudder-relay-top port 5309 - - -==== Install rudder-relay-top - -Copy the rudder-multiserver-setup directory to you instance. - -Run rudder-relay-top.sh as root, replace with the hostname of the rudder-web instance: - ----- - -cd rudder-multiserver-setup -./rudder-relay-top.sh - ----- - -Take note of the UUID. -If you need it later read, it is in the file /opt/rudder/etc/uuid.hive - -==== Install rudder-db - -Copy the rudder-multiserver-setup directory to you instance. - -Run rudder-db.sh as root, replace with the hostname of the rudder-web instance, replace with the network containing the rudder-web instances: - ----- - -cd rudder-multiserver-setup -./rudder-db.sh - ----- - -==== Install rudder-ldap - -Copy the rudder-multiserver-setup directory to you instance. - -Run rudder-ldap.sh as root, replace with the hostname of the rudder-web instance: - ----- - -cd rudder-multiserver-setup -./rudder-ldap.sh - ----- - -==== Install rudder-web - -Copy the rudder-multiserver-setup directory to you instance. 
- -Run rudder-relay-top.sh as root, replace with the hostname of the corresponding instance: - ----- - -cd rudder-multiserver-setup -./rudder-web.sh - ----- - -Connect rudder web interface and accept all nodes. -Then run the following command where is the uuid from rudder-relay-top setup. - ----- - -/opt/rudder/bin/rudder-node-to-relay - ----- - - diff --git a/src/reference/modules/ROOT/pages/43_advanced_administration/91_mirroring_rudder_repositories.adoc b/src/reference/modules/ROOT/pages/43_advanced_administration/91_mirroring_rudder_repositories.adoc deleted file mode 100644 index 751427c3..00000000 --- a/src/reference/modules/ROOT/pages/43_advanced_administration/91_mirroring_rudder_repositories.adoc +++ /dev/null @@ -1,18 +0,0 @@ -=== Mirroring Rudder repositories - -You can also use your own packages repositories server instead of 'www.rudder-project.org' if you want. This is possible with a synchronization from our repositories with rsync. - -We've got public read only rsync modules 'rudder-apt' and 'rudder-rpm'. - -To synchronize with the APT repository just type: ----- -rsync -av www.rudder-project.org::rudder-apt /your/local/mirror ----- - -To synchronize with the RPM repository just type: ----- -rsync -av www.rudder-project.org::rudder-rpm /your/local/mirror ----- - -Finally, you have to set up these directories (/your/local/mirror) to be shared by HTTP by a web server (i.e., Apache, nginx, lighttpd, etc...). 
- diff --git a/src/reference/modules/ROOT/pages/43_advanced_administration/991_monitoring.adoc b/src/reference/modules/ROOT/pages/43_advanced_administration/991_monitoring.adoc deleted file mode 100644 index 5ee74967..00000000 --- a/src/reference/modules/ROOT/pages/43_advanced_administration/991_monitoring.adoc +++ /dev/null @@ -1,130 +0,0 @@ -=== Monitoring - -This section will give recommendations for: - -* Monitoring Rudder itself (besides standard monitoring) -* Monitoring the state of your configuration management - -==== Monitoring Rudder itself - -===== Monitoring a Node - -The monitoring of a node mainly consists in checking that the Node can speak with -its policy server, and that the agent is run regularly. - -You can use the 'rudder agent health' command to check for communication errors. -It will check the agent configuration and look for connection errors in the last -run logs. By default it will output detailed results, but you can start it with -the '-n' option to enable "nrpe" mode (like Nagios plugins, but it can be -used with other monitoring tools as well). In this mode, it will -display a single line result and exit with: - -* 0 for a success -* 1 for a warning -* 2 for an error - -If you are using nrpe, you can put this line in your 'nrpe.cfg' file: - ----- -command[check_rudder]=/opt/rudder/bin/rudder agent health -n ----- - -To get the last run time, you can lookup the modification date of -'/var/rudder/cfengine-community/last_successful_inputs_update'. - -===== Monitoring a Server - -You can use use regular API calls to check the server is running and has access to its data. -For example, you can issue the following command to get the list of currently defined rules: - ----- -curl -X GET -H "X-API-Token: yourToken" http://your.rudder.server/rudder/api/latest/rules ----- - -You can then check the status code (which should be 200). See the xref:30_basic_administration/70_server_rest_api.adoc#rest-api[API documentation] for more information. 
- -You can also check the webapp logs (in '/var/log/rudder/webapp/year_month_day.stderrout.log') -for error messages. - -==== Monitoring your configuration management - -There are two interesting types of information: - -* *Events*: all the changes made by the the agents on your Nodes -* *Compliance*: the current state of your Nodes compared with the expected configuration - -===== Monitor compliance - -You can use the Rudder API to get the current compliance state of your infrastructure. -It can be used to simply check for configuration errors, or be integrated in -other tools. - -Here is an very simple example of API call to check for errors (exits with 1 when there is an error): - ----- -curl -s -H "X-API-Token: yourToken" -X GET 'https:/your.rudder.server/rudder/api/latest/compliance/rules' | grep -qv '"status": "error"' ----- - -See the xref:30_basic_administration/70_server_rest_api.adoc#rest-api[API documentation] for more information about general API usage, and the -http://www.rudder-project.org/rudder-api-doc/#api-compliance[compliance API documentation] -for a list of available calls. - -===== Monitor events - -The Web interface gives access to this, but we will here see how to process events -automatically. They are available on the root server, in '/var/log/rudder/compliance/non-compliant-reports.log'. -This file contains two types of reports about all the nodes managed by this server: - -* All the modifications made by the agent -* All the errors that prevented the application of a policy - -The lines have the following format: - ----- -[%DATE%] N: %NODE_UUID% [%NODE_NAME%] S: [%RESULT%] R: %RULE_UUID% [%RULE_NAME%] D: %DIRECTIVE_UUID% [%DIRECTIVE_NAME%] T: %TECHNIQUE_NAME%/%TECHNIQUE_VERSION% C: [%COMPONENT_NAME%] V: [%KEY%] %MESSAGE% ----- - -In particular, the 'RESULT' field contains the type of event (change or error, respectively 'result_repaired' and 'result_error'). 
- -You can use the following regex to match the different fields: - ----- -^\[(?P[^\]]+)\] N: (?P[^ ]+) \[(?P[^\]]+)\] S: \[(?P[^\]]+)\] R: (?P[^ ]+) \[(?P[^\]]+)\] D: (?P[^ ]+) \[(?P[^\]]+)\] T: (?P[^/]+)/(?P[^ ]+) C: \[(?P[^\]]+)\] V: \[(?P[^\]]+)\] (?P.+)$ ----- - -Below is a basic https://www.elastic.co/products/logstash[Logstash] configuration file for parsing Rudder events. -You can then use https://www.elastic.co/products/kibana[Kibana] to explore the data, and create graphs and -dashboards to visualize the changes in your infrastructure. - ----- -input { - file { - path => "/var/log/rudder/compliance/non-compliant-reports.log" - } -} - -filter { - grok { - match => { "message" => "^\[%{DATA:date}\] N: %{DATA:node_uuid} \[%{DATA:node}\] S: \[%{DATA:result}\] R: %{DATA:rule_uuid} \[%{DATA:rule}\] D: %{DATA:directive_uuid} \[%{DATA:directive}\] T: %{DATA:technique}/%{DATA:technique_version} C: \[%{DATA:component}\] V: \[%{DATA:key}\] %{DATA:message}$" } - } - # Replace the space in the date by a "T" to make it parseable by Logstash - mutate { - gsub => [ "date", " ", "T" ] - } - # Parse the event date - date { - match => [ "date" , "ISO8601" ] - } - # Remove the date field - mutate { remove => "date" } - # Remove the key field if it has the "None" value - if [key] == "None" { - mutate { remove => "key" } - } -} - -output { - stdout { codec => rubydebug } -} ----- - diff --git a/src/reference/modules/ROOT/pages/43_advanced_administration/992_inventory.adoc b/src/reference/modules/ROOT/pages/43_advanced_administration/992_inventory.adoc deleted file mode 100644 index e3f18ab2..00000000 --- a/src/reference/modules/ROOT/pages/43_advanced_administration/992_inventory.adoc +++ /dev/null @@ -1,25 +0,0 @@ -=== Use Rudder inventory in other tools - -Rudder centralizes the information about your managed systems, and -you can use this information in other tools, mainly through the API. -We well here give a few examples. 
- -==== Export to a spreadsheet - -You can export the list of your nodes to a spreadsheet file (xls format) by using a -https://github.com/normation/rudder-tools/tree/master/contrib/rudder_nodes_list[tool] available in the rudder-tools repository. - -Simple follow the installation instructions, and run it against your Rudder server. -You will get a file containing: - -image::spreadsheet-list-nodes.png[] - -You can easily modify the script to add other information. - -==== Rundeck and Ansible - -There are plugins for Rundeck and Ansible that can be used with each tool to make -them aware of inventory data from Rudder. For more information, see details in -the xref:80_extension_and_integration/40_Rudder_Integration.adoc#rudder-integration[Third party integration with Rudder] -paragraph. - diff --git a/src/reference/modules/ROOT/pages/4_advanced_usage/35_directive_ordering.adoc b/src/reference/modules/ROOT/pages/4_advanced_usage/35_directive_ordering.adoc deleted file mode 100644 index 4230c418..00000000 --- a/src/reference/modules/ROOT/pages/4_advanced_usage/35_directive_ordering.adoc +++ /dev/null @@ -1,87 +0,0 @@ -=== Directives ordering - -Configuration in Rudder are based on desired states, describing the expected state of the system. However, there are cases where having order is desirable (like ensuring that a JVM is present before deploying an Application server, or ensuring a user is present before setting it sudoers), even if it will converge over the course of several agent runs. - -In Rudder, there is two separated ways to order things, depending the type of Technique". So, before that, we need to explain how Policies are generated on the -agent from Directives based on the same Technique. - -==== Policy generation and Directive merge - -In Rudder, Policies are generated from Directives, but several Directives based on the same Technique always lead to *one* Policy on the agent. 
-For unique (non multi-instance) Technique, the one with the highest priority is selected. For multi-instance Technique, the different Directive values are *merged* -into one Policy after having been sorted. - -.Separated Policy Generation in Rudder 4.3 -[TIP] -===== - In Rudder 4.3, that limitation is lifted and Technique can be made to generate ONE Policy for each Directive. That capacity is controled by the - `POLICYGENERATION` tag, where the value `merged` is the pre-4.3 default behavior, and values `separated` or `separated-with-param` lead to one Policy per Directive. - - See https://www.rudder-project.org/redmine/issues/10625[Don't merge directive from same technique on generation] for more information. -===== - - -==== Sorting Directives based on the *same* Technique - -For Directive based on the same Technique, the sort order is based on the *Priority* value of the Directive. Between two Directive, the one with the highest *Priority* -is the first: - -- for a *non* multi-instance Technique, it means that it is there is only one that is chosen in the resulting Policies (the others are discared), -- for a multi-instance Technique, it means that the variables in the Policy will be declared and check in sorting order of Directives (so the first Directive's - variables will be declared in first position and check first during an agent run). - -If several *Directives* have the same *Priority*, the *Rule name*, and then the *Directive name* are used for sorting in alphanumeric order. - -.Priority field value and meaning -[WARNING] -====== -The *Priority* field of a Directive used to be a number, from 0 to 10, where 0 means "highest priority". -This changed with https://www.rudder-project.org/redmine/issues/11725 but if you knew Rudder before that change, please -use "0" whenever the documentation says "highest priority". 
-====== - - -===== Special use case: overriding generic_variable_definition - -You can use the merging of Directive to define variable override with the "Generic Variable Definition" Technique. - -For example, let say you want to define a *DNS* variable with default value *[default dns]* and on some node case, -a value *[overrided dns]*: - -- Create a Directive [1] with *high* priority: it will be your *default* case, so set *DNS* to *[default dns]*. -- Create an other Directive [2] with *lower* priority: it will be you specialized case, so set *DNS* to *[overrided dns]*. - -Then, a node with only Directive [1] will have the default value defined, and a node with both Directives will have the overriding one. - -It works because on the agent, you can redeclare a variable name and reassign to it a new value: the last one wins (so in our case, the *less* prioritary). - - -==== Sorting Policies - -Rudder uses a best-effort method for ordering Policies, based on alphanumeric ordering of the corresponding Rule, then Directive name. - -When several Directive were merged, Rudder choose the first (Rule name, Directive name) as the ordering value to use for the resulting Policy. - - -.Best practice -[TIP] -===== -You should always start Rules and Directives name by 2 (or 3) digits to be able to easily reorder Policy evaluation if the need happen: - -Do not use: "My general security rule" and "Check ssh configuration" - -But use: "05. My general security rule" and "40. 
Check ssh configuration" -===== - -==== Example - -- given three Techniques A, B and C -- directives A1 and A2 based on Technique A, directives B1 and B2 based on B, directives C1 and C2 based on C -- all Directives have the same priority, -- rule R0 having [C1], R1 having [A1, B2] and rule R2 having [A2, B1, C2], all applied on a same node, -- merging (R0, C1) and (R2, C2) => [C1, C2] and keep (R0, C1) as Policy order -- merging (R1, A1) and (R2, A2) => [A1, A2] and keep (R1, A1) as Policy order, -- merging (R1, B2) and (R2, B1) => [B2, B1] (because R1 < R2) and keep (R1, B2) for policy order, -- so policies are sort: (R0, C1) then (R1, A1) then (R1, B2) -- resulting ordering of directive's values will be: [C1, C2] then [A1, A2] then [B1, B2] - diff --git a/src/reference/modules/ROOT/pages/70_troubleshooting/00_troubleshooting_intro.adoc b/src/reference/modules/ROOT/pages/70_troubleshooting/00_troubleshooting_intro.adoc deleted file mode 100644 index e25a657f..00000000 --- a/src/reference/modules/ROOT/pages/70_troubleshooting/00_troubleshooting_intro.adoc +++ /dev/null @@ -1,4 +0,0 @@ -== Troubleshooting and common issues - -All technical and general answers are on http://faq.rudder-project.org/. - diff --git a/src/reference/modules/ROOT/pages/80_extension_and_integration/20_Rudder_plugins.adoc b/src/reference/modules/ROOT/pages/80_extension_and_integration/20_Rudder_plugins.adoc deleted file mode 100644 index 3abc0cd3..00000000 --- a/src/reference/modules/ROOT/pages/80_extension_and_integration/20_Rudder_plugins.adoc +++ /dev/null @@ -1,78 +0,0 @@ - - -[[extending-rudder-with-plugins]] -=== Extending Rudder with plugins - -Rudder can be extended with Plugins so that new features or API endpoints are -available in Rudder web application. 
- -==== Rudder Plugin - -A plugin is an archive in the `.rpkg` file format that can be manipulated with the -`rudder-pkg` command (see xref:30_basic_administration/30_plugins.adoc#plugins-management[Plugins Administration]) - -A Rudder plugin has full access to all Rudder internal APIs, datas, and process. -Its power is very large, but some care must be taken to ensure that the plugin -does not break Rudder main use cases. -That is why we prefer to build smaller plugin, adding only one feature, and -doing it in the least impacting way. - -Here come a list of some plugins so that one can grasp the kind of feature that a -plugin can bring to Rudder: - -===== Extending API: rudder-plugin-itop - -Link: https://github.com/normation/rudder-plugin-itop - -This plugin was used to add new API endpoint dedicated to the integration with -https://www.combodo.com/itop-193[iTop CMDB software]. - -As of Rudder 4.0, the plugin is superseeded by Rudder Compliance API. - -===== Adding information to node details: rudder-plugin-external-node-information - -Link: https://github.com/normation/rudder-plugin-external-node-information - -This plugin allows to add new tabs in Rudder "node details" page and display -node specific files, stored in node-dedicated places. It also use a -self-managed and hot-reloading configuration of its properties. - -===== Providing new authentication methods - -Rudder plugins can be used to provide new authentication methods. There is no -open source version of such module, but at least a Radius Authentication Plugin -exists. - -===== Providing a full new feature: rudder-plugin-datasources - -Link: https://github.com/normation/rudder-plugin-datasources - -As we said, Rudder plugins can be quite powerful. For example, the "Data sources -Plugin" brings a completely new feature to Rudder by allowing to configure external -API data sources from which Rudder will get node properties. 
The plugin set-up -its own data base table, comes with its own UI (available in Rudder "administration" -page), and interacts with node properties. - - -==== Building your own plugins - -As of Rudder 4.1, there is no dedicated, frozen plugins API for plugins. A plugin -is built in Scala, and the normal starting point is to clone and study the -template plugin project, -https://github.com/normation/rudder-plugin-helloworld[rudder-plugin-helloworld]. - -The project code source is documented to be didactic and provides: - -- an example of the packaging resources needed to build a ".rpkg" package, -- example of configuration file for the plugin, -- plugin definition and plugin registration when Rudder starts, -- how to interact with Rudder internal services, -- how to define new APIs. - -Of course, you can look to the other open source plugins listed above to get -other, more involved example about how to do things. - -You also can interact with Rudder developers through the -https://www.rudder-project.org/site/community/mailing-lists/[community] regular -communication channels. - diff --git a/src/reference/modules/ROOT/pages/80_extension_and_integration/40_Rudder_Integration.adoc b/src/reference/modules/ROOT/pages/80_extension_and_integration/40_Rudder_Integration.adoc deleted file mode 100644 index 33285eb8..00000000 --- a/src/reference/modules/ROOT/pages/80_extension_and_integration/40_Rudder_Integration.adoc +++ /dev/null @@ -1,91 +0,0 @@ - -[[rudder-integration]] -=== Rudder integration in your infrastructure - -The other mains way to integrate Rudder into an existing infrastructure is -by making existing process or software take advantage of Rudder. - -==== Existing third party integration - -===== Rundeck - -http://rundeck.org[Rundeck] is a tool that helps automating infrastructures, by -defining jobs that can be run manually or automatically. 
There is a -https://github.com/normation/rundeck-plugin-rudder[plugin] for Rundeck -that allows using Rudder inventory data in Rundeck. - -With that plugin, you can execute commands on node registered in Rudder, taking -advantage of the groups defined for you policies. - -===== Ansible - -There is an https://github.com/ansible/ansible/blob/devel/contrib/inventory/rudder.py[inventory plugin] -for Ansible that makes possible to use Rudder inventory (including groups, nodes, -group ids, node ids, and node properties) as inventory for Ansible, for example -for orchestration tasks on your platform. An inventory in Ansible is the list of managed nodes, -their groups and some pre-defined variables. -The Rudder plugin is part of Ansible as of version 2.0 (but also works with previous versions). - -You need to download the https://github.com/ansible/ansible/blob/devel/contrib/inventory/rudder.py[rudder.py] -and https://github.com/ansible/ansible/blob/devel/contrib/inventory/rudder.ini[rudder.ini] files, then you have to -edit `rudder.ini` to fill (at least): - -* Your Rudder server URL -* A valid API token - -Then you can start using it with the `-i rudder.py` option (to the Ansible command-line). -The plugin defines: - -* An Ansible group for each Rudder group, with a group variable named `rudder_group_id` that contains the uuid of the group -* An host variable named `rudder_node_id` that contains the uuid of the node -* Host variables containing the Rudder node properties - -You can then use them in the configuration, for example: - ----- -ansible -i rudder.py All_nodes_managed_by_root_policy_server -a "echo {{rudder_node_id}} {{rudder_group_id}} {{node_property}} {{node_property.key}}" ----- - -Will try to connect over SSH to all nodes managed by your Rudder server and display the given information. - -You can defined the `ansible_host`, `ansible_user` and `ansible_port` node properties to control -the way Ansible connects to the nodes. 
- -===== iTop - -https://www.combodo.com/itop-193[iTop] is an Open Source CMDB solution. It allows -to describe you IT services and analyse impact of problems. - -There is a prototype integration of iTop and Rudder which allows iTop to Rudder -as a source of information about the server content (inventory) and current -compliance level. With that integration, you can see in real time in your CMDB when -a server managed by Rudder is drifting away from its expecting configuration, and -use iTop to understand the impact of such a drift for your IT services. - - -[[rudder-api-integration]] -==== Integrate Rudder thanks to its APIs - -All the above plugins are using http://www.rudder-project.org/rudder-api-doc/[Rudder APIs] -under the hood to operate or get data from Rudders. Rudder APIs are as powerful -as the UI, and anything that can be done through the main graphical interface -can also be scripted away with the APIs. - -The documentation provided on APIs is exhaustive, but here comes a summary of -what can be done with them: - -- accept, delete a node and manage its parameters, -- get information with a parametrable depth about node inventories, -- search for nodes, -- manage (create, update, delete) groups, directives, rules and parameters, -- interact with the Techniques library, -- get compliance details about a node or a rule, with a parameterized depth of - information, -- manage change requests. - -And of course, any plugin can provide new API endpoints, as is doing the -data source plugin. - -These API can also be used to automate Rudder action, like node acceptation or compliance -export and archiving for nodes. 
- diff --git a/src/reference/modules/ROOT/pages/85_Plugins/00_intro.adoc b/src/reference/modules/ROOT/pages/85_Plugins/00_intro.adoc deleted file mode 100644 index 44ed19f7..00000000 --- a/src/reference/modules/ROOT/pages/85_Plugins/00_intro.adoc +++ /dev/null @@ -1,11 +0,0 @@ - -[[plugins]] -== Rudder Plugins - -This chapter presents available plugins provides for Rudder and maintained -by along with Rudder. They are available for each version of Rudder and -updated if needed (for example in case of API change). - -Plugins can be open-source or only available in binary format. - - diff --git a/src/reference/modules/ROOT/pages/85_Plugins/10_DSC_plugin.adoc b/src/reference/modules/ROOT/pages/85_Plugins/10_DSC_plugin.adoc deleted file mode 100644 index 5e8eb482..00000000 --- a/src/reference/modules/ROOT/pages/85_Plugins/10_DSC_plugin.adoc +++ /dev/null @@ -1,203 +0,0 @@ -[[dsc-plugin]] - -=== Rudder agent DSC - -This plugins allows to manage Windows systems, using Microsoft Powershell DSC - -==== Install Windows DSC plugin on the server - -===== Prerequisite - -The Windows DSC plugin requires *zip* on the Rudder server, you need to install it prior to installing the plugin. - -===== Installing and Upgrading - -The installation and upgrade processes are exactly the same. -Download the *rpkg* file, and run, on the Rudder server: - ----- -/opt/rudder/bin/rudder-pkg install-file rudder-plugin-dsc--.rpkg ----- - -It will add: - -* The ability to generate policies for Windows Nodes -* New generic methods in the technique editor -* New techniques - - -==== Install Windows DSC agent - -The installation and upgrade processes are exactly the same. 
- -===== Supported version of Microsoft Windows - -The Rudder agent needs *PowerShell 4* or later, which is built-in on: - - * Windows Server 2012 R2 and later - -PowerShell 4 may also be installed on the following platforms, following this procedure: https://social.technet.microsoft.com/wiki/contents/articles/20623.step-by-step-upgrading-the-powershell-version-4-on-2008-r2.aspx - - * Windows Server 2008 R2 - * Windows Server 2012 - -===== Desktop version of Microsoft Windows - -There is no official support of Rudder agent on desktop versions of Microsoft Windows. However, the agent can be installed on the following platform: - - * Windows 7 (you will need to upgrade to PowerShell 4 first, and activate WinRM) - * Windows 8 (you will need to upgrade to PowerShell 4 first, and activate WinRM) - * Windows 8.1 - * Windows 10 - -Plase note that prior to the installation on Windows 7 and 8, you will need to install PowerShell 4 and make sure WinRM is activated with the following command: - ----- - -Set-WSManQuickConfig DSC - ----- - -Moreover, the Windows DSC agent comes without digital signature, you need to allow the unsigned source code execution on the Windows node. -In some environment, this policy change can lead to security issues, please read the https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_execution_policies?view=powershell-6[Microsoft Windows doc] associated. -This can be done in powershell by executing the following command: - ----- - -Set-ExecutionPolicy RemoteSigned - ----- - -===== Installation procedure - -Download the *exe* file, and run, on your node: - ----- - -rudder-agent-dsc--.exe - ----- - -The installer will ask the IP address or DNS name of the policy server to use. -If a policy server is already configured (for example during upgrade or an unattended installation), you can leave this field empty. 
- -The installer will install the agent files and create the scheduled tasks to run the agent and the inventory. Rudder does not come as a Windows Service but as a scheduled -PowerShell task, managed by *schtasks.exe*. - -===== Unattended installation - -For an automated unattended installation, you can pre-configure the policy server in the file: - ----- -C:\Program Files\Rudder\etc\policy-server.conf ----- - -Then the installer need to be executed with the following command: - ----- - -rudder-agent-dsc--.exe /S - ----- - -This will install the agent in silent mode. - - -==== Technique editor with DSC - -DSC Generic Methods are shipped with the Rudder dsc plugin. Some are specific for Windows managed systems (like the Registry management), and the others are the DSC version of existing generic methods. - -A filter is available in the Technique Editor to select either all generic methods, generic methods available for classic agent, and generic method available for DSC agent, so that you can choose relevant methods for the type of nodes you need to manage - -image:technique_editor/rudder-technique-editor-filter.png[Technique editor filter] - - -==== DSC Techniques - -Techniques compatible with DSC agent appear, in the Directives and Techniques trees, with a DSC symbol, as shown in the screenshot below. - -Unfortunately, not all Techniques are compatible with DSC agent, as some are deprecated, or some will be completely rewritten, but the coverage is increasing regularly. 
- - -image:rudder-technique-dsc.png[DSC compatible Techniques] - -==== DSC Agent CLI - -The Rudder agent CLI is available as a Powershell module, by running, in a Powershell terminal - ----- - -rudder agent - ----- - -where action can be one of the following - -* disable: Disable the agent, and prevent its execution - -* enable: Enable the agent - -* info: Show information about the agent and the node (hostname, Rudder ID, policy server, etc) - -* inventory: Generate an inventory, and send it to the server - -* run: Run the agent (see example output below) - -* update: Update agent policy from the Rudder Server - -* version: Show the version of the DSC Rudder agent - - -image:rudder-agent-dsc-cli.png[Example of a Rudder DSC agent output on Windows] - -===== Agent logs - -Rudder logs are visible in the output of the agent. You can get more details about what is done with the -`-Verbose` option: - ----- -rudder agent run -v ----- - -You can also explore all agent logs (including those from unattended runs) in the Windows Event Viewer. -Before Windows plugin version 4.2-1.6 Rudder used the windows system eventlog and was logging in -the *Windows Logs -> Application* view, with the *Rudder* source and the *101* Event ID. - -Since the Windows plugin version 4.2-1.6 Rudder will report in a dedicated windows journal named Rudder and its logs are saved on different verbosity: - -* *classic Rudder reports* will have the *Event ID 101*, they are the reports sent to the server. - -* *Information logs* will have the *Event ID 102* and will only be local logs. - -If you had an old plugin version installed Rudder will not try to install the new journal reference because -it needs a complete reboot of the host system. -See the last note on the Microsoft doc: https://msdn.microsoft.com/en-us/library/2awhba7a%28v=vs.110%29.aspx. 
- -If you want to change manually the Rudder eventlog use the following process, keep in mind that it will need a machine restart to avoid any reporting issues. -First identify the current eventlog for Rudder by running in the powershell console ----- -[System.Diagnostics.EventLog]::LogNameFromSourceName("Rudder", ".") ----- -If it does not suit you, remove the Rudder source from it and create a new logger for Rudder ----- -Remove-Eventlog -Source "Rudder" -New-Eventlog -Source "Rudder" -LogName "Rudder" ----- - -Then reboot the system. - - -==== Known issues - -On the first run of the Rudder DSC agent CLI in a Powershell terminal, you may have the following error message: - ----- - -Import-LocalizedData : Cannot find the Windows PowerShell data file 'MSFT_ServiceResource.strings.psd1' in directory 'C:\Windows\system32\WindowsPowershell\v1.0\Modules\PSDesiredStateConfiguration\PSProviders\MSFT_ServiceResource\\', or in any parent culture directories. - ----- - -This does not prevent the correct execution of the agent, and next runs in the same terminal will not exhibit the error - -image:rudder-agent-windows-known-issue.png[Error on first DSC agent execution in a terminal] - diff --git a/src/reference/modules/ROOT/pages/85_Plugins/47_node_properties_data_sources.adoc b/src/reference/modules/ROOT/pages/85_Plugins/47_node_properties_data_sources.adoc deleted file mode 100644 index 7060d219..00000000 --- a/src/reference/modules/ROOT/pages/85_Plugins/47_node_properties_data_sources.adoc +++ /dev/null @@ -1,74 +0,0 @@ -[[node-properties-data-sources]] - -=== Node properties data sources - -As explained in the chapter about node management, Nodes have properties that can be -used to create groups or in techniques and directives parameters. -These properties are key/value pairs, with values being a simple -string or a well formed JSON document. - -Rudder 4.1 introduces a new way to automatically import Node properties -by defining data sources. 
- -The following diagram explains the general working process of data source: - -image:rudder-datasources-description.png[Data source description] - -As displayed, a data source provides a way for Rudder to query (when some -conditions are met) a third party REST API to retrieve interesting -properties for nodes and save them for a given Node property key. - -More precisely, there are three main sets of properties to define (by UI -or via Rudder API) to configure a data source: - -==== First set: data source description - -The first set of properties allows to define an unique identifier for -the data source, which will be used as the property key in node, along -with a human readable name and description. - -==== Second set: query configuration - -The second set of properties allows to define how the third party REST API will -be queried and the returned JSON response processed. - -For now, we only support one query mode which is to do one HTTP query for each -node. In the future, a mode where only one query is done to retrieve -information for all nodes will be added. - -For the query, you will define the HTTP method to use (GET or POST), what is the -remote URL, if there are specific headers or query parameters to add. - -In case a 404 error is returned, the corresponding node property is deleted (on -that node). In case of a timeout or any other HTTP errors, this is considered a -temporary problem, and the node property is left as is. - -When a JSON document is returned, you can define a JSON path expression -(cf https://github.com/jayway/JsonPath/) to select only a sub-part of -the document as the actual data to use as a node property. - -Finally, the resulting data is assigned to the node, using the key name defined -in the data source configuration. - -You can use Rudder variables expansion (`${rudder.node.xxx}`, -`${rudder.parameter.xxx}`, `${node.properties[xxx]}`) in most of these -configuration option: URL, headers, query parameters, JSON path. 
They will be -replaced by their values for each node at the time the HTTP query is run. - -==== Third set: query triggers - -The last set of options allows to define when the data source should -be queried. - -For now, there are 3 available triggers: - -- a scheduled one, allowing to periodically do the update, -- a trigger on policy generation, which allows to get a refresh of node -properties before possibly using them in techniques or directives, -- a trigger on node acceptation, so that a new node immediately get a -working set of properties (for example to join the correct dynamic groups). - -In addition to these configured triggers, data sources can be interactively -refreshed with a call to a Rudder REST API or via the web interface. - - diff --git a/src/reference/modules/ROOT/pages/85_Plugins/50_node_external_reports.adoc b/src/reference/modules/ROOT/pages/85_Plugins/50_node_external_reports.adoc deleted file mode 100644 index 62e45e7a..00000000 --- a/src/reference/modules/ROOT/pages/85_Plugins/50_node_external_reports.adoc +++ /dev/null @@ -1,118 +0,0 @@ -[[node-external-reports]] - -=== Node external reports - -This plugin allows to add external, static documents in a new tab in node details. - -With the plugin, you configure directories on Rudder server where are located -node specific documents. A new tab is created on node details page that allows to download -documents for the corresponding node. - - -==== Documents naming convention - -Documents must be stored in configured directories with a naming convention that allows -Rudder to find back what document corresponds to a given node. - -For that, the document name must contains the node `UUID` or `hostname` in *lower* case. -The exact naming pattern if defined in the `fileformat` configuration parameter, and the -value `@@node@@` is used to denotate the place where the node `UUID` or `hostname` will be. -Both `UUID` and `hostname` are tested when looking for a matching file for a node. 
- -==== Plugin configuration - -This plugin is configured with a configuration file. Any modification in the file will be -reloaded immediately without the need to restart Rudder. - - -==== Configuration file location - -The default location for the configuration file is -`/opt/rudder/share/plugins/node-external-reports/node-external-reports.properties`. - -In case you need to change the configuration file location, you need to start Rudder with the JVM -option parameter `-Drudder.plugin.externalNodeInformation.config=/full/path/to/config/file`. - -==== Configuration file format - -Plugin file format is as follow: - ----- -plugin.node-external-reports.reports { - - 01_first_report_type= { - title=title to display in node page - description=a description which go below the title - dirname=/full/path/to/base/directory/for/these/reports - filename="file-name-pattern-for-@@node@@-report.pdf" - content-type=application/pdf - } - - 02_second_report_type = {} - ----- - -Note that quotes are mandatory only when the value contains `@@` (so most likely only for -the `filename` parameter). - -- "01_first_report_type" is a unique key, used internaly (in logs for example) and for - sorting reports display order in node page; -- "title" is the name of section in the external document tab; -- "dirname" is the base directory on the local file system where documents are stored; -- "description" is a description for what the document is about; -- "filename": the file template name to lookup the document name. `@@node@@` will - be replaced by the node `hostname` or `UUID`. -- "content-type": the HTTP content type to use for the new page. It allows to - direct what the browser will do (open a PDF viewer, display - an HTML page, etc). 
- -For example, if you gather HTML "security" reports, text monitoring one, and PDF compliance -KPI for your nodes, the configuration file will look like: - ----- -plugin.node-external-reports.reports { - - 01_security= { - title=Security Report - description=This report display pen test results - dirname=/var/reports/security - filename="report-@@node@@-sec.html" - content-type=text/html - } - - 02_monitoring { - title=Monitoring Report - description=Monitoring information about the node - dirname=/var/reports/monitoring - filename="monitor-@@node@@.txt" - content-type=text/plain - } - - 03_compliance { - title=Third party compliance report - description=Compliance reports from CMDB - dirname=/var/reports/compliance - filename="compliance-@@node@@.pdf" - content-type=application/pdf - } -} ----- - -And the content of `/var/reports/` will looks like: - ----- -/tmp/reports -├── compliance -│ ├── compliance-node34.china1.bigcorp.com.html -│ │ ..... -│   └── compliance-00000068-55a2-4b97-8529-5154cbb63a18.pdf -├── monitoring -│ ├── monitor-compliance-node34.china1.bigcorp.com.txt -│ │ ..... -│   └── monitor-00000068-55a2-4b97-8529-5154cbb63a18.txt -└── security - ├── report-node34.china1.bigcorp-sec.com.html - │ ..... - └── report-00000068-55a2-4b97-8529-5154cbb63a18-sec.html ----- - diff --git a/src/reference/modules/ROOT/pages/85_Plugins/60_branding.adoc b/src/reference/modules/ROOT/pages/85_Plugins/60_branding.adoc deleted file mode 100644 index b8577798..00000000 --- a/src/reference/modules/ROOT/pages/85_Plugins/60_branding.adoc +++ /dev/null @@ -1,45 +0,0 @@ -[[rudder-branding]] - -=== Rudder Branding - -This plugin allows for changing the apparence of the login page, and the top bar of Rudder, by adding a colored bar with a text. - -Its main usage is to easily differentiate multiple Rudders server, and making sure the changes are made on the right environnement. - -===== Prerequisite - -The Rudder Branding plugin requires Rudder 4.3.3 or higher. 
- -===== Installing and Upgrading - -The installation and upgrade processes are exactly the same. -Download the *rpkg* file, and run, on the Rudder server: - ----- -/opt/rudder/bin/rudder-pkg install-file rudder-plugin-branding--.rpkg ----- - -It will add a default title bar and login bar, and a new page in Settings/Brandings. - -===== Default apparance - -Upon accessing Rudder, the login page will be changed, with a red line and text "Production", as shown here - -image:branding/login-default.png[Default login message] - -Same for the Rudder top bar - -image:branding/default-main-display.png[Default main display] - -===== Customization - -A new page is available within Rudder, to customize the apparence. This page is in the Settings section, called Branding. It allows for setting the top Bar and the Login page, with a color and a text, and a real-time display of the expected result - -image:branding/Configure.png[Configure apparence] - -The text bar accepts any UTF-8 character or emojis, as displayed in this example - -image:branding/Customised_bar.png[Customized bar] - - - diff --git a/src/reference/modules/ROOT/pages/90_reference/00_references_intro.adoc b/src/reference/modules/ROOT/pages/90_reference/00_references_intro.adoc deleted file mode 100644 index 778b8c99..00000000 --- a/src/reference/modules/ROOT/pages/90_reference/00_references_intro.adoc +++ /dev/null @@ -1,4 +0,0 @@ -== Reference - -This chapter contains the reference Rudder configuration files - diff --git a/src/reference/modules/ROOT/pages/90_reference/05_inventory_workflow.adoc b/src/reference/modules/ROOT/pages/90_reference/05_inventory_workflow.adoc deleted file mode 100644 index c496a9c1..00000000 --- a/src/reference/modules/ROOT/pages/90_reference/05_inventory_workflow.adoc +++ /dev/null @@ -1,108 +0,0 @@ -=== Inventory workflow, from nodes to Root server - -One of the main information workflow in a Rudder managed system is the node's -inventory one. 
- -Node inventories are generated on nodes, are sent to the node policy server (be -it a Relay or the Root server) up to the Root server, and stored in the -Rudder database (technically an LDAP server), waiting for later use. - -The goal of that section is to detail the different steps and explain how to -spot and solve a problem on the inventory workflow. Following diagram sum up -the whole process. - -image:rudder-inventory-workflow.png[Inventory workflow, from node to -Root server] - - -==== Processing inventories on node - -Inventories are generated daily during an agent run in the 00:00-06:00 time -frame window local to the node. The exact time is randomly spread on the time -frame for a set of nodes, but each node will always keep the same time (modulo -the exact time of the run). - -User can request the generation and upload of inventory with the command: - ----- - -$ rudder agent inventory - ----- - -In details, generating inventory does: - -* ask the node policy server for its UUID with an HTTP GET on - `https://server/uuid`, -* generate an inventory by scanning the node hardware and software components, -* optionally make a digital signature of the generated inventory file, -* send file(s) to the node's policy server on `https://POLICY-SERVER/inventory-updates/` - -The individual commands can be displayed with the `-i` option to `rudder agent -inventory` command. - - -==== Processing inventories on relays - -On the Relay server: - -* the inventory is received by a `webdav` endpoint, -* the `webdav` service store the file in the folder - `/var/rudder/inventories/incoming` -* on each agent runs, files in `/var/rudder/inventories/incoming` are - forwarded to the Relay own policy server. 
- -==== Processing inventories on root server - -On the Root server, the start of the workflow is the same than on a relay: - -* the inventory is received by a `webdav` endpoint, -* the `webdav` service store the file in the folder - `/var/rudder/inventories/incoming` - -Then, on each run, the agent: - -* look for inventory / signature pairs: -** inventories without a corresponding signature file are processed only if - they are older than 2 minutes, -* POST the inventory or inventory+signature pair to the local API of - "inventory-endpoint" application on `http://localhost:8080/endpoint/upload/` -* the API makes some quick checks on inventory (well formed, mandatory fields...) - and : -** if checks are OK, *ACCEPTS* (HTTP code `200`) the inventory, -** if signature is configured to be mandatory and is missing, or if the - signature is not valid, refuses with *UNAUTHORIZED* error (HTTP code `401`) -** else fails with a *PRECONDITION FAILED* error (HTTP code `412`) -* on error, inventory file is moved to `/var/rudder/inventories/failed`, -* on success: -** the inventory file is moved to `/var/rudder/inventories/received`, -** in parallel, _inventory web_ parses and updates Rudder database. - -==== Queue of inventories waiting to be parsed - -The _inventory endpoint_ has a limited number of slot available for succesfully -uploaded inventories to be queued waiting for parsing. 
-That number can be configured in file `/opt/rudder/etc/inventory-web.properties`: - ----- - -waiting.inventory.queue.size=50 - ----- - -Since Rudder 3.1.18 / 3.2.11 / 4.0.3, the number of currently waiting -inventories can be obtained via a local REST API call to -`http://localhost:8080/endpoint/api/info`: - ----- - -$ curl http://localhost:8080/endpoint/api/info - -{ - "queueMaxSize": 50, - "queueFillCount": 50, - "queueSaturated": true -} - ----- - diff --git a/src/reference/modules/ROOT/pages/90_reference/10_server_data_workflow.adoc b/src/reference/modules/ROOT/pages/90_reference/10_server_data_workflow.adoc deleted file mode 100644 index 730446a6..00000000 --- a/src/reference/modules/ROOT/pages/90_reference/10_server_data_workflow.adoc +++ /dev/null @@ -1,61 +0,0 @@ -=== Rudder Server data workflow - -To have a better understanding of the Archive feature of Rudder, a description -of the data workflow can be useful. - -All the logic of Rudder Techniques is stored on the filesystem in -+/var/rudder/configuration-repository/techniques+. -The files are under version control, using git. -The tree is organized as following: - -. At the first level, techniques are classified in categories: applications, -fileConfiguration, fileDistribution, jobScheduling, system, systemSettings. -The description of the category is included in +category.xml+. - -. At the second and third level, Technique identifier and version. - -. At the last level, each technique is described with a +metadata.xml+ file and -one or several CFEngine template files (name ending with +.st+). 
- -[source,python] - -.An extract of Rudder Techniques filesystem tree - ----- - -+-- techniques -| +-- applications -| | +-- apacheServer -| | | +-- 1.0 -| | | +-- apacheServerConfiguration.st -| | | +-- apacheServerInstall.st -| | | +-- metadata.xml -| | +-- aptPackageInstallation -| | | +-- 1.0 -| | | +-- aptPackageInstallation.st -| | | +-- metadata.xml -| | +-- aptPackageManagerSettings -| | | +-- 1.0 -| | | +-- aptPackageManagerSettings.st -| | | +-- metadata.xml -| | +-- category.xml -| | +-- openvpnClient -| | | +-- 1.0 -| | | +-- metadata.xml -| | | +-- openvpnClientConfiguration.st -| | | +-- openvpnInstall.st - ----- - -At Rudder Server startup, or after the user has requested a reload of the -Rudder Techniques, each +metadata.xml+ is mapped in memory, and used to create -the LDAP subtree of Active Techniques. -The LDAP tree contains also a set of subtrees for Node Groups, Rules and Node -Configurations. - -At each change of the Node Configurations, Rudder Server creates CFEngine draft policies -(+Cf3PolicyDraft+) that are stored in memory, and then invokes +cf-clerk+. -+cf-clerk+ finally generates the CFEngine promises for the Nodes. 
- -image::graphviz/data_workflow.png[Rudder data workflow] - diff --git a/src/reference/modules/ROOT/pages/90_reference/15_config_files_server.adoc b/src/reference/modules/ROOT/pages/90_reference/15_config_files_server.adoc deleted file mode 100644 index 9afb861e..00000000 --- a/src/reference/modules/ROOT/pages/90_reference/15_config_files_server.adoc +++ /dev/null @@ -1,16 +0,0 @@ -=== Configuration files for Rudder Server - -* /opt/rudder/etc/htpasswd-webdav - -* /opt/rudder/etc/inventory-web.properties - -* /opt/rudder/etc/logback.xml - -* /opt/rudder/etc/openldap/slapd.conf - -* /opt/rudder/etc/reportsInfo.xml - -* /opt/rudder/etc/rudder-users.xml - -* /opt/rudder/etc/rudder-web.properties - diff --git a/src/reference/modules/ROOT/pages/90_reference/20_agent_data_workflow.adoc b/src/reference/modules/ROOT/pages/90_reference/20_agent_data_workflow.adoc deleted file mode 100644 index f6f81d4a..00000000 --- a/src/reference/modules/ROOT/pages/90_reference/20_agent_data_workflow.adoc +++ /dev/null @@ -1,72 +0,0 @@ -=== Rudder Agent workflow - -In this chapter, we will have a more detailed view of the Rudder Agent -workflow. What files and processes are created or modified at the installation -of the Rudder Agent? What is happening when a new Node is created? What are the -recurrent tasks performed by the Rudder Agent? How does the Rudder Server handle -the requests coming from the Rudder Agent? The Rudder Agent workflow diagram -summarizes the process that will be described in the next pages. - -image::graphviz/agent_workflow.png[Rudder agent workflow] - -Let's consider the Rudder Agent is installed and configured on the new Node. - -The Rudder Agent is regularly launched and performs following tasks -sequentially, in this order: - -==== Request data from Rudder Server - -The first action of Rudder Agent is to fetch the +tools+ directory from Rudder -Server. 
This directory is located at +/opt/rudder/share/tools+ on the Rudder -Server and at +/var/rudder/tools+ on the Node. If this directory is already -present, only changes will be updated. - -The agent then try to fetch new Applied Policies from Rudder Server. Only -requests from valid Nodes will be accepted. At first run and until the Node has -been validated in Rudder, this step fails. - -==== Launch processes - -Ensure that the CFEngine community daemons +cf-execd+ and +cf-serverd+ are -running. Try to start these daemons if they are not already started. - -Daily between 5:00 and 5:05, relaunch the CFEngine Community daemons +cf-execd+ -and +cf-serverd+. - -Add a line in +/etc/crontab+ to launch +cf-execd+ if it's not running. - -Ensure again that the CFEngine community daemons +cf-execd+ and +cf-serverd+ -are running. Try to start these daemons if they are not already started. - -==== Identify Rudder Root Server - -Ensure the +curl+ package is installed. Install the package if it's not -present. - -Get the identifier of the Rudder Root Server, necessary to generate reports. -The URL of the identifier is http://Rudder_root_server/uuid - - -==== Inventory - -If no inventory has been sent since 8 hours, or if a forced inventory has been -requested (class +force_inventory+ is defined), do and send an inventory to the -server. ----- - -rudder agent inventory - ----- - -No reports are generated until the Node has been validated in Rudder Server. - -==== Syslog - -After validation of the Node, the system log service of the Node is configured -to send reports regularly to the server. Supported system log providers are: -+syslogd+, +rsyslogd+ and +syslog-ng+. - -==== Apply Directives - -Apply other policies and write reports locally. 
- diff --git a/src/reference/modules/ROOT/pages/90_reference/25_config_files_agent.adoc b/src/reference/modules/ROOT/pages/90_reference/25_config_files_agent.adoc deleted file mode 100644 index 5c4c3c73..00000000 --- a/src/reference/modules/ROOT/pages/90_reference/25_config_files_agent.adoc +++ /dev/null @@ -1,4 +0,0 @@ -=== Configuration files for a Node - -* /etc/default/rudder-agent - diff --git a/src/reference/modules/ROOT/pages/90_reference/39_server_install.adoc b/src/reference/modules/ROOT/pages/90_reference/39_server_install.adoc deleted file mode 100644 index 68c3b4cc..00000000 --- a/src/reference/modules/ROOT/pages/90_reference/39_server_install.adoc +++ /dev/null @@ -1,145 +0,0 @@ -=== Packages organization - -==== Packages - -Rudder components are distributed as a set of packages. - -image::graphviz/packages.png[Rudder packages and their dependencies] - -+rudder-webapp+:: - -Package for the Rudder Web Application. It is the graphical interface for -Rudder. - -+rudder-inventory-endpoint+:: - -Package for the inventory reception service. It has no graphical interface. This -service is using HTTP as transport protocol. It receives an parses the files -sent by FusionInventory and insert the valuable data into the LDAP database. - -+rudder-jetty+:: - -Application server for +rudder-webapp+ and +rudder-inventory-endpoint+. Both -packages are written in 'Scala'. At compilation time, they are converted into -+.war+ files. They need to be run in an application server. 'Jetty' is this -application server. It depends on a compatible Java 7 Runtime Environment. - -+rudder-techniquess+:: - -Package for the Techniques. They are installed in -+/opt/rudder/share/techniques+. At runtime, the Techniques are -copied into a 'git' repository in +/var/rudder/configuration-repository+. Therefore, the package depends -on the +git+ package. 
- -+rudder-inventory-ldap+:: - -Package for the database containing the inventory and configuration information -for each pending and validated Node. This 'LDAP' database is build upon -'OpenLDAP' server. The 'OpenLDAP' engine is contained in the package. - -+rudder-reports+:: - -Package for the database containing the logs sent by each Node and the reports -computed by Rudder. This is a 'PostgreSQL' database using the 'PostgreSQL' -engine of the distribution. The package has a dependency on the +postgresl+ -package, creates the database named +rudder+ and installs the inialisation -scripts for that database in +/opt/rudder/etc/postgresql/*.sql+. - -+rudder-server-root+:: - -Package to ease installation of all Rudder services. This package depends on -all above packages. It also - -- installs the Rudder configuration script: - ----- - -/opt/rudder/bin/rudder-init - ----- - -- installs the initial promises for the Root Server in: - ----- - -/opt/rudder/share/initial-promises/ - ----- - -- installs the init scripts (and associated +default+ file): - ----- - -/etc/init.d/rudder - ----- - -- installs the logrotate configuration: - ----- - -/etc/logrotate.d/rudder-server-root - ----- - -+rudder-agent+:: - -One single package integrates everything needed for the Rudder Agent. It -contains CFEngine Commmunity, FusionInventory, and the initial promises for a -Node. It also contains an init script: - ----- - -/etc/init.d/rudder - ----- - -The +rudder-agent+ package depends on a few libraries and utilities: - -* +OpenSSL+ -* +libpcre+ -* +liblmdb+ (On platforms where it is available as a package - on others the rudder-agent package bundles it) -* +uuidgen+ - -==== Software dependencies and third party components - -The Rudder Web application requires the installation of 'Apache 2 httpd', -'JRE 7+', and 'cURL'; the LDAP Inventory service needs 'rsyslog' and -the report service requires 'PostgreSQL'. - -When available, packages from your distribution are used. 
These packages are: - -Apache:: - -The Apache Web server is used as a proxy to give HTTP access to the Web -Application. It is also used to give writable WebDAV access for the inventory. -The Nodes send their inventory to the WebDAV service, the inventory is stored in -+/var/rudder/inventories/incoming+. - -PostgreSQL:: - -The PostgreSQL database is used to store logs sent by the Nodes and -reports generated by Rudder. Rudder 4.0 is tested for PostgreSQL 9.2 and higher. It still works with version 8.4 to 9.1, but not warranties are made that it will hold in the future. It is really recommanded to migrate to PostgreSQL 9.2 at least. - -rsyslog and rsyslog-pgsql:: - -The rsyslog server is receiving the logs from the nodes and insert them into a -PostgreSQL database. On SLES, the +rsyslog-pgsql+ package is not part of the -distribution, it can be downloaded alongside Rudder packages. - -Java 7+ JRE:: - -The Java runtime is needed by the Jetty application server. Where possible, the -package from the distribution is used, else a Java RE must be downloaded -from Oracle's website (http://www.java.com). - -curl:: - -This package is used to send inventory files from -+/var/rudder/inventories/incoming+ to the Rudder Endpoint. - -git:: - -The running Techniques Library is maintained as a git repository in -+/var/rudder/configuration-repository/techniques+. - diff --git a/src/reference/modules/ROOT/pages/90_reference/40_build_agent.adoc b/src/reference/modules/ROOT/pages/90_reference/40_build_agent.adoc deleted file mode 100644 index a008cafa..00000000 --- a/src/reference/modules/ROOT/pages/90_reference/40_build_agent.adoc +++ /dev/null @@ -1,93 +0,0 @@ -[[_building_the_rudder_agent]] -=== Building the Rudder Agent - -==== Get source - -Make sure you have network access and the git command. 
- -Go to your build directory and checkout rudder-packages - ----- - -cd /usr/src -git clone https://github.com/Normation/rudder-packages.git -cd rudder-packages - ----- - -Choose the branch to build - ----- - -# For branch 4.1 (branches before 4.1 are not supported) -git checkout branches/rudder/4.1 -cd rudder-agent - ----- - -Now choose one of the 3 next chapter, depending on your case: dpkg (debian-like package), rpm (redhat-like package) or other. - -==== Build a dpkg package - -Set the version to build: - -* Update the debian/changelog file to make the first entry match the version you want to build. -* Edit the SOURCES/Makefile file and set the value of RUDDER_VERSION_TO_PACKAGE: see http://www.rudder-project.org/archives/ for a complete list of available versions. - -Run the dpkg package builder: - ----- - -dpkg-buildpackage - ----- - -The package will be stored in the parent directory. - -==== Build an rpm package - -Set the version to build: - -* Edit the SOURCES/Makefile file and set the value of RUDDER_VERSION_TO_PACKAGE: see http://www.rudder-project.org/archives/ for a complete list of available versions. - -Run the rpm package builder: - ----- - -# make sure you are in in rudder-agent, then -ln -s `pwd` /root/rpmbuild -rpmbuild -ba --define 'real_version 4.1.0' SPECS/*.spec - ----- - -The package will be stored in RPMS/ - -==== Build an agent locally - -Before building the agent, you must decide on some environment variables: - - -* RUDDER_VERSION_TO_PACKAGE: the version of the sources that will be used, see http://www.rudder-project.org/archives/ for a complete list. If a 'rudder-sources' directory exists in SOURCES it will be used instead of downloading sources. The Variable still needs to be defined though. - -* DESTDIR: where to put the installation, use / to install on the system and leave the default of ./target to prepare a package. - -* USE_SYSTEM_OPENSSL: (default true), use system openssl (depends on libssl-dev) or build it with the agent. 
- -* USE_SYSTEM_LMDB: (default false), use system lmdb (depends on liblmdb-dev) or build it with the agent. - -* USE_SYSTEM_PCRE: (default true), use system pcre (depends on libpcre3-dev) or build it with the agent. - -* USE_SYSTEM_PERL: (default false), use system perl (depends on perl) or build it with the agent. - -* USE_SYSTEM_FUSION: (default false), use system fusion (depends on fusioninventory-agent), or build it with the agent. We advise you to use the Rudder version since it contains some patches. - ----- - -# example -env="RUDDER_VERSION_TO_PACKAGE=4.1.0 DESTDIR=/ USE_SYSTEM_PERL=true" -make $env -make install $env - ----- - - diff --git a/src/reference/modules/ROOT/pages/90_reference/50_generic_methods.adoc b/src/reference/modules/ROOT/pages/90_reference/50_generic_methods.adoc deleted file mode 100644 index 53f370e7..00000000 --- a/src/reference/modules/ROOT/pages/90_reference/50_generic_methods.adoc +++ /dev/null @@ -1,6 +0,0 @@ -=== Generic methods - -This section documents all the generic methods available in the xref:23_configuration_management/41_technique_editor.adoc#technique-editor[Technique Editor]. 
- -include::{partialsdir}/dyn/generic_methods.adoc[] - diff --git a/src/reference/modules/ROOT/pages/90_reference/60_man_pages.adoc b/src/reference/modules/ROOT/pages/90_reference/60_man_pages.adoc deleted file mode 100644 index f3dc5618..00000000 --- a/src/reference/modules/ROOT/pages/90_reference/60_man_pages.adoc +++ /dev/null @@ -1,6 +0,0 @@ -// Man page - -=== Man pages - -include::{partialsdir}/dyn/rudder.adoc[leveloffset=+3] - diff --git a/src/reference/modules/ROOT/pages/90_reference/71_reports.adoc b/src/reference/modules/ROOT/pages/90_reference/71_reports.adoc deleted file mode 100644 index 5fbbfe01..00000000 --- a/src/reference/modules/ROOT/pages/90_reference/71_reports.adoc +++ /dev/null @@ -1,147 +0,0 @@ -=== Reports reference - -This page describes the concept behind the reporting in Rudder, and specifically how to write the Techniques to get proper reporting in Rudder - -==== Concepts - -Each Technique, when converted into a Directive and applied to a Node, must generate reports for Rudder to get proper compliance reports. 
This reports must contains specific information : - - * The Report type, that can be logs for information purpose or result to express a compliance - * The Rule Id (autogenerated) - * The Directive Id (autogenerated) - * The Version Id (revision of the Rule) (autogenerated) - * The name of the component the report is related to - * The value of the key variable in the component (or None if not available) - * The Execution Timestamp, to know in which execution of the agent the promise has been generated - -These reports are sent via Syslog to the Rudder Root Server, parsed and put in a database, that is queried to generate the reporting - -==== Report format - -A report has the following format : - ----- - -@@Technique@@Type@@RuleId@@DirectiveId@@VersionId@@Component@@Key@@ExecutionTimeStamp##NodeId@#HumanReadableMessage - ----- - - * Technique : Human readable Technique name - * Type : type of report (see bellow) - * RuleId : The Id of the Configuration Rule, autogenerated - * DirectiveId : The Id of the Directive, autogenerated - * VersionId : the revision of the ConfigurationRule, autogenerated - * Component : the name of the component this Directive is related to (if no component are defined in the metadata.xml, then the Technique name is used) - * Key : the value of the reference variable. If there is no reference variable, then the value None should be used - * ExecutionTimeStamp : the timestamp of the current CFEngine execution - * NodeId : the id of the node - * HumanReadableMessage : a message than a Human can understand - -===== Valid report types - -[cols="1,1,1,1,4", options="header"] -.Report Types -|=== -| Name -| Type -| Mode -| Max number -| Details - -| log_trace -| log -| any -| infinity -| Should be used for advanced debuging purpose only. - -| log_debug -| log -| any -| infinity -| Should be used for debug purpose only. - -| log_info -| log -| any -| infinity -| Use for standard logging purposes. 
- -| log_warn -| log -| any -| infinity -| Used for logging only for the moment. Should be used when something unexpected happens. - -| log_repaired -| log -| enforce -| infinity -| Used for logging purposes, to list all that is repaired by the promises. - -| result_na -| result -| enforce -| one per component/key -| Defines the status of the Component to Not Applicable (if there are no result_success, result_repaired, result_error). Should be used only when the component is not applicable because it does not match the target context. - -| result_success -| result -| enforce -| one per component/key -| Defines the status of the Component to Success (if there are no result_repaired or result_error). Should be used only when everything is already in the correct state in this component for this key. - -| result_repaired -| result -| enforce -| one per component/key -| Defines the status of the Component to Repaired (if there are no result_error). Should be used only when something was not in the correct state, but could be corrected. - -| result_error -| result -| enforce -| infinity per component/key -| Defines the status of the Component to Error. Should be used when something was not in the correct state, and could not be corrected. - -| audit_na -| result -| audit -| one per component/key -| Defines the status of an Component to Not Applicable (if there are no result_success, result_repaired, result_error). Should be used only when the component is not applicable because it does not match the target context. - -| audit_compliant -nent was not applicable to the node. -| result -| audit -| one per component/key -| Defines the status of the Component to Compliant (if there are no audit_noncompliant or audit_error). Should be used only when everything is already in the correct state in this component for this key. - -| audit_noncompliant -| result -| audit -| one per component/key -| Defines the status of the Component to Non Compliant (if there are no audit_error). 
Should be used only when something was not in the correct state. - -| audit_error -| result -| audit -| infinity per component/key -| Defines the status of the Component to Error. Should be used when the audit could not be done or was interrupted. - -|=== - -Variables used to generate the reports - -Some facilities have been created to help putting the right values at the right place - - * `&TRACKINGKEY&`: this is an auto generated variable, put in the technique file, that Rudder will replace when writing the promises by - ----- - -
RuleId@@DirectiveId@@VersionId
-
-----
-
-   * `$(g.execRun)`: this is replaced at runtime by CFEngine 3 to the current execution time
-   * `$(g.uuid)`: this is replaced at runtime by CFEngine 3 to the Node Id
-
-
diff --git a/src/reference/modules/ROOT/pages/90_reference/72_techniques_templating.adoc b/src/reference/modules/ROOT/pages/90_reference/72_techniques_templating.adoc
deleted file mode 100644
index e59b87c7..00000000
--- a/src/reference/modules/ROOT/pages/90_reference/72_techniques_templating.adoc
+++ /dev/null
@@ -1,89 +0,0 @@
-=== Syntax of the Techniques
-
-==== Generalities
-
-The Techniques use the http://www.stringtemplate.org/[StringTemplate] engine. A Technique *must* have the .st extension to be extended by Rudder (have some variables replaced, some part removed or added given some parameters).
-
-==== Variable remplacement
-
-Note : Rudder use a StringTemplate grammar slighlty different from the default one. Rather than using "$" as a variable identifier, the Techniques use "&" to avoid collision with the CFEngine variables
-
-===== Single-valued variable remplacement
-
-----
-
-&UUID&
-
-----
-
-   * Will be remplaced by the value of the variable UUID
-
-===== Remplacement of variable with one or more values
-
-----
-
-&DNS_RESOLVERS: { "&it&" };separator=", "&
-
-----
-
-   * Will be remplaced by `"8.8.8.8", "8.8.4.4"`
-   * Here, `&it&` is an alias for the current item in the list (with no confusion, because there is only one variable)
-
-----
-
-&POLICYCHILDREN, CHILDRENID : {host, uuid |
-"/var/rudder/share/&uuid&/"
-maproot => { host2ip("&host&"), escape("&host&") },
-admit => { host2ip("&host&"), escape("&host&") };
-   
-} &
-
-----
-
-   * `host` is an alias for the current value of POLICYCHILDREN
-   * `uuid` is an alias for the current value of CHILDRENID
-   * Both item are iterated at the same time, so both list must have the same length
-
-===== Remplacement of variable with one or more value, and writing an index all along
-
-----
-
-&FILE_AND_FOLDER_MANAGEMENT_PATH:{path |"file[&i&][path]" string => "&path&";
-}&
-
-----
-
-   * _i_ is an iterator, starting at 1
-
-The result would be:
-
-----
-
-"file[1][path]" string => "/var";
-"file[2][path]" string => "/bin";
-
-----
-
-===== Conditionnal writing of a section
-
-----
-
-&if(INITIAL)&
-
-something
-
-&endif&
-
-----
-
-The variable must either be:
-
-   * A boolean: If its value is true, then the section will be displayed
-   * A variable with the parameter `MAYBEEMPTY="true"`: If the value is not set, then the section won't be displayed, otherwise it will be displayed
-
-More information can be found here: https://theantlrguy.atlassian.net/wiki/display/ST/ST+condensed+--+Templates+and+expressions
-
-===== Unique identifier of Directive for Techniques with separated policy generation
-
-As of Rudder 4.3, Techniques with separated policy generation (see tag POLICYGENERATION in metadata.xml) need to have a way to identify uniquely their generated files, and bundles and methods. The special placeholder RudderUniqueID is replaced at generation by the identifier of the Directive. It can be used anywhere in the .st files, or even in the OUTPATH. 
-
diff --git a/src/reference/modules/ROOT/pages/90_reference/73_techniques_best_practices.adoc b/src/reference/modules/ROOT/pages/90_reference/73_techniques_best_practices.adoc
deleted file mode 100644
index cf93052e..00000000
--- a/src/reference/modules/ROOT/pages/90_reference/73_techniques_best_practices.adoc
+++ /dev/null
@@ -1,135 +0,0 @@
-=== Best Practices for Techniques
-
-==== Naming convention
-
-   * The name of bundle and classes should be written with underscore (i.e: this_is_a_good_example) instead of CamelCase (i.e: ThisIsABadExample)
-   * All variable, class and bundle names should be prefixed by "rudder_"
-   * The bundle entry point for the Technique should be named rudder_
-   * The bundles which makes all the actions should be suffixed by a meaningful name ( "rudder__installation", "rudder__configuration", "rudder__reporting", ..). This rule applies even if there is only one bundle
-   * The prefix of classes should all be "rudder__"
-   * The classes defined as an outcome should be named:
-
-      * `rudder___kept`
-      * `rudder___repaired`
-      * `rudder___failed`
-      * `rudder___denied`
-      * `rudder___timeout`
-      * `rudder___error` (error include failed, denied and timeout)
-
-   * The name of the bodies written in the Rudder Library should be prefixed: `rudder_common_`
-
-==== Raising classes
-
-   * `rudder___error` should be raised simultaneously as `rudder___failed`, `rudder___denied` or `rudder___timeout`.
-   * The body *rudder_common_classes* automatically abide by this rule
-
-==== Writing convention
-
-===== Technique naming guidelines
-
-The following rules should be followed when naming a new Technique:
-
-   * Try to keep names as short as possible, to improve readability
-   * Read the existing technique list, and particularly techniques related to what you are writing. The new names should be consistent with existing ones.
-   * The name should be a nominal group, use "File content" and "Service state" but never "Manage file content" or "Set Service state". It describes the target of the action, not the action itself.
-   * The name should look like: General Concept (package, file, etc.) + Source (from file, etc.) + Implementation details (platform, software name, etc.)
-     * Package sources (Zypper)
-     * HTTP server (Apache)
-     * Variable from local file (string)
-   * The general idea is to go from the most general information to the most precise.
-   * Use "directory" and never "folder"
-   * Use "settings" and never "configuration"
-   * Use *sentence case*, only the first word is capitalised, like in a normal sentence ("Variable from local file" and not "Variable from Local File").
-
-===== In the Technique
-
-   * We try to follow CFEngine conventions but with some exceptions like using brackets "{}" instead of parenthesis "()"
-   * When defining bundles or bodies, the opening bracket should be on a dedicated line. Exemple:
-
-----
-
-bundle common control
-{
-  bundlesequence => { "exemple" };
-}
-
-----
-
-   * Indentation should be made by spaces. A incrementation of indentation is equal to two spaces
-   * The promise type should be indented by two spaces (instead of being at the same indentation level than the bundle name)
-   * The class expression should be indented by four spaces (two spaces after the promise type)
-   * The promiser should be indented by six spaces (two spaces after the class expression or four spaces after the promise type if no class expression is defined)
-   * Attributes of promises should be indented by eight spaces (two spaces after the promiser) and it should be only one attribute by line.
-   * Attribute's arrows '=>' should all be at the same level, one character after the largest attribute name
-
-----
-
-bundle agent example
-{
-  type:
-      "promiser"
-        attribute  => "value1";
-
-    class::
-      "promiser2"
-        attribute2 => "value2";
-}
-
-----
-
-   * Attributes of promise type "vars" and "classes" should be on only one line except if there are more than one attribute.
-   * For promise type "vars" and "classes" on one line, attribute names and the arrows should be aligned
-   * A list should be written multilines if it needs more than 80 characters in one line
-   * Multilines list should have comma after each element, except the last one.
-   * Multilines list should begin with only a bracket "{"
-
-----
-
-    vars:
-        "value" slist =>
-          {
-            "one",
-            "two",
-            "three"
-          };
-
-----
-
-   * The name of the variable in argument of the bundle should be named "params"
-   * The call of the variables should be made with by using brackets `${var_correctly_called}` instead of parenthesis `$(var_wrongly_called)`
-   * Alternance of brackets and parenthesis are tolerated when lots of variables are imbricated for more readability: `${var_lv1[$(var_lvl2[${var_lvl3}])]}`
-   * A Technique should have its bundle wrote with parameters
-   * All the bundles should have as first argument "prefix" which contains the prefix to use for all the classes made from an outcome. This prefix should never be hardcoded in the bundle.
-   * Always write comments with # when a promise needs more than 30 seconds of thought.
-   * If classes should be created in order to iterate for make a workaround of the normal ordering (i.e: "iteration_1", "iteration_2", "iteration_3"), they should always be defined at the end of the promise type "classes".
-   * The order to the promise type must always be in the order of the normal ordering : https://docs.cfengine.com/docs/3.10/reference-language-concepts-normal-ordering.html
-   * StringTemplate variables should always be written in UPPERCASE
-   * StringTemplate variables should be written with underscore
-   * StringTemplate variables should always be prefixed by the Technique name in uppecase too. i.e: `CHECK_GENERIC_FILE_FILE_NAME`
-
-===== In the metadata.xml
-
-   * Name of sections should always be written in literary English (no CamelCase or underscores).
-   * The value of variable "Don't change" should always be "dontchange" or "" if the easier.
-
-==== Files convention
-
-   * File names in a Technique should not be prefixed by the name of the Technique
-   * When a Technique needs specific bodies, the bodies should be written in a bodies.st file
-   * The file containing the bundle which makes all the actions (and containing the bundle "run") should be named "main.cf"
-   * The file containing all the variables and calling the bundle "run" should be name config.st
-   * Initialization of a new Technique should always be made from the file "technique-metadata-sample.xml" which is present on the root of the "rudder-techniques" repository
-   * Rudder standard library should be located in "common" Technique
-
-==== Maintenance
-
-   * These rules were introduced after the 2.5 release of Rudder and before the 2.6 release. Therefore, they were enforced as of rudder-techniques-2.6.*.
-   * Always follow the conventions above when Techniques are updated but only for the lines edited. This rule concerns the Techniques on all the branches of git.
-   * On any branches that have released versions on them, we only allow minimal modifications. No lines should be modified if not to fix a bug (respecting these best practices is not currently considered a bug).
-
-==== Testing
-
-   * There is a test suite in scripts/check-techniques.sh that check metadata.xml and normal ordering in code
-   * The list of all maintained techniques (techniques and versions) is in maintained-techniques file, and should be updated when new techniques or versions are created.
-
-
diff --git a/src/reference/modules/ROOT/pages/90_reference/74_package_format.adoc b/src/reference/modules/ROOT/pages/90_reference/74_package_format.adoc
deleted file mode 100644
index b8726363..00000000
--- a/src/reference/modules/ROOT/pages/90_reference/74_package_format.adoc
+++ /dev/null
@@ -1,96 +0,0 @@
-=== Package format
-
-Rudder has a specific package format for plugins.
-
-You can manage Rudder packages with the rudder-pkg command. This is the documentation of how they are created.
-
-==== File description
-
-A Rudder package file ends with the `.rpkg` extension.
-
-A Rudder package file is an archive file and can be managed with the 'ar' command.
-
-The archive contains:
-
-* A metadata file in JSON format named medatata
-* A tarball file in txz format name scripts.txz that contains package setup utility scripts
-* One or more tarball files in txz format that contain the package files
-
-The metadata file is a JSON file and is named 'metadata':
-
-----
-
-{
-  # the only currently supported type in "plugin" (mandatory)
-  "type": "plugin",
-  # the package name must consist of ascii characters without whitespace (mandatory)
-  "name": "myplugin",
-  # the package version has the form "rudder_major-version_major.version_minor" for a plugin (mandatory)
-  "version": "4.1-1.0",
-  # these are is purely informative (optional)
-  "build-date": "2017-02-22T13:58:23Z",
-  "build-commit": "34aea1077f34e5abdaf88eb3455352aa4559ba8b",
-  # the list of jar files to enable if this is a webapp plugin (optional)
-  "jar-files": [ "test.jar" ],
-  # the list of packages or other plugins that this package depends on (optional)
-  # this is currently only informative
-  "depends": {
-    # dependency on a specific binary that must be in the PATH
-    "binary": [ "zip" ]
-    # dependencies on dpkg based systems
-    "dpkg": [ "apache2" ],
-    "rpm": [ ],
-    # dependency specific to debian-8
-    "debian-8": [ ],
-    "sles-11": [ ],
-    # rudder dependency, ie this is a Rudder format package
-    "rudder": [ "new-plugin" ]
-  },
-  # the plugin content (mandatory)
-  "content": {
-    # this will put the content of the extracted files.txz into /opt/rudder/share
-    "files.txz": "/opt/rudder/share",
-    "var_rudder.txz": "/var/rudder"
-  }
-}
-
-----
-
-To see a package metadata file use:
-
-----
-
-ar p package.rpkg medatada
-
-----
-
-The scripts.txz is a tarball that can contain zero or more executable files named:
-
-* preinst that will be run before installing the package files 
-* postinst that will be run after installing the package files
-* prerm that will be run before removing the package files
-* postrm that will be run after removing the package files
-
-preinst and postinst take one parameter that can be 'install' or 'upgrade'. The value 'upgrade' is used when a previous version of the package is already installed.
-
-To create the scripts.txz file use:
-
-----
-
-tar cvfJ scripts.txz preinst postinst prerm postrm
-
-----
-
-To create a Rudder package file use the ar command:
-
-----
-
-ar r mypackage-4.1-3.0.rpkg medatada scripts.txz files.txz
-
-----
-
-Note that ar r inserts or replaces files so you can create your package with incremental inserts.
-
-To extract files, 'use ar x' instead.
-
-
diff --git a/src/reference/modules/ROOT/pages/90_reference/80_relay_api.adoc b/src/reference/modules/ROOT/pages/90_reference/80_relay_api.adoc
deleted file mode 100644
index dfeacc4d..00000000
--- a/src/reference/modules/ROOT/pages/90_reference/80_relay_api.adoc
+++ /dev/null
@@ -1,103 +0,0 @@
-=== Rudder relay API
-
-The `rudder-server-relay` package provides an HTTP API.
-It is available on simple relays and root servers.
-It is an internal API, not exposed to users, and used
-to provide various Rudder features.
-
-==== Remote Run
-
-The remote run API is available at `https://relay/rudder/relay-api/remote-run`.
-It allows triggering a run on nodes (like with the `rudder remote run` command).
-
-===== Description
-
-The remote run API applies to all nodes that are below the target relay server, which means:
-
-* All nodes directly connected to the server
-* All the relays that have the target node as policy server, and all nodes that are below them
-
-In particular, it does not act on the target relay itself (except for the root server which is its own policy server).
-
-There are different methods, whether you want to trigger all nodes, or only a part of them.
-
-===== Security
-
-The remote run calls are not authenticated, but restricted to:
-
-* Local calls on the relay
-* The relay's policy server
-
-They requires allowing the policy server to connect to its nodes on port 5309.
-
-===== Usage
-
-This API provides the following methods:
-
-* *POST* `/rudder/relay-api/remote-run/all`: Trigger a run on all nodes below the target relay.
-* *POST* `/rudder/relay-api/remote-run/nodes/`__: Trigger a run on the _node-id_ node (which must be under the target relay).
-* *POST* `/rudder/relay-api/remote-run/nodes` Trigger a run on the given nodes (which must be under the target relay), see the `nodes` parameter below.
-
-The general parameters are:
-
-* `keep_output` = *true* or *false*: Should the agent output be returned (default: *false*)
-* `asynchronous` = *true* or *false*: Should the server return immediately after trigerring the agent or wait for remote runs to end (default: *false*)
-* `classes` = *class* or *class1,class2,etc.* for multiple classes: Classes to pass to the agent (default: none)
-
-And, only for the `/rudder/relay-api/remote-run/nodes` call:
-
-* `nodes` = *node_uuid* or *node_uuid1,node_uuid2,etc.* for multiple nodes: Nodes to trigger (default: none)
-
-==== Shared Files
-
-===== Description
-
-The goal of this API is to share a file from node to node. The source nodes uses an API call to send the file,
-and the destination node will get the file using the same protocol as files shared from the policy server.
-
-The relay that receives an API call to share a file (*PUT*) will:
-
-* share the file directly if the target nodes is one of its managed nodes.
-* send the file to a sub-relay if the target is somewhere under it
-* forward the file to its policy server if the target is nowhere under it
-
-The relay that receive a *HEAD* call will:
-
-* If the file exists, compare the provided hash with the hash of the stored file, and return the result (*true* or *false*).
-* If the file does not exist, return *false*.
-
-The ttl is stored along with the file, and a clean task will regularly run and check for outdated files to remove.
-
-There are ncf generic method that allow easy access to those methods from the nodes.
-
-===== Security
-
-This call is open to all nodes in the allowed networks of the target relay.
-The sent files are signed with the node's key, and the signature is checked before being traited.
-
-===== Usage
-
-This API provides the following methods:
-
-* *PUT* `/shared-files/` __ `/` __ `/` __
-* *HEAD* `/shared-files/` __ `/` __ `/` __ `?hash=` _file-hash_
-
-The common URL parameters are:
-
-* `target-uuid` = *destination_node_uuid*:  where to send the file to
-* `source-uuid` = *source_node_uuid*: who sent the file
-* `file-id` = *my_file_id*: under which name to store the file, this needs to be unique
-
-The URL parameters specific to the *HEAD* call are:
-
-* `file-hash` = *value of the hash*: hash of the shared file
-
-The following are only needed for the *PUT* call:
-
-* `hash_value` = *value of the hash*: hash of the shared file
-* `algorithm` = *sha1*, *sha256* or *sha512*: algorithm used to hash the file
-* `digest` = **: signature of the file
-* `pubkey` = **: public key
-* `ttl` = **: can be a number of second or a string of the long form "1day 2hours 3minute 4seconds" or abbreviated in the form "5h 3s"
-* `header` = *rudder-signature-v1*: signing format (for now, only one possible value)
-
diff --git a/src/reference/modules/ROOT/pages/00_introduction/40_architecture_and_dependencies.adoc b/src/reference/modules/ROOT/pages/architecture_and_dependencies.adoc
similarity index 87%
rename from src/reference/modules/ROOT/pages/00_introduction/40_architecture_and_dependencies.adoc
rename to src/reference/modules/ROOT/pages/architecture_and_dependencies.adoc
index 85d54620..73d8a590 100644
--- a/src/reference/modules/ROOT/pages/00_introduction/40_architecture_and_dependencies.adoc
+++ b/src/reference/modules/ROOT/pages/architecture_and_dependencies.adoc
@@ -1,9 +1,9 @@
 [[architecture]]
-=== Technical architecture and software dependencies
+= Technical architecture and software dependencies
 
-==== Functional architecture of Rudder
+== Functional architecture of Rudder
 
-Rudder contains several functionaly independant components, illustrated in the diagram below:
+Rudder contains several functionally independent components, illustrated in the diagram below:
 
 * Inventory database
 * Configuration policies database
@@ -15,7 +15,7 @@ Rudder contains several functionaly independant components, illustrated in the d
 
 image::introduction/rudder_functional_component_diagram-simple-v1.png[Rudder functional architecture]
 
-==== Network architecture in client/server mode
+== Network architecture in client/server mode
 
 The Rudder server listens for incoming connections from the agents installed on the nodes to manage,
 and communicates with them. The connection frequency from nodes to server is configurable, from several
@@ -29,7 +29,7 @@ You can see that relay server allow separating some network areas (for example a
 a specific datacenter or remote site) using a local server for each area to distribute
 configuration policies and centralize agent reports and inventories.
 
-==== Agents
+== Agents
 
 Agents can be installed using a simple software package (`.exe`, `.deb` or `.rpm`).
 
@@ -42,5 +42,3 @@ like CentOS and Scientific Linux, Debian, Ubuntu, SUSE Linux Enterprise, etc.) f
 but also for older unsupported ones, Windows for desktops and servers (Server 2008 R2 or newer) and
 AIX (5.3 or newer). Experimental builds for Solaris, FreeBSD, Android and Mac OS X also exist,
 as well as a version for ARM architecture.
-
-
diff --git a/src/reference/modules/ROOT/pages/index.adoc b/src/reference/modules/ROOT/pages/index.adoc
index 93ab7718..803f28ac 100644
--- a/src/reference/modules/ROOT/pages/index.adoc
+++ b/src/reference/modules/ROOT/pages/index.adoc
@@ -1 +1,88 @@
-= Rudder reference documentation
\ No newline at end of file
+[[what-is-rudder]]
+= What is Rudder?
+
+image::big-rudder-logo.png["Rudder logo", align="center"]
+
+Rudder is an easy to use, web-driven, role-based solution for IT Infrastructure
+Automation and Compliance. With a focus on continuously checking configurations
+and centralising real-time status data, Rudder can show a high-level summary
+(_ISO 27001 rules are at 100%!_) and break down noncompliance issues to a deep
+technical level (_Host prod-web-03: SSH server configuration allows root login_).
+
+A few things that make Rudder stand out:
+
+* A *simple framework* allows you to *extend the built-in rules* to implement
+  specific low-level configuration patterns, however complex they may be, using
+  simple building blocks (_ensure package installed in version X_, _ensure file content_,
+  _ensure line in file_, etc.). A graphical builder lowers the technical level required to use this.
+* Each policy can be independently set to be automatically *checked or enforced*
+  on a policy or host level. In Enforce mode, each remediation action is recorded,
+  showing the value of these invisible fixes.
+* Rudder works on almost *every kind of device*, so you’ll be managing physical
+  and virtual servers in the data center, cloud instances, and embedded IoT devices
+  in the same way.
+* Rudder is designed for *critical environments* where a *security* breach can mean
+  more than a blip in the sales stats. Built-in features include change requests,
+  audit logs, and strong authentication.
+* Rudder relies on an agent that needs to be installed on all hosts to audit.
+  The *agent is very lightweight* (10 to 20 MB of RAM at peak) and *blazingly fast*
+  (it’s written in C and takes less than 10 seconds to verify 100 rules). Installation
+  is self-contained, via a single package, and can auto-update to limit agent
+  management burden.
+* Rudder is a *true and professional open source* solution—the team behind Rudder
+  doesn't believe in the dual-speed licensing approach that makes you reinstall
+  everything and promotes open source as little more than a “demo version.”
+
+Rudder is an established project with *several 10000s of nodes managed*, in companies
+from small to biggest-in-their-field. Typical deployments manage 100s to 1000s of nodes.
+The biggest known deployment in 2017 is about 7000 nodes.
+
+image::dashboard-overview.png["Rudder dashboard", align="center"]
+
+== Made for production environments
+
+We believe that there is a growing impedance mismatch between the Short Time of
+application development and deployment, and the Long Time of the infrastructure.
+The latter needs rationalisation, stability and conformity before catching the hyped
+techno of the day, to be able to deliver a reliable technical platform, continuously
+working with a minimum of risks.
+
+Rudder was made for the Long Time, to help teams deliver efficient infrastructures with
+simplicity, giving them feedback where needed, keeping them alert of possible
+incoming problem, continuously checking conformity to their rules, and all of that
+whatever the infrastructure they choose to build.
+
+image::introduction/build_run_devops.png["Modern IT production of services and Open Source automation tools stack", align="center"]
+
+To achieve these goals, Rudder goes beyond simple automation of commands or
+configurations. Rudder continuously maintains your infrastructure to keep it
+conform with your configurations and security rules.
+
+At each level (global, by configuration policy, by node, etc), you can choose to
+either *Audit* the component - and no modification at all will be made on it -, or to
+*Enforce* the policy, automatically correcting a drift if needed.
+
+== Different roles for a better accessibility
+
+Rudder was thought from the start for plug&play-ability: easy to install and to
+upgrade, easy to start with and grow with.
+
+Rudder comes with a graphical interface, a standard library of configuration
+policies ready to use, and a graphical rule editor.
+
+image::introduction/web_api_cli.png["Use what best fits your needs: Web interface, API, or console", align="center"]
+
+Developers can script Rudder through its APIs and security teams can check
+conformity level to their policies or inventory (both software and hardware) of a
+server at any time.
+
+
+== Universality
+
+Rudder agent is extremely fast, light, and versatile. It works on a wide variety
+of OS or hardware, from physical server to cloud instance, user laptops or even
+Digital Cities and IoT objects.
+
+image::introduction/agent_output.png["Versatile agent", align="center"]
+
+
diff --git a/src/reference/modules/ROOT/pages/00_introduction/20_key_features.adoc b/src/reference/modules/ROOT/pages/key_features.adoc
similarity index 88%
rename from src/reference/modules/ROOT/pages/00_introduction/20_key_features.adoc
rename to src/reference/modules/ROOT/pages/key_features.adoc
index ca9eee80..ec0b67b3 100644
--- a/src/reference/modules/ROOT/pages/00_introduction/20_key_features.adoc
+++ b/src/reference/modules/ROOT/pages/key_features.adoc
@@ -1,7 +1,7 @@
 [[key-features]]
-=== Key Features
+= Key Features
 
-==== OS independent target configuration state definition
+== OS independent target configuration state definition
 
 Rudder is able to adapt to complex process and only do the minimal required
 work so that the server converges to the desired state, and so whatever was the
@@ -14,7 +14,7 @@ image::continuous-configuration.png[Continuous Configuration]
 
 
 Rudder is natively integrated with the supported OS (Linux, Windows, AIX - see
-xref:10_installation/05_requirements/21_supported_architecture.adoc#node-supported-os[the list of supported Operating Systems for Nodes]) so that it provides generic, abstract, OS independant
+xref:10_installation/05_requirements/21_supported_architecture.adoc#node-supported-os[the list of supported Operating Systems for Nodes]) so that it provides generic, abstract, OS independent
 primitives to the user who can:
 
 
@@ -29,7 +29,7 @@ primitives to the user who can:
   checking, etc.
 * configure middleware by files (for example in Linux world, whatever the file
   format, and be it from a template or by only specifying enforcement of some
-  configuration parameters) or thanks to the Windows Regestry,
+  configuration parameters) or thanks to the Windows Registry,
 * manage service start-up at boot time and ensure that a service is correctly
   running at any time, starting it up again if needed.
 
@@ -39,7 +39,7 @@ The simple primitives can be simply mixed and xref:00_introduction/20_key_featur
 solutions for any and all of your unique use cases of software stacks,
 deployments, IT services or configuration that can't be natively supported.
 
-==== Centralize and aggregate real configuration states
+== Centralize and aggregate real configuration states
 
 The nominal working mode of Rudder is a **continuous verification** mode, which
 makes Rudder manage the whole application life cycle and check that configurations
@@ -61,7 +61,7 @@ where the drift comes from.
 image::introduction/rule_compliance_details.png["Fine grained reporting on configuration components", align="center"]
 
 
-==== Automatic inventory
+== Automatic inventory
 
 Rudder automatically does a technical, detailed inventory of the servers on
 which the agent is installed.
@@ -70,21 +70,21 @@ hard drives, etc), networks information (network interface and configuration),
 OS level data (OS type and name, version and patch level, etc) and software
 information (installed software with their versions).
 
-These informations are available in Rudder configuration data base and can be
-used to defined coniguration rule targets. Typically, some configurations are
+This information is available in the Rudder configuration database and can be
+used to define configuration rule targets. Typically, some configurations are
 linked to the kind of server (physical or virtual), the quantity of RAM
 available, the version of an OS library which contains a security bug, etc.
 
 All of these data are also available xref:80_extension_and_integration/40_Rudder_Integration.adoc#rudder-api-integration[through Rudder APIs].
 
-==== REST API
+== REST API
 
 All Rudder commands are available through an exhaustive REST API. That API is
 http://www.rudder-project.org/rudder-api-doc/[fully documented online] and can
 be used to xref:80_extension_and_integration/40_Rudder_Integration.adoc#rudder-api-integration[quickly and smoothly integrate Rudder with your existing infrastructure].
 
 
-==== Audit trace and Change Requests
+== Audit trace and Change Requests
 
 Any change done thanks to Rudder in your infrastructure is automatically
 recorded in an *Audit Log* which allows a full traceability of all changes.
@@ -102,7 +102,7 @@ a CMDB, so that it can integrated into an existing company workflow. This
 integration is done thanks to xref:80_extension_and_integration/40_Rudder_Integration.adoc#rudder-integration[an existing
 plugin or a dedicated synchronisation tool].
 
-==== Centralized authentication (LDAP, Active Directory, plugins)
+== Centralized authentication (LDAP, Active Directory, plugins)
 
 Rudder can use enterprise directories (LDAP, Active Directory)
 or be connected to an SSO to manage users authentication.
@@ -111,11 +111,11 @@ Moreover, Rudder authentication layer is plugable and can be extended to other
 authentication protocol xref:80_extension_and_integration/20_Rudder_plugins.adoc#extending-rudder-with-plugins[like Radius or SPNEGO with plugins].
 
 [[intro-rudder-extensibility]]
-==== Extensibilty
+== Extensibility
 
 Rudder has a built-in library of common software components and configuration.
 But of course, your infrastructure is not limited to that handful of standard
-components and that's why Rudder was made to be extremelly simply extended so
+components and that's why Rudder was made to be extremely simply extended so
 that it can manage services, process or software specific to your company and
 your workflows.
 
@@ -123,16 +123,16 @@ To achieve that goal, Rudder provided a big set of OS independent and generic,
 unitary modules. Rudder agent is able to translate these abstract modules to
 native OS specific commands and configurations.
 
-Modules are atomic tasks, that can be extremelly simple (for example, check the
+Modules are atomic tasks, that can be extremely simple (for example, check the
 existence of a file, create an user or a group, update a software package) or
-more complexe (for example, import JSON data from a REST API).
+more complex (for example, import JSON data from a REST API).
 For information, the following image provides a NON-exhaustive list of
 available modules:
 
 image::introduction/generic_methods_list.png["Non exhaustive list of generic methods", align="center"]
 
 These generic, unitary modules can be used to build new higher level,
-OS independent, parametrizable configuration modules. By combining these module,
+OS independent, parameterizable configuration modules. By combining these modules,
 you are able to manage any configuration and build advanced configuration
 policies for your IT services:
 
diff --git a/src/reference/modules/ROOT/assets/images/spreadsheet-list-nodes.png b/src/reference/modules/administration/assets/images/spreadsheet-list-nodes.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/spreadsheet-list-nodes.png
rename to src/reference/modules/administration/assets/images/spreadsheet-list-nodes.png
diff --git a/src/reference/modules/ROOT/assets/images/workflow_roles.png b/src/reference/modules/administration/assets/images/workflow_roles.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/workflow_roles.png
rename to src/reference/modules/administration/assets/images/workflow_roles.png
diff --git a/src/reference/modules/administration/nav.list b/src/reference/modules/administration/nav.list
new file mode 100644
index 00000000..0168a966
--- /dev/null
+++ b/src/reference/modules/administration/nav.list
@@ -0,0 +1,2 @@
+administration.adoc
+advanced_administration.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/cfengine-server.adoc b/src/reference/modules/administration/pages/_partials/glossary/cfengine-server.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/cfengine-server.adoc
rename to src/reference/modules/administration/pages/_partials/glossary/cfengine-server.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/ldap-server.adoc b/src/reference/modules/administration/pages/_partials/glossary/ldap-server.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/ldap-server.adoc
rename to src/reference/modules/administration/pages/_partials/glossary/ldap-server.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/sql-server.adoc b/src/reference/modules/administration/pages/_partials/glossary/sql-server.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/sql-server.adoc
rename to src/reference/modules/administration/pages/_partials/glossary/sql-server.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/web-server-application.adoc b/src/reference/modules/administration/pages/_partials/glossary/web-server-application.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/web-server-application.adoc
rename to src/reference/modules/administration/pages/_partials/glossary/web-server-application.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/web-server-front-end.adoc b/src/reference/modules/administration/pages/_partials/glossary/web-server-front-end.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/web-server-front-end.adoc
rename to src/reference/modules/administration/pages/_partials/glossary/web-server-front-end.adoc
diff --git a/src/reference/modules/administration/pages/administration.adoc b/src/reference/modules/administration/pages/administration.adoc
new file mode 100644
index 00000000..70de62f5
--- /dev/null
+++ b/src/reference/modules/administration/pages/administration.adoc
@@ -0,0 +1,721 @@
+= Basic administration
+
+This chapter covers basic administration task of Rudder services like
+configuring some parameters of the Rudder policy server, reading the services
+log, and starting, stopping or restarting Rudder services.
+
+[[archives, Archives]]
+== Archives
+
+=== Archive use cases
+
+The archive feature of Rudder allows you to:
+
+* Exchange configuration between multiple Rudder instances, in particular when
+having distinct environments;
+
+* Keep a history of major changes.
+
+==== Changes testing
+
+Export the current configuration of Rudder before you begin to make any change
+you have to test: if anything goes wrong, you can return to this archived state.
+
+==== Changes qualification
+
+Assuming you have multiple Rudder instances, each one dedicated to the
+development, qualification and production environments. You can prepare the
+changes on the development instance, export an archive, deploy this archive on
+the qualification environment, then on the production environment.
+
+
+
+.Versions of the Rudder servers
+[WARNING]
+===========
+
+If you want to export and import configurations between environments, the version
+of the source and target Rudder server must be exactly the same. If the versions
+don't match (even if only the minor versions are different), there is a risk that
+the import will break the configuration on the target Rudder server.
+
+===========
+
+
+
+=== Concepts
+
+In the 'Administration > Archives' section of the Rudder Server web interface, you
+can export and import the configuration of Rudder Groups, Directives and Rules.
+You can either archive the complete configuration, or only the subset dedicated
+to Groups, Directives or Rules.
+
+When archiving configuration, a 'git tag' is created into +/var/rudder/configuration-repository+.
+This tag is then referenced in the Rudder web interface, and available for download
+as a zip file. Please note that each change in the Rudder web interface is also
+committed in the repository.
+
+The content of this repository can be imported into any Rudder server (with the same version).
+
+=== Archiving
+
+To archive Rudder Rules, Groups, Directives, or make a global archive, you need to go to
+the 'Administration > Archives' section of the Rudder Server web interface.
+
+To perform a global archive, the steps are:
+
+. Click on 'Archive everything' - it will update the drop down list 'Choose an archive' with
+the latest data
+. In the drop down list 'Choose an archive', select the newly created archive (archives are sorted
+by date), for example 2015-01-08 16:39
+. Click on 'Download as zip' to download an archive that will contain all elements.
+
+=== Importing configuration
+
+On the target server, importing the configuration will "merge" them with the existing configuration:
+every groups, rules, directives or techniques with the same identifier will be replaced by the import,
+and all others will remain untouched.
+
+To import the archive on the target Rudder server, you can follow the following steps:
+
+. Uncompress the zip archive in /var/rudder/configuration-repository
+. If necessary, correct all files permissions: +chown -R root:rudder directives groups parameters ruleCategories rules techniques+ and +chown -R ncf-api-venv:rudder ncf/50_techniques techniques/ncf_techniques+
+. Add all files in the git repository: +git add . && git commit -am "Importing configuration"+
+. Finally, in the Web interface, go to the 'Administration > Archives' section, and select
+'Latest Git commit' in the drop down list in the Global archive section, and click on 'Restore
+everything' to restore the configuration.
+
+[TIP]
+
+====
+
+You can also perform the synchronisation from one environment to another by
+using git, through a unique git repository referenced on both environments.
+
+For instance, using one unique git repository you can follow this workflow:
+
+. On Rudder test:
+
+.. Use Rudder web interface to prepare your policy;
+
+.. Create an archive;
+
+.. +git push+ to the central repository;
+
+. On Rudder production:
+
+.. +git pull+ from the central repository;
+
+.. Use the Rudder web interface to import the qualified archive.
+
+====
+
+=== Deploy a preconfigured instance
+
+You can use the procedures of Archiving and Restoring configuration to deploy
+preconfigured instance. You would prepare first in your labs the configuration for
+Groups, Directives and Rules, create an Archive, and import the Archive on the
+new Rudder server installation.
+
+
+== Event Logs
+
+Every action happening in the Rudder web interface is logged in the
+PostgreSQL database. The last 1000 event log entries are displayed in the
+*Administration > View Event Logs* section of Rudder web application. Each
+log item is described by its 'ID', 'Date', 'Actor', and  'Event' 'Type',
+'Category' and 'Description'. For the most complex events, like changes in
+nodes, groups, techniques, directives, deployments, more details can be
+displayed by clicking on the event log line.
+
+Event Categories::
+
+* User Authentication
+* Application
+* Configuration Rules
+* Policy
+* Technique
+* Policy Deployment
+* Node Group
+* Nodes
+* Rudder Agents
+* Policy Node
+* Archives
+
+
+== Policy Server
+
+The *Administration > Policy Server Management* section sums up information about
+Rudder policy server and its parameters.
+
+=== Configure allowed networks
+
+Here you can configure the networks from which nodes are allowed to connect to
+Rudder policy server to get their updated rules.
+
+You can add as many networks as you want, the expected format is:
++networkip/mask+, for example +42.42.0.0/16+.
+
+=== Clear caches
+
+Clear cached data, like node configuration. That will trigger a full
+redeployment, with regeneration of all promises files.
+
+=== Reload dynamic groups
+
+Reload dynamic groups, so that new nodes and their inventories are taken into
+account. Normally, dynamic groups are automatically reloaded unless that feature
+is explicitly disabled in the Rudder configuration file.
+
+
+
+
+
+[[plugins-management]]
+
+== Plugins
+
+Rudder is an extensible software. The *Administration > Plugin Management*
+section sums up information about loaded plugins, their version and their
+configuration.
+
+A plugin is an `.rpkg` file (for "Rudder package").
+
+=== Install a plugin
+
+To install a plugin, copy the `.rpkg` file on your server, and run:
+
+----
+/opt/rudder/bin/rudder-pkg install-file <plugin.rpkg>
+----
+
+You can list currently installed plugins using:
+
+----
+/opt/rudder/bin/rudder-pkg list
+----
+
+You can also enable or disable, or remove a plugin with:
+
+----
+/opt/rudder/bin/rudder-pkg plugin enable <plugin-name>
+/opt/rudder/bin/rudder-pkg plugin disable <plugin-name>
+/opt/rudder/bin/rudder-pkg remove <plugin-name>
+----
+
+See all available commands with:
+
+----
+/opt/rudder/bin/rudder-pkg --help
+----
+
+
+== Basic administration of Rudder services
+
+=== Restart the agent of the node
+
+To restart the Rudder Agent, use following command on a node:
+
+----
+
+service rudder-agent restart
+
+----
+
+[TIP]
+
+====
+
+This command can take more than one minute to restart the CFEngine daemon.
+This is not a bug, but an internal protection system of CFEngine.
+
+====
+
+=== Restart the root rudder service
+
+==== Restart everything
+
+You can restart all components of the Rudder Root Server at once:
+
+----
+
+service rudder-server restart
+
+----
+
+==== Restart only one component
+
+Here is the list of the components of the root server with a brief description
+of their role, and the command to restart them:
+
+include::{partialsdir}/glossary/cfengine-server.adoc[]
+
+----
+
+service rudder-agent restart
+
+----
+
+include::{partialsdir}/glossary/web-server-application.adoc[]
+
+----
+
+service rudder-jetty restart
+
+----
+
+include::{partialsdir}/glossary/web-server-front-end.adoc[]
+
+----
+
+service apache2 restart
+
+----
+
+include::{partialsdir}/glossary/ldap-server.adoc[]
+
+----
+
+service rudder-slapd restart
+
+----
+
+include::{partialsdir}/glossary/sql-server.adoc[]
+
+----
+
+service postgresql* restart
+
+----
+
+[[rest-api]]
+== REST API
+
+Rudder can be used as a web service using a REST API.
+
+This documentation covers the version 1 of Rudder's API.
+
+The version 2 has now been implemented, which is much more complete and
+has a dedicated documentation available here: http://www.rudder-project.org/rudder-api-doc/
+
+[WARNING]
+
+====
+
+The version 1 is to be considered legacy and should not be used anymore. Please migrate to
+version 2 to benefit from the new authentication features and more complete existing methods.
+
+====
+
+
+=== Default setup
+
+Access to the REST API can either use Rudder authentication, or be
+unauthenticated, using authentication mechanisms set elsewhere, for instance at
+Apache level.
+
+==== Rudder Authentication
+
+By default, the access to the REST API is open to users not authenticated in
+Rudder.
+
+The method of authentication can be configured in
++/opt/rudder/etc/rudder-web.properties+
+
+----
+
+rudder.rest.allowNonAuthenticatedUser=true
+
+----
+
+==== Apache access rules
+
+By default, the REST API is exposed for localhost only, at +http://localhost/rudder/api+.
+
+.Example usage of non authenticated REST API
+
+====
+
+Unrestricted access can be granted to local scripts accessing to +localhost+,
+whereas remote access to the REST API will be either denied, or restricted
+through authentication at apache level.
+
+====
+
+==== User for REST actions
+
+Actions done using the REST API are logged by default as run by the user
++UnknownRestUser+.
+
+To change the name of this user, add following header to the HTTP request:
+
+----
+
+X-REST-USERNAME: MyConfiguredRestUser
+
+----
+
+If the REST API is authenticated, the authenticated user name will be used in the
+logs.
+
+=== Status
+
++http://localhost/rudder/api/status+::
+
+Check if Rudder server is up and return +OK+.
+If Rudder server is not responding, an error is displayed.
+
+=== Promises regeneration
+
++http://localhost/rudder/api/deploy/reload+::
+
+Regenerate promises (same action as the +Regenerate now+ button).
+
+=== Dynamic groups regeneration
+
++http://localhost/rudder/api/dyngroup/reload+::
+
+Check all dynamic groups for changes. If changes have occurred, regenerate the
+groups in the LDAP and the CFEngine promises.
+
+=== Technique library reload
+
++http://localhost/rudder/api/techniqueLibrary/reload+::
+
+Check the technique library for changes. If changes have occurred, reload the
+technique library in memory and regenerate the CFEngine promises.
+
+=== Archives manipulation
+
+Various methods are available to import and export items:
+
+==== Archiving:
+
++http://localhost/rudder/api/archives/archive/groups+::
+
+Export node groups and node groups categories.
+
++http://localhost/rudder/api/archives/archive/directives+::
+
+Export policy library (categories, active techniques, directives).
+
++http://localhost/rudder/api/archives/archive/rules+::
+
+Export rules
+
++http://localhost/rudder/api/archives/archive/full+::
+
+Export everything
+
+==== Listing:
+
+
++http://localhost/rudder/api/archives/list/groups+::
+
+List available archives datetime for groups (the datetime is in the format
+awaited for restoration).
+
++http://localhost/rudder/api/archives/list/directives+::
+
+List available archives datetime for policy library (the datetime is in the
+format awaited for restoration).
+
++http://localhost/rudder/api/archives/list/rules+::
+
+List available archives datetime for configuration rules (the datetime is in the
+format awaited for restoration).
+
++http://localhost/rudder/api/archives/list/full+::
+
+List available archives datetime for full archives (the datetime is in the
+format awaited for restoration).
+
+==== Restoring a given archive:
+
+`http://localhost/rudder/api/archives/restore/groups/datetime/[archiveId]`::
+
+Restore given groups archive.
+
+`http://localhost/rudder/api/archives/restore/directives/datetime/[archiveId]`::
+
+Restore given directives archive.
+
+`http://localhost/rudder/api/archives/restore/rules/datetime/[archiveId]`::
+
+Restore given rules archive.
+
+`http://localhost/rudder/api/archives/restore/full/datetime/[archiveId]`::
+
+Restore everything.
+
+==== Restoring the latest available archive (from a previously archived action, and so from a Git tag):
+
+----
+
+http://localhost/rudder/api/archives/restore/groups/latestArchive
+http://localhost/rudder/api/archives/restore/directives/latestArchive
+http://localhost/rudder/api/archives/restore/rules/latestArchive
+http://localhost/rudder/api/archives/restore/full/latestArchive
+
+----
+
+==== Restoring the latest available commit (use Git HEAD):
+
+----
+
+http://localhost/rudder/api/archives/restore/groups/latestCommit
+http://localhost/rudder/api/archives/restore/directives/latestCommit
+http://localhost/rudder/api/archives/restore/rules/latestCommit
+http://localhost/rudder/api/archives/restore/full/latestCommit
+
+----
+
+==== Downloading a ZIP archive
+
+The REST API allows to download a ZIP archive of groups, directives and
+rules (as XML files) for a given Git commit ID (the commit HASH).
+
+It is not designed to query for available Git commit ID, so you will need to get
+it directly from a Git tool (for example with Git log) or from the list API.
+
+Note that this API allows downloading ANY Git commit ID as a ZIP archive,
+not only the one corresponding to Rudder archives.
+
+Note 2: you should rename the resulting file with a ".zip" extension as
+most zip utilities won't work correctly on a file not having it.
+
+`http://localhost/rudder/api/archives/zip/groups/[GitCommitId]`::
+
+Download groups for the given Commit ID as a ZIP archive.
+
+`http://localhost/rudder/api/archives/zip/directives/[GitCommitId]`::
+
+Download directives for the given Commit ID as a ZIP archive.
+
+`http://localhost/rudder/api/archives/zip/rules/[archiveId]`::
+
+Download rules for the given Commit ID as a ZIP archive.
+
+`http://localhost/rudder/api/archives/zip/all/[archiveId]`::
+
+Download groups, directives and rules for the given Commit ID as a ZIP archive.
+
+
+
+[[user-management]]
+
+== User management
+
+Change the users authorized to connect to the application.
+You can define an authorization level for each user.
+
+=== Configuration of the users using a XML file
+
+==== Generality
+
+The credentials of a user are defined in the XML file
++/opt/rudder/etc/rudder-users.xml+. This file expects the following format:
+
+----
+
+<authentication>
+  <user name="alice"   password="secret" role="administrator"/>
+  <user name="bob"     password="secret" role="read_only"/>
+  <user name="charlie" password="secret"/>
+</authentication>
+
+----
+
+The name and password attributes are mandatory (non empty) for the user tags.
+The role attribute can be omitted but the user will have no permission, and
+only valid attributes are recognized.
+
+Every modification of this file should be followed by a restart of the Rudder
+web application to be taken into account:
+
+----
+
+service rudder-jetty restart
+
+----
+
+[[_passwords]]
+==== Passwords
+
+The authentication tag should have a "hash" attribute, making "password" attributes
+on every user expect hashed passwords. Not specifying a hash attribute will fallback
+to plain text passwords, but it is strongly advised not to do so for security reasons.
+
+The algorithm to be used to create the hash (and verify it during authentication)
+depends on the value of the hash attribute. The possible values, the
+corresponding algorithm and the Linux shell command needed to obtain the hash of
+the "secret" password for this algorithm are listed here:
+
+.Hashed passwords algorithms list
+
+[options="header"]
+
+|====
+|Value                 | Algorithm | Linux command to hash the password
+|"md5"                 | MD5       | +read mypass; echo -n $mypass \| md5sum+
+|"sha" or "sha1"       | SHA1      | +read mypass; echo -n $mypass \| shasum+
+|"sha256" or "sha-256" | SHA256    | +read mypass; echo -n $mypass \| sha256sum+
+|"sha512" or "sha-512" | SHA512    | +read mypass; echo -n $mypass \| sha512sum+
+|====
+
+When using the suggested commands to hash a password, you must enter the
+command, then type your password, and hit return. The hash will then be
+displayed in your terminal. This avoids storing the password in your shell
+history.
+
+Here is an example of authentication file with hashed password:
+
+----
+
+<authentication hash="sha512">
+  <!-- password attributes hold SHA-512 hashes (truncated here for readability) -->
+  <user name="alice" password="9b71d224bd62..." role="administrator"/>
+  <user name="bob"   password="a1fc72cae0b1..." role="read_only"/>
+</authentication>
+
+
+----
+
+[[ldap-auth-provider, LDAP authentication provider for Rudder]]
+=== Configuring an LDAP authentication provider for Rudder
+
+If you are operating on a corporate network or want to have your users in a
+centralized database, you can enable LDAP authentication for Rudder users.
+
+==== LDAP is only for authentication
+
+Take care of the following limitation of the current process: only *authentication*
+is delegated to LDAP, NOT *authorizations*. So you still have to
+declare user's authorizations in the Rudder user file (rudder-users.xml).
+
+A user whose authentication is accepted by LDAP but not declared in the
+rudder-users.xml file is considered to have no rights at all (and so will
+only see a reduced version of Rudder homepage, with no action nor tabs available).
+
+
+The credentials of a user are defined in the XML file
++/opt/rudder/etc/rudder-users.xml+. It expects the same format as regular file-based
+user login, but in this case "name" will be the login used to connect to LDAP and the
+'password' field will be ignored and should be set to "LDAP" to make it clear that
+this Rudder installation uses LDAP to log users in.
+
+
+Every modification of this file should be followed by a restart of the Rudder
+web application to be taken into account:
+
+----
+
+service rudder-jetty restart
+
+----
+
+==== Enable LDAP authentication
+
+LDAP authentication is enabled by setting the property +rudder.auth.ldap.enable+ to +true+
+in file +/opt/rudder/etc/rudder-web.properties+
+
+The LDAP authentication process is a bind/search/rebind in which an application
+connection (bind) is used to search (search) for a user entry given some base and
+filter parameters, and then, a bind (rebind) is tried on that entry with the
+credential provided by the user.
+
+
+So next, you have to set-up the connection parameters to the LDAP directory to use.
+There are five properties to change:
+
+- rudder.auth.ldap.connection.url
+- rudder.auth.ldap.connection.bind.dn
+- rudder.auth.ldap.connection.bind.password
+- rudder.auth.ldap.searchbase
+- rudder.auth.ldap.filter
+
+The search base and filter are used to find the user. The search base may be left empty, and
+in the filter, {0} will be replaced by the value provided as user login.
+
+Here are some usage examples,
+
+on standard LDAP:
+
+----
+
+rudder.auth.ldap.searchbase=ou=People
+rudder.auth.ldap.filter=(&(uid={0})(objectclass=person))
+
+----
+
+on Active Directory:
+
+----
+
+rudder.auth.ldap.searchbase=
+rudder.auth.ldap.filter=(&(sAMAccountName={0})(objectclass=user))
+
+----
+
+
+=== Authorization management
+
+For every user you can define an access level, allowing them to access different
+pages or to perform different actions depending on their level.
+
+You can also build custom roles with whatever permission you want, using a type
+and a level as specified below.
+
+image::workflow_roles.png[Roles can match different types of users]
+
+In the xml file, the role attribute is a list of permissions/roles, separated by
+a comma. Each one adds permissions to the user. If one is wrong, or not correctly
+spelled, the user is set to the lowest rights (NoRights), having access only to the
+dashboard and nothing else.
+
+==== Pre-defined roles
+
+|====
+|Name                | Access level
+|administrator | All authorizations granted, can access and modify everything
+|administration_only | Only access to administration part of rudder, can do everything within it.
+|user | Can access and modify everything but the administration part
+|configuration | Can only access and act on configuration section
+|read_only | Can access to every read only part, can perform no action
+|inventory | Access to information about nodes, can see their inventory, but can't act on them
+|rule_only | Access to information about rules, but can't modify them
+|====
+
+For each user you can define more than one role, each role adding its authorization to the user.
+
+Example: "rule_only,administration_only" will only give access to the "Administration" tab as well as the
+Rules.
+
+==== Custom roles
+
+You can set a custom set of permissions instead of a pre-defined role.
+
+A permission is composed of a type and a level:
+
+* Type:  Indicates what kind of data will be displayed and/or can be set/updated by the user
+** "configuration", "rule", "directive", "technique", "node", "group", "administration", "deployment".
+* Level: Access level to be granted on the related type
+** "read", "write", "edit", "all" (Can read, write, and edit)
+
+Depending on the value(s) you give, the user will have access to different pages and actions in Rudder.
+
+Usage example:
+
+* configuration_read    -> Will give read access to the configuration (Rule management, Directives and Parameters)
+* rule_write, node_read -> Will give read and write access to the Rules and read access to the Nodes
+
+=== Going further
+
+Rudder aims at integrating with your IT system transparently, so it can't force
+its own authentication system.
+
+To meet this need, Rudder relies on the modular authentication system Spring
+Security that allows to easily integrate with databases or an
+enterprise SSO like CAS, OpenID or SPNEGO. The documentation for this
+integration is not yet available, but don't hesitate to reach us on this topic.
+
diff --git a/src/reference/modules/administration/pages/advanced_administration.adoc b/src/reference/modules/administration/pages/advanced_administration.adoc
new file mode 100644
index 00000000..d5bc6678
--- /dev/null
+++ b/src/reference/modules/administration/pages/advanced_administration.adoc
@@ -0,0 +1,1144 @@
+= Advanced administration
+
+[[_database_maintenance]]
+=== Database maintenance
+
+Rudder uses two backends to store information as of now: LDAP and SQL
+
+To achieve this, OpenLDAP and PostgreSQL are installed with Rudder.
+
+However, like every database, they require a small amount of maintenance
+to keep operating well. Thus, this chapter will introduce you to the basic
+maintenance procedure you might want to know about these particular database
+implementations.
+
+==== Automatic PostgreSQL table maintenance
+
+Rudder uses an automatic mechanism to automate the archival and pruning of the reports
+database.
+
+By default, this system will:
+
+* Archive reports older than 3 days (30 in Rudder 2.6)
+* Remove reports older than 90 days
+
+It thus reduces the work overhead by only making Rudder handle relevant reports (fresh enough)
+and putting aside old ones.
+
+This is obviously configurable in /opt/rudder/etc/rudder-web.properties, by altering the following
+configuration elements:
+
+* rudder.batch.reportscleaner.archive.TTL: Set the maximum report age before archival
+* rudder.batch.reportscleaner.delete.TTL: Set the maximum report age before deletion
+
+The default values are OK for systems under moderate load, and should be adjusted in case of
+excessive database bloating.
+
+The estimated disk space consumption, with a 5 minute agent run frequency, is 150 to 400 kB per Directive,
+per day and per node, which is roughly 5 to 10 MB per Directive per month and per node.
+
+Thus, 25 directives on 100 nodes, with a 7 day log retention policy, would take 2.5 to 10 GB, and
+25 directives on 1000 nodes with a 1 hour agent execution period and a 30 day log retention policy
+would take 9 to 35 GB.
+
+==== PostgreSQL database vacuum
+
+In some cases, like a large report archiving or deletion, the Rudder interface
+will still display the old database size. This is because even if the database has been
+cleaned as requested, the physical storage backend did not reclaim space on the hard drive,
+resulting in a "fragmented" database. This is not an issue, as PostgreSQL handles this automatically,
+and new reports sent by the nodes to Rudder will fill the blanks in the database, resulting in a
+steady growth of the database. This task is handled by the autovacuum process, which periodically
+cleans the storage regularly to prevent database bloating.
+
+However, to force this operation to free storage immediately, you can trigger a "vacuum full" operation
+by yourself, however keep in mind that this operation is very disk and memory intensive,
+and will lock both the Rudder interface and the reporting system for quite a long time with a big database.
+
+[source,python]
+
+.Manual vacuuming using the psql binary
+
+----
+
+# You can either use sudo to change owner to the postgres user, or use the rudder connection credentials.
+
+# With sudo:
+sudo -u postgres psql -d rudder
+
+# With rudder credentials, it will ask the password in this case:
+psql -u rudder -d rudder -W
+
+# And then, when you are connected to the rudder database in the psql shell, trigger a vacuum:
+rudder=# VACUUM FULL;
+
+# And take a coffee.
+
+----
+
+==== LDAP database reindexing
+
+In some very rare cases, you will encounter some LDAP database entries that are not indexed and used
+during searches. In that case, OpenLDAP will output warnings to notify you that they should be.
+
+[source,python]
+
+.LDAP database reindexing
+
+----
+
+# Stop OpenLDAP
+service rudder-slapd stop
+
+# Reindex the databases
+service rudder-slapd reindex
+
+# Restart OpenLDAP
+service rudder-slapd restart
+
+----
+
+[[_migration_backups_and_restores]]
+=== Migration, backups and restores
+
+It is advised to backup frequently your Rudder installation in case
+of a major outage.
+
+These procedures will explain how to backup your Rudder installation.
+
+==== Backup
+
+This backup procedure will operate on the three principal Rudder data sources:
+
+* The LDAP database
+* The PostgreSQL database
+* The configuration-repository folder
+
+It will also backup the application logs.
+
+[source,python]
+
+.How to backup a Rudder installation
+
+----
+
+# First, backup the LDAP database:
+/opt/rudder/sbin/slapcat -l /tmp/rudder-backup-$(date +%Y%m%d).ldif
+
+# Second, the PostgreSQL database:
+sudo -u postgres pg_dump rudder > /tmp/rudder-backup-$(date +%Y%m%d).sql
+
+# Or without sudo, use the rudder application password:
+pg_dump -U rudder rudder > /tmp/rudder-backup-$(date +%Y%m%d).sql
+
+# Third, backup the configuration repository:
+tar -C /var/rudder -zvcf /tmp/rudder-backup-$(date +%Y%m%d).tar.gz configuration-repository/ cfengine-community/ppkeys/
+
+# Finally, backup the logs:
+tar -C /var/log -zvcf /tmp/rudder-log-backup-$(date +%Y%m%d).tar.gz rudder/
+
+# And put the backups wherever you want, here /root:
+cp /tmp/rudder-backup* /root
+cp /tmp/rudder-log-backup* /root
+
+----
+
+==== Restore
+
+Of course, after a total machine crash, you will have your backups at hand,
+but what should you do with them?
+
+Here is the restoration procedure:
+
+[source,python]
+
+.How to restore a Rudder backup
+
+----
+
+# First, follow the standard installation procedure, this one assumes you have a working "blank"
+# Rudder on the machine
+
+# Disable Rudder agent
+rudder agent disable
+
+# Stop Rudder services
+service rudder stop
+
+# Drop the OpenLDAP database
+rm -rf /var/rudder/ldap/openldap-data/*.mdb
+
+# Import your backups
+
+# Configuration repository
+tar -C /var/rudder -zvxf /root/rudder-backup-XXXXXXXX.tar.gz
+
+# LDAP backup
+/opt/rudder/sbin/slapadd -l /root/rudder-backup-XXXXXXXX.ldif
+
+# Start PostgreSQL
+service postgresql start
+
+# PostgreSQL backup
+sudo -u postgres psql -d rudder < /root/rudder-backup-XXXXXXXX.sql
+# or
+psql -u rudder -d rudder -W < /root/rudder-backup-XXXXXXXX.sql
+
+# Enable Rudder agent
+rudder agent enable
+
+# And restart the machine or just Rudder:
+service rudder restart
+
+----
+
+==== Migration
+
+To migrate a Rudder installation, just backup and restore your Rudder installation
+from one machine to another.
+
+If your server address changed, you will also have to do the following on
+every node that is directly connected to it (managed nodes or relays):
+
+* Remove the server public key +rm /var/rudder/cfengine-community/ppkeys/root-MD5=*.pub+
+* Modify +/var/rudder/cfengine-community/policy_server.dat+ with the new address, then you can force your nodes to send their inventory by running +rudder agent inventory+
+
+
+[[_performance_tuning]]
+=== Performance tuning
+
+Rudder and some applications used by Rudder (like the Apache web server, or Jetty)
+can be tuned to your needs.
+
+[[_reports_retention]]
+==== Reports retention
+
+To lower Rudder server's disk usage, you can configure the retention duration
+for node's execution reports in
++/opt/rudder/etc/rudder-web.properties+ file with the options:
+
++rudder.batch.reportscleaner.archive.TTL=30+
+
++rudder.batch.reportscleaner.delete.TTL=90+
+
+==== Apache web server
+
+The Apache web server is used by Rudder as a proxy, to connect to the Jetty
+application server, and to receive inventories using the WebDAV protocol.
+
+There are tons of documentation about Apache performance tuning available on the
+Internet, but the defaults should be enough for most setups.
+
+==== Jetty
+
+The Jetty application server is the service that runs Rudder web application and inventory
+endpoint. It uses the Java runtime environment (JRE).
+
+The default settings fit the basic recommendations for minimal Rudder hardware requirements,
+but there are some configuration switches that you might need to tune to obtain better
+performance with Rudder, or correct e.g. timezone issues.
+
+To look at the available optimization knobs, please take a look at +/etc/default/rudder-jetty+
+on your Rudder server.
+
+==== Java "Out Of Memory Error"
+
+It may happen that you get java.lang.OutOfMemoryError.
+They can be of several types,
+but the most common is: "java.lang.OutOfMemoryError: Java heap space".
+
+This error means that the web application needs more RAM than what was given.
+It may be linked to a bug where some process consumed much more memory than
+needed, but most of the time, it simply means that your system has grown and needs
+more memory.
+
+You can follow the configuration steps described in the following paragraph.
+
+[[_configure_ram_allocated_to_jetty]]
+==== Configure RAM allocated to Jetty
+
+To change the RAM given to Jetty, you have to:
+
+----
+
+# edit +/etc/default/rudder-jetty+ with your preferred text editor, for example vim:
+vim /etc/default/rudder-jetty
+
+Notice: this file is similar to +/opt/rudder/etc/rudder-jetty.conf+, which is the file with
+default values. +/opt/rudder/etc/rudder-jetty.conf+ should never be modified directly, because
+any modification would be erased by packaging in the next Rudder version update.
+
+# modify JAVA_XMX to set the value to your need.
+# The value is given in MB by default, but you can also use the "G" unit to specify a size in GB.
+
+JAVA_XMX=2G
+
+# save your changes, and restart Jetty:
+service rudder-jetty restart
+
+----
+
+The amount of memory should be the half of the RAM of the server, rounded up to the nearest GB.
+For example, if the server has 5GB of RAM, 3GB should be allocated to Jetty.
+
+[[_optimize_postgresql_server]]
+==== Optimize PostgreSQL server
+
+The default out-of-the-box configuration of PostgreSQL server is really not
+compliant for high end (or normal) servers. It uses a really small amount of
+memory.
+
+The location of the PostgreSQL server configuration file is usually:
+
+----
+
+/etc/postgresql/9.x/main/postgresql.conf
+
+----
+
+On a SuSE system:
+
+----
+
+/var/lib/pgsql/data/postgresql.conf
+
+----
+
+
+
+===== Suggested values on an high end server
+
+----
+#
+# Amount of System V shared memory
+# --------------------------------
+#
+# A reasonable starting value for shared_buffers is 1/4 of the memory in your
+# system:
+
+shared_buffers = 1GB
+
+# You may need to set the proper amount of shared memory on the system.
+#
+#   $ sysctl -w kernel.shmmax=1073741824
+#
+# Reference:
+# http://www.postgresql.org/docs/8.4/interactive/kernel-resources.html#SYSVIPC
+#
+# Memory for complex operations
+# -----------------------------
+#
+# Complex query:
+
+work_mem = 24MB
+max_stack_depth = 4MB
+
+# Complex maintenance: index, vacuum:
+
+maintenance_work_mem = 240MB
+
+# Write ahead log
+# ---------------
+#
+# Size of the write ahead log:
+
+wal_buffers = 4MB
+
+# Query planner
+# -------------
+#
+# Gives hint to the query planner about the size of disk cache.
+#
+# Setting effective_cache_size to 1/2 of total memory would be a normal
+# conservative setting:
+
+effective_cache_size = 1024MB
+
+----
+
+===== Suggested values on a low end server
+
+----
+
+shared_buffers = 128MB
+work_mem = 8MB
+max_stack_depth = 3MB
+maintenance_work_mem = 64MB
+wal_buffers = 1MB
+effective_cache_size = 128MB
+
+----
+
+
+==== CFEngine
+
+If you are using Rudder on a highly stressed machine, which has especially slow or busy
+I/Os, you might experience a sluggish CFEngine agent run every time the machine
+tries to comply with your Rules.
+
+This is because the CFEngine agent tries to update its internal databases every time the agent
+executes a promise (the .lmdb files in the /var/rudder/cfengine-community/state directory),
+which even if the database is very light, takes some time if the machine has a very high iowait.
+
+In this case, here is a workaround you can use to restore CFEngine's full speed: you can use
+a RAMdisk to store CFEngine states.
+
+You might use this solution either temporarily, to examine a slowness problem, or permanently, to mitigate a
+known I/O problem on a specific machine. We do not recommend as of now to use this on a whole IT infrastructure.
+
+Be warned, this solution has a drawback: you should backup and restore the content of this directory
+manually in case of a machine reboot because all the persistent states are stored here, so in case you are using,
+for example the jobScheduler Technique, you might encounter an unwanted job execution because CFEngine will have
+"forgotten" the job state.
+
+Also, note that the mode=0700 is important as CFEngine will refuse to run correctly if the state directory is
+world readable, with an error like:
+
+----
+error: UNTRUSTED: State directory /var/rudder/cfengine-community (mode 770) was not private!
+----
+
+Here is the command line to use:
+
+[source,bash]
+
+.How to mount a RAMdisk on CFEngine state directory
+
+----
+
+# How to mount the RAMdisk manually, for a "one shot" test:
+mount -t tmpfs -o size=128M,nr_inodes=2k,mode=0700,noexec,nosuid,noatime,nodiratime tmpfs /var/rudder/cfengine-community/state
+
+# How to put this entry in the fstab, to make the modification permanent
+echo "tmpfs /var/rudder/cfengine-community/state tmpfs defaults,size=128M,nr_inodes=2k,mode=0700,noexec,nosuid,noatime,nodiratime 0 0" >> /etc/fstab
+mount /var/rudder/cfengine-community/state
+
+----
+
+[[_rsyslog]]
+==== Rsyslog
+
+If you are using syslog over TCP as reporting protocol (it is set in *Administration* -> *Settings* -> *Protocol*),
+you can experience issues with rsyslog on Rudder
+policy servers (root or relay) when managing a large number of nodes.
+This happens because using TCP implies the system has to keep track of
+the connections. It can lead to reach some limits, especially:
+
+* max number of open files for the user running rsyslog
+* size of network backlogs
+* size of the conntrack table
+
+You have two options in this situation:
+
+* Switch to UDP (in *Administration* -> *Settings* -> *Protocol*). It is less reliable
+  than TCP and you can lose reports in case of networking or load issues, but it will
+  prevent breaking your server, and allow to manage more Nodes.
+* Stay on TCP. Do this only if you need to be sure you will get all your reports
+  to the server. You should follow the instructions below to tune your system
+  to handle more connections.
+
+All settings needing to modify '/etc/sysctl.conf' require to run 'sysctl -p'
+to be applied.
+
+===== Maximum number of TCP sessions in rsyslog
+
+You may need to increase the maximum number of TCP sessions that rsyslog will accept.
+Add to your '/etc/rsyslog.conf':
+
+----
+$ModLoad imtcp
+# 500 for example, depends on the number of nodes and the agent run frequency
+$InputTCPMaxSessions 500
+----
+
+Note: You can use 'MaxSessions' instead of 'InputTCPMaxSessions' on rsyslog >= 7.
+
+===== Maximum number of file descriptors
+
+If you plan to manage hundreds of Nodes behind a relay or a root server, you should increase
+the open file limit (10k is a good starting point, you might have to get to 100k with
+thousands of Nodes).
+
+You can change the system-wide maximum number of file descriptors in '/etc/sysctl.conf' if necessary:
+
+----
+fs.file-max = 100000
+----
+
+Then you have to get the user running rsyslog enough file descriptors. To do so,
+you have to:
+
+* Have a high enough hard limit for rsyslog
+* Set the limit used by rsyslog
+
+The first one can be set in '/etc/security/limits.conf':
+
+----
+username hard nofile 8192
+----
+
+For the second one, you have two options:
+
+* Set the soft limit (which will be used by default) in '/etc/security/limits.conf' (with 'username soft nofile 8192')
+* If you want to avoid changing soft limit (particularly if rsyslog is running as root), you
+  can configure rsyslog to change its limit to a higher value (but not higher than the hard limit)
+  with the '$MaxOpenFiles' configuration directive in '/etc/rsyslog.conf'
+
+You have to restart rsyslog for these settings to take effect.
+
+You can check current soft and hard limits by running the following commands as the user you want to check:
+
+----
+ulimit -Sn
+ulimit -Hn
+----
+
+===== Network backlog
+
+You can also have issues with the network queues (which may for example lead to sending SYN cookies):
+
+* You can increase the maximum number of connection requests awaiting acknowledgment by changing
+  'net.ipv4.tcp_max_syn_backlog = 4096' (for example, the default is 1024) in '/etc/sysctl.conf'.
+* You may also have to increase the socket listen() backlog in case of bursts, by changing
+  'net.core.somaxconn = 1024' (for example, default is 128) in '/etc/sysctl.conf'.
+
+===== Conntrack table
+
+You may reach the size of the conntrack table, especially if you have other applications
+running on the same server. You can increase its size in '/etc/sysctl.conf',
+see http://www.netfilter.org/documentation/FAQ/netfilter-faq.html#toc3.7[the Netfilter FAQ]
+for details.
+
+
+[[password-management]]
+
+=== Password management
+
+You might want to change the default passwords used in Rudder's managed daemons
+for evident security reasons.
+
+==== Configuration of the postgres database password
+
+You will have to adjust the postgres database and the rudder-web.properties file.
+
+Here is a semi-automated procedure:
+
+* Generate a decently fair password. You can use an arbitrary one too.
+
+----
+
+PASS=`dd if=/dev/urandom count=128 bs=1 2>&1 | md5sum | cut -b-12`
+
+----
+
+* Update the Postgres database user
+
+----
+
+su - postgres -c "psql -q -c \"ALTER USER blah WITH PASSWORD '$PASS'\""
+
+----
+
+* Insert the password in the rudder-web.properties file
+
+----
+
+sed -i "s%^rudder.jdbc.password.*$%rudder.jdbc.password=$PASS%" /opt/rudder/etc/rudder-web.properties
+
+----
+
+==== Configuration of the OpenLDAP manager password
+
+You will have to adjust the OpenLDAP and the rudder-web.properties file.
+
+Here is a semi-automated procedure:
+
+* Generate a decently fair password. You can use an arbitrary one too.
+
+----
+
+PASS=`dd if=/dev/urandom count=128 bs=1 2>&1 | md5sum | cut -b-12`
+
+----
+
+* Update the password in the slapd configuration
+
+----
+
+HASHPASS=`/opt/rudder/sbin/slappasswd -s $PASS`
+sed -i "s%^rootpw.*$%rootpw          $HASHPASS%" /opt/rudder/etc/openldap/slapd.conf
+
+----
+
+* Update the password in the rudder-web.properties file
+
+----
+
+sed -i "s%^ldap.authpw.*$%ldap.authpw=$PASS%" /opt/rudder/etc/rudder-web.properties
+
+----
+
+==== Configuration of the WebDAV access password
+
+This time, the procedure is a bit more tricky, as you will have to update
+the Technique library as well as a configuration file.
+
+Here is a semi-automated procedure:
+
+* Generate a decently fair password. You can use an arbitrary one too.
+
+----
+
+PASS=`dd if=/dev/urandom count=128 bs=1 2>&1 | md5sum | cut -b-12`
+
+----
+
+* Update the password in the apache htaccess file
+
+[TIP]
+
+====
+
+On some systems, especially SuSE ones, htpasswd is called as "htpasswd2"
+
+====
+
+----
+
+htpasswd -b /opt/rudder/etc/htpasswd-webdav rudder $PASS
+
+----
+
+* Update the password in Rudder's system Techniques
+
+----
+
+cd /var/rudder/configuration-repository/techniques/system/common/1.0/
+sed -i "s%^.*davpw.*$%   \"davpw\" string => \"$PASS\"\;%" site.st
+git commit -m "Updated the rudder WebDAV access password" site.st
+
+----
+
+* Update the Rudder Directives by either reloading them in the web interface (in the "Configuration Management/Techniques" tab) or restarting jetty (NOT recommended)
+
+=== Password upgrade
+
+This version of Rudder uses a central file to manage the passwords that will
+be used by the application: /opt/rudder/etc/rudder-passwords.conf
+
+When first installing Rudder, this file is initialized with default values,
+and when you run rudder-init, it will be updated with randomly generated
+passwords.
+
+On the majority of cases, this is fine, however you might want to adjust the
+passwords manually. This is possible, just be cautious when editing the file,
+as if you corrupt it Rudder will not be able to operate correctly anymore and
+will spit numerous errors in the program logs.
+
+As of now, this file follows a simple syntax: ELEMENT:password
+
+You are able to configure three passwords in it: The OpenLDAP one, the
+PostgreSQL one and the authenticated WebDAV one.
+
+If you edit this file, Rudder will take care of applying the new passwords
+everywhere it is needed, however it will restart the application automatically
+when finished, so take care of notifying users of potential downtime before
+editing passwords.
+
+Here is a sample command to regenerate the WebDAV password with a random
+password, that is portable on all supported systems. Just change the
+"RUDDER_WEBDAV_PASSWORD" to any password file statement corresponding to
+the password you want to change.
+
+----
+
+sed -i s/RUDDER_WEBDAV_PASSWORD.*/RUDDER_WEBDAV_PASSWORD:$(dd if=/dev/urandom count=128 bs=1 2>&1 | md5sum | cut -b-12)/ /opt/rudder/etc/rudder-passwords.conf
+
+----
+
+
+=== Use a database on a separate server
+
+This section allows installing a separate database only without splitting the rest of the server components
+like when using the rudder-multiserver-setup script.
+The setup is done in two places: on the database server and on the Rudder root server.
+
+It also allows moving an existing database to another server.
+
+[TIP]
+
+.Use different user and database names
+
+====
+
+It can be useful, for example if you want to share your database server between several Rudder root servers (see note below),
+to use a different database for your Rudder root server. To do so:
+
+* Create the new database (replace `alternate_user_name`, `alternate_base_name` and specify a password):
+
+----
+
+su - postgres -c "psql -q -c \"CREATE USER alternate_user_name WITH PASSWORD 'GENERATE_A_PASSWORD'\""
+su - postgres -c "psql -q -c \"CREATE DATABASE alternate_base_name WITH OWNER = alternate_user_name\""
+
+----
+
+* Initialize it. First copy the initialization script:
+
+----
+
+ cp /opt/rudder/etc/postgresql/reportsSchema.sql /opt/rudder/etc/postgresql/reportsSchema-alternate.sql
+
+----
+
+* In the copied file, change the:
+
+----
+
+ALTER database rudder SET standard_conforming_strings=true;
+
+----
+
+To:
+
+----
+
+ALTER database alternate_base_name SET standard_conforming_strings=true;
+
+----
+
+* Then apply the script:
+
+----
+
+su - postgres -c "psql -q -U alternate_user_name -h localhost -d alternate_base_name \
+     -f /opt/rudder/etc/postgresql/reportsSchema-alternate.sql"
+
+----
+
+* Follow the standard instructions of this section, with two differences:
+
+** You need to adjust the line added to `pg_hba.conf` to match your user and database name.
+
+** You need to also change the database name and user in `rudder-web.properties`.
+
+====
+
+[CAUTION]
+
+.Use the same database server for several Rudder root servers
+
+====
+
+It is possible to share the same database server between several Rudder instances,
+by following the preceding tip to use a different database than the default one.
+However, there are some important points to know:
+
+* This database server can only be used with the rudder-db role in case of multiserver setup.
+
+* This database server can only be a node for one of the Rudder servers. This also means that this
+root server will have indirect access to the content of the other databases.
+
+====
+
+
+
+==== On the database server
+
+* Install and configure the agent on the node, and install the *rudder-reports* package.
+
+* Change the `postgresql.conf` file (usually in `/var/lib/pgsql` or `/etc/postgresql`), to listen on the right interface to communicate with the server:
+
+----
+
+# you can use '*' to listen on all interfaces
+listen_addresses = 'IP_TO_USE'
+
+----
+
+* Also ensure that network policies (i.e. the firewall settings) allow PostgreSQL flows from the root server to the database server.
+
+* Add an authorization line for the server (in `pg_hba.conf`, in the same directory):
+
+----
+
+host    rudder          rudder          ROOT_SERVER_IP/32       md5
+
+----
+
+* Restart postgresql to apply the new settings:
+
+----
+
+service postgresql restart
+
+----
+
+* Execute the following command to configure the password (that should be the same as RUDDER_PSQL_PASSWORD in `/opt/rudder/etc/rudder-passwords.conf` on the root server):
+
+----
+
+su - postgres -c "psql -c \"ALTER USER rudder WITH PASSWORD 'RUDDER_SERVER_DATABASE_PASSWORD'\""
+
+----
+
+* Run an inventory to the server:
+
+----
+
+rudder agent inventory
+
+----
+
+==== On the root server
+
+In the following section, DATABASE_HOST refers to the hostname of the new database server, and SERVER_HOST to the hostname of
+the root server.
+
+* Remove the rudder-server-root and rudder-reports packages if installed. For example, you can run on Debian:
+
+----
+
+service rudder restart
+apt-mark manual rudder-webapp rudder-inventory-endpoint
+apt-get remove --purge rudder-reports
+
+----
+
+* You can also remove the postgresql package and database from the server if installed, but keep in mind you will lose all existing data.
+You can follow the xref:43_advanced_administration/15_migration_backup_restore.adoc#_migration_backups_and_restores[backup and restore] procedure to migrate the data to the new database.
+
+* Change the hostname in `/opt/rudder/etc/rudder-web.properties`:
+
+----
+
+rudder.jdbc.url=jdbc:postgresql://DATABASE_HOST:5432/rudder
+
+----
+
+* Edit `/var/rudder/cfengine-community/inputs/rudder-server-roles.conf` and set the following line:
+
+----
+
+rudder-db:DATABASE_HOST
+
+----
+
+* Edit the /etc/rsyslog.d/rudder.conf file and change the hostname in:
+
+----
+
+:ompgsql:DATABASE_HOST,rudder,rudder,...
+
+----
+
+* Run an inventory:
+
+----
+
+rudder agent inventory
+
+----
+
+* Restart rudder services:
+
+----
+
+service rsyslog restart
+service rudder restart
+
+----
+
+* Clear the cache (in Administration -> Settings)
+
+You should now have finished configuring the database server. You can check the technical logs to see if reports are correctly
+written into the database and read by the web application.
+
+
+[[multiserver-rudder]]
+=== Multiserver Rudder
+
+From version 3.0 Rudder can be divided into 4 different components:
+
+- rudder-web: an instance with the webapp and the central policy server
+- rudder-ldap: the inventory endpoint and its ldap backend
+- rudder-db: the postgresql storage
+- rudder-relay-top: the contact point for nodes
+
+==== Preliminary steps
+
+
+You need the setup scripts provided at https://github.com/normation/rudder-tools/tree/master/scripts/rudder-multiserver-setup.
+You can download them with this command:
+
+----
+
+mkdir rudder-multiserver-setup
+cd rudder-multiserver-setup
+for i in add_repo detect_os.sh rudder-db.sh rudder-ldap.sh rudder-relay-top.sh rudder-web.sh
+do
+  wget --no-check-certificate https://raw.githubusercontent.com/Normation/rudder-tools/master/scripts/rudder-multiserver-setup/$i
+done
+chmod 755 *
+cd ..
+
+
+----
+
+
+You need 4 instances of supported OS, one for each component.
+Only the rudder-web instance needs at least 2GB of RAM.
+
+Register the 4 names in the DNS or add them in /etc/hosts on each instance.
+
+Add firewall rules:
+
+- from rudder-web to rudder-db port pgsql TCP
+- from rudder-* to rudder-web port rsyslog 514 TCP
+- from rudder-relay-top to rudder-ldap port 8080 TCP
+- from rudder-web to rudder-ldap port 8080 TCP
+- from rudder-web to rudder-ldap port 389 TCP
+- from rudder-web to rudder-relay-top port 5309
+
+
+==== Install rudder-relay-top
+
+Copy the rudder-multiserver-setup directory to your instance.
+
+Run rudder-relay-top.sh as root, replacing `<rudder-web-host>` with the hostname of the rudder-web instance:
+
+----
+
+cd rudder-multiserver-setup
+./rudder-relay-top.sh <rudder-web-host>
+
+----
+
+Take note of the UUID.
+If you need to read it later, it is in the file /opt/rudder/etc/uuid.hive
+
+==== Install rudder-db
+
+Copy the rudder-multiserver-setup directory to your instance.
+
+Run rudder-db.sh as root, replacing `<rudder-web-host>` with the hostname of the rudder-web instance, and `<allowed-network>` with the network containing the rudder-web instances:
+
+----
+
+cd rudder-multiserver-setup
+./rudder-db.sh <rudder-web-host> <allowed-network>
+
+----
+
+==== Install rudder-ldap
+
+Copy the rudder-multiserver-setup directory to your instance.
+
+Run rudder-ldap.sh as root, replacing `<rudder-web-host>` with the hostname of the rudder-web instance:
+
+----
+
+cd rudder-multiserver-setup
+./rudder-ldap.sh <rudder-web-host>
+
+----
+
+==== Install rudder-web
+
+Copy the rudder-multiserver-setup directory to your instance.
+
+Run rudder-web.sh as root, replacing each placeholder with the hostname of the corresponding instance:
+
+----
+
+cd rudder-multiserver-setup
+./rudder-web.sh <rudder-relay-top-host> <rudder-db-host> <rudder-ldap-host>
+
+----
+
+Connect to the Rudder web interface and accept all nodes.
+Then run the following command, where `<relay-uuid>` is the UUID noted during the rudder-relay-top setup.
+
+----
+
+/opt/rudder/bin/rudder-node-to-relay <relay-uuid>
+
+----
+
+
+
+
+=== Mirroring Rudder repositories
+
+You can also use your own package repository server instead of 'www.rudder-project.org' if you want. This is possible with a synchronization from our repositories with rsync.
+
+We've got public read only rsync modules 'rudder-apt' and 'rudder-rpm'.
+
+To synchronize with the APT repository just type:
+----
+rsync -av www.rudder-project.org::rudder-apt /your/local/mirror
+----
+
+To synchronize with the RPM repository just type:
+----
+rsync -av www.rudder-project.org::rudder-rpm /your/local/mirror
+----
+
+Finally, you have to set up these directories (/your/local/mirror) to be shared by HTTP by a web server (i.e., Apache, nginx, lighttpd, etc...).
+
+
+=== Monitoring
+
+This section will give recommendations for:
+
+* Monitoring Rudder itself (besides standard monitoring)
+* Monitoring the state of your configuration management
+
+==== Monitoring Rudder itself
+
+===== Monitoring a Node
+
+The monitoring of a node mainly consists in checking that the Node can speak with
+its policy server, and that the agent is run regularly.
+
+You can use the 'rudder agent health' command to check for communication errors.
+It will check the agent configuration and look for connection errors in the last
+run logs. By default it will output detailed results, but you can start it with
+the '-n' option to enable "nrpe" mode (like Nagios plugins, but it can be
+used with other monitoring tools as well). In this mode, it will
+display a single line result and exit with:
+
+* 0 for a success
+* 1 for a warning
+* 2 for an error
+
+If you are using nrpe, you can put this line in your 'nrpe.cfg' file:
+
+----
+command[check_rudder]=/opt/rudder/bin/rudder agent health -n
+----
+
+To get the last run time, you can lookup the modification date of
+'/var/rudder/cfengine-community/last_successful_inputs_update'.
+
+===== Monitoring a Server
+
+You can use regular API calls to check the server is running and has access to its data.
+For example, you can issue the following command to get the list of currently defined rules:
+
+----
+curl -X GET -H "X-API-Token: yourToken" http://your.rudder.server/rudder/api/latest/rules
+----
+
+You can then check the status code (which should be 200). See the xref:30_basic_administration/70_server_rest_api.adoc#rest-api[API documentation] for more information.
+
+You can also check the webapp logs (in '/var/log/rudder/webapp/year_month_day.stderrout.log')
+for error messages.
+
+==== Monitoring your configuration management
+
+There are two interesting types of information:
+
+* *Events*: all the changes made by the agents on your Nodes
+* *Compliance*: the current state of your Nodes compared with the expected configuration
+
+===== Monitor compliance
+
+You can use the Rudder API to get the current compliance state of your infrastructure.
+It can be used to simply check for configuration errors, or be integrated in
+other tools.
+
+Here is a very simple example of an API call to check for errors (exits with 1 when there is an error):
+
+----
+curl -s -H "X-API-Token: yourToken" -X GET 'https://your.rudder.server/rudder/api/latest/compliance/rules' | grep -qv '"status": "error"'
+----
+
+See the xref:30_basic_administration/70_server_rest_api.adoc#rest-api[API documentation] for more information about general API usage, and the
+http://www.rudder-project.org/rudder-api-doc/#api-compliance[compliance API documentation]
+for a list of available calls.
+
+===== Monitor events
+
+The Web interface gives access to this, but we will here see how to process events
+automatically. They are available on the root server, in '/var/log/rudder/compliance/non-compliant-reports.log'.
+This file contains two types of reports about all the nodes managed by this server:
+
+* All the modifications made by the agent
+* All the errors that prevented the application of a policy
+
+The lines have the following format:
+
+----
+[%DATE%] N: %NODE_UUID% [%NODE_NAME%] S: [%RESULT%] R: %RULE_UUID% [%RULE_NAME%] D: %DIRECTIVE_UUID% [%DIRECTIVE_NAME%] T: %TECHNIQUE_NAME%/%TECHNIQUE_VERSION% C: [%COMPONENT_NAME%] V: [%KEY%] %MESSAGE%
+----
+
+In particular, the 'RESULT' field contains the type of event (change or error, respectively 'result_repaired' and 'result_error').
+
+You can use the following regex to match the different fields:
+
+----
+^\[(?P<date>[^\]]+)\] N: (?P<node_uuid>[^ ]+) \[(?P<node>[^\]]+)\] S: \[(?P<result>[^\]]+)\] R: (?P<rule_uuid>[^ ]+) \[(?P<rule>[^\]]+)\] D: (?P<directive_uuid>[^ ]+) \[(?P<directive>[^\]]+)\] T: (?P<technique>[^/]+)/(?P<technique_version>[^ ]+) C: \[(?P<component>[^\]]+)\] V: \[(?P<key>[^\]]+)\] (?P<message>.+)$
+----
+
+Below is a basic https://www.elastic.co/products/logstash[Logstash] configuration file for parsing Rudder events.
+You can then use https://www.elastic.co/products/kibana[Kibana] to explore the data, and create graphs and
+dashboards to visualize the changes in your infrastructure.
+
+----
+input {
+   file {
+      path => "/var/log/rudder/compliance/non-compliant-reports.log"
+   }
+}
+
+filter {
+   grok {
+      match => { "message" => "^\[%{DATA:date}\] N: %{DATA:node_uuid} \[%{DATA:node}\] S: \[%{DATA:result}\] R: %{DATA:rule_uuid} \[%{DATA:rule}\] D: %{DATA:directive_uuid} \[%{DATA:directive}\] T: %{DATA:technique}/%{DATA:technique_version} C: \[%{DATA:component}\] V: \[%{DATA:key}\] %{DATA:message}$" }
+   }
+   # Replace the space in the date by a "T" to make it parseable by Logstash
+   mutate {
+      gsub => [ "date", " ", "T" ]
+   }
+   # Parse the event date
+   date {
+      match => [ "date" , "ISO8601" ]
+   }
+   # Remove the date field
+   mutate { remove => "date" }
+   # Remove the key field if it has the "None" value
+   if [key] == "None" {
+      mutate { remove => "key" }
+   }
+}
+
+output {
+    stdout { codec => rubydebug }
+}
+----
+
+=== Use Rudder inventory in other tools
+
+Rudder centralizes the information about your managed systems, and
+you can use this information in other tools, mainly through the API.
+We will give a few examples here.
+
+==== Export to a spreadsheet
+
+You can export the list of your nodes to a spreadsheet file (xls format) by using a
+https://github.com/normation/rudder-tools/tree/master/contrib/rudder_nodes_list[tool] available in the rudder-tools repository.
+
+Simply follow the installation instructions, and run it against your Rudder server.
+You will get a file containing:
+
+image::spreadsheet-list-nodes.png[]
+
+You can easily modify the script to add other information.
+
+==== Rundeck and Ansible
+
+There are plugins for Rundeck and Ansible that can be used with each tool to make
+them aware of inventory data from Rudder. For more information, see details in
+the xref:80_extension_and_integration/40_Rudder_Integration.adoc#rudder-integration[Third party integration with Rudder]
+paragraph.
+
diff --git a/src/reference/modules/ROOT/assets/images/RudderRelay.png b/src/reference/modules/installation/assets/images/RudderRelay.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/RudderRelay.png
rename to src/reference/modules/installation/assets/images/RudderRelay.png
diff --git a/src/reference/modules/installation/nav.list b/src/reference/modules/installation/nav.list
new file mode 100644
index 00000000..a9cf9dac
--- /dev/null
+++ b/src/reference/modules/installation/nav.list
@@ -0,0 +1,6 @@
+index.adoc
+requirements.adoc
+server.adoc
+agent.adoc
+relay.adoc
+upgrade.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/apt_key.adoc b/src/reference/modules/installation/pages/_partials/apt_key.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/apt_key.adoc
rename to src/reference/modules/installation/pages/_partials/apt_key.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/initial_config.adoc b/src/reference/modules/installation/pages/_partials/initial_config.adoc
similarity index 96%
rename from src/reference/modules/ROOT/pages/_partials/initial_config.adoc
rename to src/reference/modules/installation/pages/_partials/initial_config.adoc
index 596df9c6..da9e4e2d 100644
--- a/src/reference/modules/ROOT/pages/_partials/initial_config.adoc
+++ b/src/reference/modules/installation/pages/_partials/initial_config.adoc
@@ -1,4 +1,4 @@
-==== Initial configuration of your Rudder Root Server
+== Initial configuration of your Rudder Root Server
 
 After the installation, you have to configure some system elements, by launching
 the following initialisation script:
diff --git a/src/reference/modules/ROOT/pages/_partials/rpm_key.adoc b/src/reference/modules/installation/pages/_partials/rpm_key.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/rpm_key.adoc
rename to src/reference/modules/installation/pages/_partials/rpm_key.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/syslog.adoc b/src/reference/modules/installation/pages/_partials/syslog.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/syslog.adoc
rename to src/reference/modules/installation/pages/_partials/syslog.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/zypper_segfault.adoc b/src/reference/modules/installation/pages/_partials/zypper_segfault.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/zypper_segfault.adoc
rename to src/reference/modules/installation/pages/_partials/zypper_segfault.adoc
diff --git a/src/reference/modules/installation/pages/agent.adoc b/src/reference/modules/installation/pages/agent.adoc
new file mode 100644
index 00000000..2ff6f876
--- /dev/null
+++ b/src/reference/modules/installation/pages/agent.adoc
@@ -0,0 +1,237 @@
+[[install-agent]]
+= Install Rudder Agent
+
+This chapter gives a general presentation of the Rudder Agent, and describes
+the different configuration steps to deploy the Rudder agent on the Nodes you
+wish to manage. Each Operating System has its own set of installation procedures.
+
+The machines managed by Rudder are called Nodes, and can either be physical or virtual.
+For a machine to become a managed Node, you have to install the Rudder Agent on it.
+The Node will afterwards register itself on the server. And finally, the Node should
+be acknowledged in the Rudder Server interface to become a managed Node. For a more detailed
+description of the workflow, please refer to the xref:21_node_management/20_node_management.adoc#_node_management_2[Node Management]
+documentation.
+
+[NOTE]
+
+.Components
+
+=====
+
+This agent contains the following tools:
+
+. The community version of http://www.cfengine.com[CFEngine], a powerful open
+source configuration management tool.
+
+. http://fusioninventory.org/[FusionInventory], an inventory software.
+
+. An initial configuration set for the agent, to bootstrap the Rudder Root Server
+access.
+
+These components are recognized for their reliability and minimal impact on
+performances. Our tests showed their memory consumption is usually under 10 MB
+of RAM during their execution. So you can safely install them on your servers.
+
+We grouped all these tools in one package, to ease the Rudder Agent
+installation.
+
+To get the list of supported Operating systems, please refer to
+xref:10_installation/05_requirements/21_supported_architecture.adoc#node-supported-os[the list of supported Operating Systems for the Nodes].
+
+=====
+
+== Install Rudder Agent on Debian or Ubuntu
+
+include::{partialsdir}/syslog.adoc[]
+
+include::{partialsdir}/apt_key.adoc[]
+
+Then add Rudder's package repository:
+
+----
+
+echo "deb http://www.rudder-project.org/apt-4.3/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/rudder.list
+
+----
+
+Update your local package database to retrieve the list of packages available on our repository:
+
+----
+
+sudo apt-get update
+
+----
+
+Install the +rudder-agent+ package:
+
+----
+
+sudo apt-get install rudder-agent
+
+----
+
+You can now xref:10_installation/11_install_agent/80_agent_configuration.adoc#_configure_and_validate[configure the agent].
+
+== Install Rudder Agent on RHEL-like systems
+
+include::{partialsdir}/syslog.adoc[]
+
+include::{partialsdir}/rpm_key.adoc[]
+
+Then define a yum repository for Rudder:
+
+----
+
+echo '[Rudder_4.3]
+name=Rudder 4.3 EL repository
+baseurl=http://www.rudder-project.org/rpm-4.3/RHEL_$releasever/
+gpgcheck=1
+gpgkey=https://www.rudder-project.org/rpm-repos/rudder_rpm_key.pub' > /etc/yum.repos.d/rudder.repo
+
+----
+
+[TIP]
+
+====
+
+The RPM can be directly downloaded for a standalone installation,
+from the following URL: http://www.rudder-project.org/rpm-4.3/RHEL_7/
+(or RHEL_6, RHEL_5, etc, depending on your host's OS version)
+
+====
+
+Install the package:
+
+----
+
+yum install rudder-agent
+
+----
+
+Or:
+
+----
+
+yum install rudder-agent-4.2.0-1.EL.7.x86_64.rpm
+
+----
+
+You can now xref:10_installation/11_install_agent/80_agent_configuration.adoc#_configure_and_validate[configure the agent].
+
+
+== Install Rudder Agent on SLES
+
+include::{partialsdir}/syslog.adoc[]
+
+Following commands are executed as the +root+ user.
+
+include::{partialsdir}/zypper_segfault.adoc[]
+
+include::{partialsdir}/rpm_key.adoc[]
+
+Then add the Rudder packages repository:
+
+* on SLES 12:
+
+----
+
+zypper ar -n 'Rudder SLES 12 repository' http://www.rudder-project.org/rpm-4.3/SLES_12/ Rudder
+
+----
+
+* on SLES 11:
+
+----
+
+zypper ar -n 'Rudder SLES repository' http://www.rudder-project.org/rpm-4.3/SLES_11_SP1/ Rudder
+
+----
+
+* on SLES 10:
+
+----
+
+zypper sa 'http://www.rudder-project.org/rpm-4.3/SLES_10_SP3/' Rudder
+
+----
+
+Update your local package database to retrieve the list of packages available on our repository:
+
+----
+
+zypper ref
+
+----
+
+Install the +rudder-agent+ package:
+
+----
+
+zypper install rudder-agent
+
+----
+
+[TIP]
+
+====
+
+Use of the +rug+ package manager on SLES 10 is strongly discouraged, due to poor performance
+and possible stability issues.
+
+====
+
+You can now xref:10_installation/11_install_agent/80_agent_configuration.adoc#_configure_and_validate[configure the agent].
+
+
+[[_configure_and_validate]]
+== Configure and validate
+
+=== Configure Rudder Agent
+
+Configure the IP address or hostname of the Rudder Root Server in the following file
+
+----
+
+echo '<rudder-server-ip-or-hostname>' > /var/rudder/cfengine-community/policy_server.dat
+
+----
+
+[TIP]
+
+=====
+
+We advise you to use the +IP address+ of the Rudder Root Server. The DNS name of
+this server can also be accepted if you have a trusted DNS infrastructure
+with proper reverse resolutions.
+
+=====
+
+You can now start the Rudder service with:
+
+----
+
+service rudder-agent start
+
+----
+
+=== Validate new Node
+
+Several minutes after the start of the agent, a new Node should be pending in
+the Rudder web interface. You will be able to browse its inventory, and accept it to manage its
+configuration with Rudder.
+
+You may force the agent to run and send an inventory by issuing the following command:
+
+----
+
+rudder agent inventory
+
+----
+
+You may force the agent execution by issuing the following command:
+
+----
+
+rudder agent run
+
+----
diff --git a/src/reference/modules/ROOT/pages/10_installation/04_quick_install.adoc b/src/reference/modules/installation/pages/index.adoc
similarity index 93%
rename from src/reference/modules/ROOT/pages/10_installation/04_quick_install.adoc
rename to src/reference/modules/installation/pages/index.adoc
index a3253d90..79202eb1 100644
--- a/src/reference/modules/ROOT/pages/10_installation/04_quick_install.adoc
+++ b/src/reference/modules/installation/pages/index.adoc
@@ -1,3 +1,5 @@
+== Installation
+
 === Quick installation
 
 [WARNING]
@@ -16,7 +18,7 @@ We have a quick procedure for people who just want to test Rudder:
 ----
 
   su -
-  wget https://www.rudder-project.org/tools/rudder-setup 
+  wget https://www.rudder-project.org/tools/rudder-setup
   chmod +x rudder-setup
   ./rudder-setup setup-server latest
 
@@ -36,8 +38,8 @@ Usage rudder-setup (add-repository|setup-agent|setup-server|upgrade-agent|upgrad
   rudder_version : x.y or x.y.z or x.y-nightly or ci/x.y or lts or latest
        x.y:         the last x.y release (ex: 3.2)
        x.y.z:       the exact x.y.z release (ex: 3.2.1)
-       x.y.z-t:     the exact x.y.z release with a retag number t (ex: 3.2.1-1) 
-       x.y.z.a:     the last x.y.z pre-release where a can be alpha1, beta1, rc1... (ex: 4.0.0.rc1) 
+       x.y.z-t:     the exact x.y.z release with a retag number t (ex: 3.2.1-1)
+       x.y.z.a:     the last x.y.z pre-release where a can be alpha1, beta1, rc1... (ex: 4.0.0.rc1)
        x.y-nightly: the last public x.y nightly build (ex: 3.2-nightly)
        ci/x.y:      the last private x.y nightly build (ex: ci/3.2)
        ci/x.y.z:    the last private x.y.z release build (ex: ci/3.2.16)
diff --git a/src/reference/modules/ROOT/pages/10_installation/12_install_relay.adoc b/src/reference/modules/installation/pages/relay.adoc
similarity index 95%
rename from src/reference/modules/ROOT/pages/10_installation/12_install_relay.adoc
rename to src/reference/modules/installation/pages/relay.adoc
index cdccc8c5..71ffacdc 100644
--- a/src/reference/modules/ROOT/pages/10_installation/12_install_relay.adoc
+++ b/src/reference/modules/installation/pages/relay.adoc
@@ -1,5 +1,5 @@
 [[relay-servers]]
-=== Install Rudder Relay (optional)
+= Install Rudder Relay (optional)
 
 Relay servers can be added to Rudder, for example to manage a DMZ or to isolate specific
 nodes from the main environment for security reasons. 
@@ -23,7 +23,7 @@ The procedure will:
 - Configure the relay components (Syslog, Apache HTTPd, CFEngine)
 - Switch this node to the relay server role (from the root server point of view)
 
-==== On the relay
+== On the relay
 
 To begin, please install a regular Rudder agent on the OS, following the
 xref:10_installation/11_install_agent/00_install_agent.adoc#install-agent[installation instructions], and install the 'rudder-server-relay'
@@ -32,7 +32,7 @@ package in addition to the 'rudder-agent' package.
 To complete this step, please make sure that your node is configured successfully
 and appears in your Rudder web interface.
 
-==== On the root server
+== On the root server
 
 You have to tell the Rudder Root server that a node will be a relay. To do so,
 launch the rudder-node-to-relay script on the root server, supplying the UUID of the
@@ -45,7 +45,7 @@ host to be considered as a relay. You can find the UUID of your node with the
 
 ----
 
-==== Validation
+== Validation
 
 When every step has completed successfully:
 
@@ -61,7 +61,7 @@ from a node to a relay.
 
 image::RudderRelay.png[Relay]
 
-==== Adding nodes to a relay server
+== Adding nodes to a relay server
 
 When you have at least one relay, you will likely want to add nodes
 on it.
diff --git a/src/reference/modules/installation/pages/requirements.adoc b/src/reference/modules/installation/pages/requirements.adoc
new file mode 100644
index 00000000..a21058ab
--- /dev/null
+++ b/src/reference/modules/installation/pages/requirements.adoc
@@ -0,0 +1,271 @@
+[[rudder-installation-requirements]]
+= Requirements
+
+[[configure-the-network]]
+== Networking
+
+.Network Flows
+[options="header"]
+|=======================
+|To|From|Port|Usage
+|Root Server|User or API client| *tcp/443* (https) | Access Web interface/API
+.6+|Policy Server |Any Node|*udp/514* (or tcp/514) |Send reports
+.3+|Linux or AIX Node | *tcp/443* (https/WebDAV) | Send inventories
+|  *tcp/5309*    |Fetch policies
+|  _tcp/5310 (optional)_    |Debug policy copy
+|AIX Node | *tcp/80* (http/WebDAV) | Send inventories
+|Windows DSC Node | *tcp/443* (https/WebDAV) | Send inventories and fetch policies
+|Linux or AIX Node | Policy Server | _tcp/5309 (optional)_ | Trigger remote agent run
+|=======================
+
+Note: The Policy Server is the server configured to manage the node, and can be
+either a Root Server or a Relay Server.
+
+=== DNS - Name resolution
+
+If you want to be able to trigger agent runs from the Root Server (without
+having to wait for regular automated run),
+you will need your Root Server (or Relay Server) to be able to resolve your nodes
+using the provided hostname.
+
+
+
+
+[[jvm-requirements]]
+== JVM Security Policy
+
+Rudder needs `unlimited strength` security policy because it uses a variety of advanced
+hashing and cryptographic algorithms only available in that mode.
+
+Any recent JVM (JDK 8 > 8u161, all JDK 9 and more recent) is configured by default with this policy.
+
+You can check your case by running the following command on your server:
+
+----
+
+jrunscript -e 'exit (javax.crypto.Cipher.getMaxAllowedKeyLength("RC5") >= 256 ? 0 : 1);'; echo $?
+
+----
+
+If it returns 0, you have the correct policy. In other cases, you will need to change it.
+
+For that, you can download the
+http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html[`unlimited strength` policy for JDK 8 here].
+
+
+Then, simply copy the `java.policy` file into `$JAVA_HOME/jre/lib/security/java.policy`.
+
+[[node-supported-os, the list of supported Operating Systems for Nodes]]
+
+== Fully supported Operating Systems
+
+Fully supported Operating Systems are systems that are frequently built and tested on our servers.
+Partially supported Operating Systems are systems that have been built and tested at least once but that have not seen continuous flow of fixes.
+
+=== For Rudder Nodes
+
+The following operating systems are supported for Rudder Nodes and packages are
+available for these platforms:
+
+GNU/Linux:
+
+* Debian 5 to 9
+
+* RedHat Enterprise Linux (RHEL) / RHEL-like 3 and 5 to 7
+
+* SuSE Linux Enterprise Server (SLES) 10 SP3, 11 and 12
+
+* Ubuntu 10.04 LTS (Lucid), 12.04 LTS (Precise), 14.04 LTS (Trusty), 16.04 LTS (Xenial), 18.04 LTS (Bionic)
+
+Other Unix systems:
+
+* IBM AIX 5.3, 6.1 and 7.1
+
+Windows:
+
+* xref:10_installation/05_requirements/21_supported_architecture.adoc#install-on-windows[Microsoft Windows] Server 2008 R2, 2012, 2012 R2, 2016
+
+[TIP]
+
+[[install-on-windows, Install on Microsoft Windows]]
+
+.Windows and AIX Nodes
+
+====
+
+* On Windows, installing Rudder requires the DSC (Desired State Configuration) plugin and Powershell 4.0 or more
+* For IBM AIX, pre-built RPM packages are distributed by Normation only
+
+Hence, as a starting point, we suggest that you only use Linux machines. Once
+you are accustomed to Rudder, contact Normation to obtain a demo version for
+these platforms.
+
+====
+
+
+[[server-supported-os, the list of supported Operating Systems for Root server]]
+
+=== For Rudder Root Server
+
+The following operating systems are supported as a Root server:
+
+GNU/Linux:
+
+* Debian 8 and 9
+
+* RedHat Enterprise Linux (RHEL) / RHEL-like 6 and 7
+
+* SuSE Linux Enterprise Server (SLES) 11 SP1 and SP3, 12 SP1, 12 SP2
+
+* Ubuntu 14.04 LTS (Trusty), 16.04 LTS (Xenial)
+
+
+
+== Partially supported Operating Systems
+
+Fully supported Operating Systems are systems that are frequently built and tested on our servers.
+Partially supported Operating Systems are systems that have been built and tested at least once but that have not seen continuous flow of fixes.
+
+[WARNING]
+
+.Partially supported Operating Systems
+
+====
+
+It is possible to use Rudder on other platforms than the fully supported ones.
+However, we haven't tested the application on them, and can't currently supply
+any packages for them. Moreover, some Techniques may not work properly. If you
+wish to get Rudder support on those systems, please get in touch with us!
+
+A reference about how to manually build a Rudder agent is available on Rudder's
+documentation here: xref:90_reference/40_build_agent.adoc#_building_the_rudder_agent[Building the Rudder Agent]
+
+====
+
+=== For Rudder Nodes
+
+The following operating systems have had an agent built using xref:90_reference/40_build_agent.adoc#_building_the_rudder_agent[Building the Rudder Agent]:
+
+* FreeBSD
+
+* Slackware
+
+* Solaris 10 and 11
+
+* Raspbian, based on jessie (via dpkg)
+
+* Debian 8 on ARM (armhf version) (via dpkg)
+
+* OpenSUSE (via rpm)
+
+
+You can also follow the documentation instructions to build and install Rudder Agent locally on your favorite linux distribution.
+Even if this distribution has not been tested by us, it has a reasonable chance of success.
+
+
+=== For Rudder Root Server
+
+We advise against using an unsupported OS for Rudder server because the server contains
+much more code than the agent. This code is tailored against specific OS versions
+to work around many system limitations and specificities.
+
+[[rudder-cloud-compatibility]]
+== Cloud compatibility
+
+The agent provides an abstraction that permits a high level management of the infrastructure.
+This abstraction is independent of the underlying hardware. This also works for the cloud -
+we can define configuration rules in Rudder that will be applied as well inside a cloud instance as in a virtual server or in a physical machine of a datacenter.
+
+Any cloud instance based on one of the supported operating system is automatically supported.
+
+[[node-hardware-requirements]]
+== Hardware specifications for Rudder Agent
+
+Rudder agent has a very small footprint, and only consumes:
+
+* 10 to 20 MB of RAM during an agent run
+* a few kB on the network to check or update its policies
+* a few kB on the network to report
+* around 100 MB of disk space for the installed files and the workspace
+
+These figures will vary depending on your configuration (backup retention,
+number of configured components to check, etc...).
+
+[[server-hardware-requirements]]
+== Hardware specifications and sizing for Rudder Root Server
+
+A dedicated server is strongly recommended, either physical or virtual with at least one dedicated core.
+Rudder Server runs on both 32 (if available) and 64 bit versions of every supported Operating System.
+
+[NOTE]
+
+====
+
+Rudder does not fear big infrastructures. It is currently used in production in
+infrastructure with more than *7000* nodes.
+
+====
+
+=== Memory
+
+The required amount of RAM mainly depends on the number of managed nodes. A general rule for the minimal value on a stand-alone server is:
+
+* less than 50 nodes: 2 GB
+* between 50 and 1000 nodes: 4 GB
+* more than 1000 nodes: 4 GB + 1 GB of RAM by 500 nodes above 1000.
+
+When managing more than 1000 nodes, we also recommend you to use a multiserver
+installation for Rudder as described in chapter xref:43_advanced_administration/77_distributed_rudder.adoc#multiserver-rudder[Multiserver Rudder].
+
+When your server has more than 2 GB of RAM, you have to configure the RAM allocated
+to the Java Virtual Machine as explained in the section
+xref:43_advanced_administration/20_application_tuning.adoc#_configure_ram_allocated_to_jetty[about webapplication RAM configuration].
+
+When your server has more than 4 GB, you may need to also tune the PostgresSQL
+server, as explained in the xref:43_advanced_administration/20_application_tuning.adoc#_optimize_postgresql_server[Optimize PostgreSQL Server]
+section.
+
+[TIP]
+
+====
+
+As an example, a Rudder server which manages 2600 nodes (with a lot of policies
+checked) will need:
+
+* A server with 8 GB of RAM,
+* 4 GB of RAM will be allocated to the JVM.
+
+In our load-tests, with such a configuration, the server is not stressed and
+the user experience is good.
+
+====
+
+=== Disk
+
+The PostgreSQL database will take up most disk space needed by Rudder. The storage
+necessary for the database can be estimated by counting around
+150 to 400 kB by Directive, by Node and by day of retention of node's
+execution reports (the default is 4 days):
+
+----
+max_space = number of Directives * number of Nodes * retention duration in days * 400 kB
+----
+
+For example, a default installation with 500 nodes and an average of
+50 Directives by node, should require between *14 GB and 38 GB* of disk space
+for PostgreSQL.
+
+Follow the xref:43_advanced_administration/20_application_tuning.adoc#_reports_retention[Reports Retention] section to configure the
+retention duration.
+
+
+[WARNING]
+
+====
+
+Be careful to correctly size your */var* partition. Compliance data are growing
+fast, and PostgreSQL doesn't like at all to encounter a write error because
+the disk is full. It is also advised to set up your monitoring to check for
+available space on that partition.
+
+====
diff --git a/src/reference/modules/installation/pages/server.adoc b/src/reference/modules/installation/pages/server.adoc
new file mode 100644
index 00000000..22c272d2
--- /dev/null
+++ b/src/reference/modules/installation/pages/server.adoc
@@ -0,0 +1,260 @@
+[[install-server]]
+= Install Rudder Server
+
+This chapter covers the installation of a Rudder Root Server, from the
+specification of the underlying server, to the initial setup of the application.
+
+Before all, you need to setup a server according to
+xref:10_installation/05_requirements/21_supported_architecture.adoc#server-supported-os[the server specifications]. You should also
+xref:10_installation/05_requirements/05_requirements.adoc#configure-the-network[configure the network]. These topics are covered in the
+Architecture chapter.
+
+Ideally, this machine should have Internet access, but this is not a strict requirement.
+
+As Rudder data can grow really fast depending on your number of managed nodes and number of rules, it is advised to separate partitions to prevent /var getting full and break your system.
+Special attention should be given to:
+
+=======
+
+/var/lib/pgsql::
+(OS dependent).
+Please see the xref:10_installation/05_requirements/05_requirements.adoc#_database_maintenance[database maintenance] chapter for more details about the
+PostgreSQL database size estimation.
+
+/var/rudder::
+Contains most of your server information, the configuration-repository, LDAP database, etc...
+Rudder application-related files should stay under 1GB, but the size of the configuration-repository will
+depend of the amount of data you store in it, especially in the shared-files folder (files that will get
+distributed to the agents using the "Download a file for the shared folder" Technique).
+
+/var/log/rudder::
+Report logs (/var/log/rudder/reports) size will depend on the amount of nodes you manage.
+It is possible to reduce this drastically by unticking "Log all reports received to /var/log/rudder/reports/all.log"
+under the Administration - Settings tab in the Rudder web interface. This will prevent Rudder from recording this logs
+in a text file on disk, and will only store them in the SQL database. This saves on space, and doesn't remove any
+functionality, but does however make debugging harder.
+
+=======
+
+[NOTE]
+
+.Files installed by the application
+
+=====
+
++/etc+:: System-wide configuration files are stored here: init scripts,
+configuration for apache, logrotate and rsyslog.
+
++/opt/rudder+:: Non variable application files are stored here.
+
++/opt/rudder/etc+:: Configuration files for Rudder services are stored here.
+
++/var/log/rudder+:: Log files for Rudder services are stored here.
+
++/var/rudder+:: Variable data for Rudder services are stored here.
+
++/var/rudder/configuration-repository/techniques+:: Techniques are stored here.
+
++/var/rudder/cfengine-community+:: Data for CFEngine Community is stored here.
+
++/usr/share/doc/rudder*+:: Documentation about Rudder packages.
+
+=====
+
+== Install Rudder Root server on Debian or Ubuntu
+
+=== Add the Rudder packages repository
+
+Rudder requires Java RE (version 8 at least) which is not packaged by default on Debian 8 nor Ubuntu 14.04.
+
+The Java RE 8 for Debian or Ubuntu can be found through Oracle's website: https://www.java.com
+
+include::{partialsdir}/apt_key.adoc[]
+
+
+Then run the following commands as root:
+
+----
+
+echo "deb http://www.rudder-project.org/apt-4.3/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/rudder.list
+apt-get update
+
+----
+
+This will add the package repository and finally update the local package cache.
+
+=== Install your Rudder Root Server
+
+To begin the installation, you should simply install the rudder-server-root
+metapackage, which will install the required components:
+
+----
+
+apt-get install rudder-server-root
+
+----
+
+include::{partialsdir}/initial_config.adoc[]
+
+
+[[install-server-sles, Install Rudder Root server on SLES]]
+== Install Rudder Root server on SLES
+
+=== Configure the package manager
+
+Rudder requires the following software, which is not always packaged by SuSE on all versions:
+
+* PostgreSQL 9
+* Java RE (version 8 at least).
+
+It is also recommended to use PostgreSQL >= 9.2 for optimal performances.
+
+PostgreSQL 9.4 can be installed through the OpenSuSE build service: https://build.opensuse.org/project/show/server:database:postgresql
+or through the system repositories, on SLES 11 SP4 and later systems.
+
+The Java RE 8 for SLES11 can be found through Oracle's website: https://www.java.com
+
+Also, Rudder server requires the +git+ software, that can be found on SLES SDK DVD under the name +git-core+.
+
+[WARNING]
+
+====
+
+SLES 11 pre SP4 will try to install PostgreSQL 8.x by default, which is not recommended for Rudder and will cause serious performance degradation, and requires much more disk space in the long run.
+
+It is really recommended to either add the OpenSuSE build service repository, or install postgresql9x-server (if available) beforehand to prevent the system from choosing the default PostgreSQL version.
+
+====
+
+include::{partialsdir}/zypper_segfault.adoc[]
+
+
+[WARNING]
+
+====
+
+Zypper seems to be quite tolerant to missing dependencies and will let you install rudder-server-root even if you are missing
+something like +git-core+ for example, if nothing provides it or you did not install it beforehand.
+
+Special care should be taken during initial installation not to say "Continue anyway" if Zypper does complain a dependency can
+not be resolved and asks what to do.
+
+====
+
+=== Add the Rudder packages repository
+
+include::{partialsdir}/rpm_key.adoc[]
+
+
+Then run the following commands as root:
+
+----
+
+zypper ar -n "Rudder SLES repository" http://www.rudder-project.org/rpm-4.3/SLES_11/ Rudder
+zypper refresh
+
+----
+
+This will add the Rudder package repository, then update the local package
+cache.
+
+=== Install your Rudder Root Server
+
+To begin the installation, you should simply install the rudder-server-root
+metapackage, which will install the required components:
+
+----
+
+zypper in rudder-server-root
+
+----
+
+include::{partialsdir}/initial_config.adoc[]
+
+
+== Install Rudder Root server on RHEL-like systems
+
+=== Add the Rudder packages repository
+
+include::{partialsdir}/rpm_key.adoc[]
+
+
+Then run the following command as root:
+
+----
+
+echo '[Rudder_4.3]
+name=Rudder 4.3 EL repository
+baseurl=http://www.rudder-project.org/rpm-4.3/RHEL_$releasever/
+gpgcheck=1
+gpgkey=https://www.rudder-project.org/rpm-repos/rudder_rpm_key.pub' > /etc/yum.repos.d/rudder.repo
+
+----
+
+=== Install your Rudder Root Server
+
+To begin the installation, you should simply install the rudder-server-root
+metapackage, which will install the required components:
+
+----
+
+yum install rudder-server-root
+
+----
+
+On Red Hat-like systems, a firewall setup is enabled by default, and would need to be adjusted
+for Rudder to operate properly. You have to allow all the flows described in the
+xref:10_installation/05_requirements/05_requirements.adoc#configure-the-network[network] section.
+
+
+[TIP]
+
+====
+
+On EL6, the /etc/sysconfig/iptables file configures the firewall:
+
+----
+
+*filter
+:INPUT ACCEPT [0:0]
+:FORWARD ACCEPT [0:0]
+:OUTPUT ACCEPT [0:0]
+-A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
+-A INPUT -p icmp -j ACCEPT
+-A INPUT -i lo -j ACCEPT
+# Allow SSH access (Maintenance)
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT
+# Allow HTTPS access (Rudder)
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 443 -j ACCEPT
+-A INPUT -j REJECT --reject-with icmp-host-prohibited
+-A FORWARD -j REJECT --reject-with icmp-host-prohibited
+COMMIT
+
+----
+
+The important line to have access to the Web interface being:
+
+----
+
+# Allow HTTPS access (Rudder)
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 443 -j ACCEPT
+
+----
+
+====
+
+[TIP]
+
+====
+
+On EL7, the default firewall is firewalld, and you can enable HTTP/S access by running
+
+----
+
+firewall-cmd --permanent --zone=public --add-port=443/tcp
+
+----
+
+====
+
+include::{partialsdir}/initial_config.adoc[]
diff --git a/src/reference/modules/installation/pages/upgrade.adoc b/src/reference/modules/installation/pages/upgrade.adoc
new file mode 100644
index 00000000..947f24c1
--- /dev/null
+++ b/src/reference/modules/installation/pages/upgrade.adoc
@@ -0,0 +1,315 @@
+= Upgrade
+
+This short chapter covers the upgrade of the Rudder Server Root and Rudder Agent
+from older versions to the latest version.
+
+The upgrade is quite similar to the installation.
+
+A big effort has been made to ensure that all upgrade steps are performed
+automatically by packaging scripts. Therefore, you shouldn't have to do any
+upgrade procedures manually, but you will note that several data migrations
+occur during the upgrade process.
+
+== Upgrade notes
+
+[[_upgrade_from_rudder_4_0_or_older]]
+=== Upgrade from Rudder 4.0 or older
+
+Direct upgrades from 4.0.x and older are no longer supported on 4.3.
+If you are still running one of those, either on servers or nodes,
+please first upgrade to one of the supported versions, and then upgrade to 4.3.
+
+=== Upgrade from Rudder 4.1 or 4.2
+
+Migration from 4.1 or 4.2 are supported, so you can upgrade directly to 4.3.
+
+=== Compatibility between Rudder agent 4.3 and older server versions
+
+==== 4.1.x and 4.2.x servers
+
+Rudder agents 4.3.x are compatible with 4.1 and 4.2 Rudder servers.
+
+==== Older servers
+
+Rudder agents 4.3.x are not compatible with Rudder servers older than 4.1.
+You need to upgrade your server to a compatible version before the agents.
+
+=== Compatibility between Rudder server 4.3 and older agent versions
+
+==== 4.1.x and 4.2.x agents
+
+Rudder agent 4.1.x and 4.2.x are fully compatible with Rudder server 4.3.x. It is
+therefore not strictly necessary to update your agents to 4.3.x.
+
+==== Older agents
+
+These agents are not compatible with Rudder 4.3, and you have to upgrade them.
+Be careful to follow the upgrade path explained xref:12_upgrade/05_caution.adoc#_upgrade_from_rudder_4_0_or_older[above].
+
+=== Protocol for reporting
+
+Rudder uses syslog messages over UDP by default for reporting (since 3.1), but if you upgraded
+your server from a previous version, you will keep the previous setting which uses
+syslog messages over TCP.
+
+You should consider switching to UDP (in *Administration* -> *Settings* -> *Protocol*),
+as it will prevent breaking your server in
+case of networking or load issues, or if you want to manage a lot of nodes.
+The only drawback is that you can lose reports in these situations. It does not
+affect the reliability of policy enforcement, but may only temporarily affect
+reporting on the server.
+Read xref:43_advanced_administration/20_application_tuning.adoc#_rsyslog[performance notes] about rsyslog for detailed information.
+
+== On Debian or Ubuntu
+
+Following commands are executed as the +root+ user.
+
+Add the Rudder project repository:
+
+----
+
+echo "deb http://www.rudder-project.org/apt-4.3/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/rudder.list
+
+----
+
+Update your local package database to retrieve the list of packages available on our repository:
+
+----
+
+apt-get update
+
+----
+
+For Rudder Server, upgrade all the packages associated to +rudder-server-root+:
+
+* With apt-get:
+
+----
+
+apt-get install rudder-server-root ncf ncf-api-virtualenv
+
+----
+
+and after the upgrade of these packages, restart jetty to apply the changes on the Web application:
+
+----
+
+service rudder-jetty restart
+
+----
+
+For Rudder Agent, upgrade the +rudder-agent+ package:
+
+----
+
+apt-get install rudder-agent
+
+----
+
+[WARNING]
+
+====
+
+Rudder includes a script for upgrading all files, databases, etc... which need
+migrating. Therefore, you should not replace your old files by the new ones
+when apt-get/aptitude asks about this, unless you want to reset all your parameters.
+
+====
+
+[WARNING]
+
+====
+
+Rudder 4.1 requires Java RE version 8 or more, which is not packaged by default on Debian 7 or Ubuntu 14.04.
+On these platforms, prior to upgrading Rudder, you will need to install Java RE 8, either from Oracle's site https://www.java.com
+or through any other means of your choice.
+
+====
+
+
+You can now xref:12_upgrade/60_technique_upgrade.adoc#_technique_upgrade[upgrade your local techniques].
+
+== On RHEL or CentOS
+
+Following commands are executed as the +root+ user.
+
+Update your yum repository:
+
+----
+
+echo '[Rudder_4.3]
+name=Rudder 4.3 Repository
+baseurl=http://www.rudder-project.org/rpm-4.3/RHEL_$releasever/
+gpgcheck=1
+gpgkey=https://www.rudder-project.org/rpm-repos/rudder_rpm_key.pub' > /etc/yum.repos.d/rudder.repo
+
+----
+
+[TIP]
+
+====
+
+The +$releasever+ variable is resolved automatically to your Enterprise Linux version; replace it manually if necessary.
+
+====
+
+=== Rudder server
+
+For Rudder server, upgrade the +rudder-*+ and +ncf+-related packages:
+
+----
+
+yum update "rudder-*" ncf ncf-api-virtualenv
+
+----
+
+and after the upgrade of these packages, restart jetty to apply the changes on the Web application:
+
+----
+
+service rudder-jetty restart
+
+----
+
+From version 3.1, Rudder provides an SELinux policy. You can enable it after upgrading your server with:
+
+----
+
+sed -i "s%^\s*SELINUX=.*%SELINUX=enabled%" /etc/sysconfig/selinux
+setenforce 1
+
+----
+
+=== Rudder agent
+
+For Rudder agent, upgrade the +rudder-agent+ package:
+
+----
+
+yum update rudder-agent
+
+----
+
+You can now xref:12_upgrade/60_technique_upgrade.adoc#_technique_upgrade[upgrade your local techniques].
+
+== On SLES
+
+Following commands are executed as the +root+ user.
+
+Add the Rudder packages repository:
+
+* On a SLES 11 system:
+
+----
+
+zypper ar -n "Rudder SLES repository" http://www.rudder-project.org/rpm-4.3/SLES_11_SP1/ Rudder
+
+----
+
+* On a SLES 10 system:
+
+----
+
+zypper sa "http://www.rudder-project.org/rpm-4.3/SLES_10_SP3/" Rudder
+
+----
+
+
+Update your local package database to retrieve the list of packages available on our repository:
+
+----
+
+zypper ref
+
+----
+
+
+For Rudder Server, upgrade all the packages associated to +rudder-server-root+:
+
+----
+
+zypper update "rudder-*" "ncf*"
+
+----
+
+[WARNING]
+
+====
+
+SLES 11 pre SP4 uses PostgreSQL 8.x by default, which is not recommended for Rudder and will cause serious performance degradation, and requires much more disk space in the long run.
+
+Rudder 4.0 is tested for PostgreSQL 9.2 and higher. It still works with version 8.4 or 9.1, but no warranties are made that this will hold in the future. It is really recommended to migrate to PostgreSQL 9.2 at least.
+
+Please look at xref:10_installation/10_install_server/12_install_root_server_sles.adoc#install-server-sles[Install Rudder Root server on SLES] for details.
+
+====
+
+[WARNING]
+
+====
+
+Rudder 4.1 requires Java RE version 8 or higher, which is not packaged by default on SLES 11.
+On this platform, prior to upgrading Rudder, you will need to install Java RE 8, either from the Oracle site https://www.java.com
+or through any other means of your choice.
+
+====
+
+and after the upgrade of these packages, restart jetty to apply the changes on the Web application:
+
+----
+
+service rudder-jetty restart
+
+----
+
+For Rudder Agent, upgrade the +rudder-agent+ package:
+
+----
+
+zypper update rudder-agent
+
+----
+
+You can now xref:12_upgrade/60_technique_upgrade.adoc#_technique_upgrade[upgrade your local techniques].
+
+
+[[_technique_upgrade]]
+== Technique upgrade
+
+At the first installation, Rudder will automatically deploy a Technique library in the
+`/var/rudder/configuration-repository/techniques` directory.
+
+When upgrading Rudder to another version, a new (updated) Technique library will be deployed
+in `/opt/rudder/share/techniques`, and Rudder will automatically take care of updating the system
+Techniques in the configuration-repository directory.
+
+However, the other Techniques will not be updated automatically (yet), so you will have to do
+it yourself.
+
+[CAUTION]
+
+====
+
+Please keep in mind that if you did manual modifications on the Techniques in existing directories,
+or created new versions of them, you will have some merging work to do.
+
+====
+
+To upgrade your local techniques, run the following commands on the Rudder Root Server:
+
+----
+
+cd /var/rudder/configuration-repository
+cp -a /opt/rudder/share/techniques/* techniques/
+git status
+# Now, inspect the differences. If no conflict is noticeable, then go ahead.
+git add techniques/
+git commit -m "Technique upgrade" # Here, put a meaningful message about why you are updating.
+rudder server reload-techniques
+
+----
+
+This last command will reload the Technique library and trigger a full redeployment on nodes.
+
+Please check that the deployment is successful in the Rudder web interface.
+
diff --git a/src/reference/modules/ROOT/assets/images/branding/Configure.png b/src/reference/modules/plugins/assets/images/branding/Configure.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/branding/Configure.png
rename to src/reference/modules/plugins/assets/images/branding/Configure.png
diff --git a/src/reference/modules/ROOT/assets/images/branding/Customised_bar.png b/src/reference/modules/plugins/assets/images/branding/Customised_bar.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/branding/Customised_bar.png
rename to src/reference/modules/plugins/assets/images/branding/Customised_bar.png
diff --git a/src/reference/modules/ROOT/assets/images/branding/default-main-display.png b/src/reference/modules/plugins/assets/images/branding/default-main-display.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/branding/default-main-display.png
rename to src/reference/modules/plugins/assets/images/branding/default-main-display.png
diff --git a/src/reference/modules/ROOT/assets/images/branding/login-default.png b/src/reference/modules/plugins/assets/images/branding/login-default.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/branding/login-default.png
rename to src/reference/modules/plugins/assets/images/branding/login-default.png
diff --git a/src/reference/modules/ROOT/assets/images/rudder-agent-dsc-cli.png b/src/reference/modules/plugins/assets/images/rudder-agent-dsc-cli.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/rudder-agent-dsc-cli.png
rename to src/reference/modules/plugins/assets/images/rudder-agent-dsc-cli.png
diff --git a/src/reference/modules/ROOT/assets/images/rudder-agent-windows-known-issue.png b/src/reference/modules/plugins/assets/images/rudder-agent-windows-known-issue.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/rudder-agent-windows-known-issue.png
rename to src/reference/modules/plugins/assets/images/rudder-agent-windows-known-issue.png
diff --git a/src/reference/modules/ROOT/assets/images/rudder-datasources-description.png b/src/reference/modules/plugins/assets/images/rudder-datasources-description.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/rudder-datasources-description.png
rename to src/reference/modules/plugins/assets/images/rudder-datasources-description.png
diff --git a/src/reference/modules/ROOT/assets/images/rudder-datasources-description.svg b/src/reference/modules/plugins/assets/images/rudder-datasources-description.svg
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/rudder-datasources-description.svg
rename to src/reference/modules/plugins/assets/images/rudder-datasources-description.svg
diff --git a/src/reference/modules/ROOT/assets/images/rudder-technique-dsc.png b/src/reference/modules/plugins/assets/images/rudder-technique-dsc.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/rudder-technique-dsc.png
rename to src/reference/modules/plugins/assets/images/rudder-technique-dsc.png
diff --git a/src/reference/modules/ROOT/assets/images/technique_editor/rudder-technique-editor-filter.png b/src/reference/modules/plugins/assets/images/rudder-technique-editor-filter.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/technique_editor/rudder-technique-editor-filter.png
rename to src/reference/modules/plugins/assets/images/rudder-technique-editor-filter.png
diff --git a/src/reference/modules/plugins/nav.list b/src/reference/modules/plugins/nav.list
new file mode 100644
index 00000000..94a153d2
--- /dev/null
+++ b/src/reference/modules/plugins/nav.list
@@ -0,0 +1 @@
+index.adoc
diff --git a/src/reference/modules/ROOT/pages/80_extension_and_integration/00_intro.adoc b/src/reference/modules/plugins/pages/index.adoc
similarity index 89%
rename from src/reference/modules/ROOT/pages/80_extension_and_integration/00_intro.adoc
rename to src/reference/modules/plugins/pages/index.adoc
index b693fb96..bf853f30 100644
--- a/src/reference/modules/ROOT/pages/80_extension_and_integration/00_intro.adoc
+++ b/src/reference/modules/plugins/pages/index.adoc
@@ -1,6 +1,5 @@
-
 [[extending-and-integrating-rudder]]
-== Rudder extension and integration with third party software
+= Rudder extension and integration with third party software
 
 Rudder was thought from the begining to be a good citizen in you infrastructure.
 Part of that good will intent is translated into the fact that everything is done
diff --git a/src/reference/modules/ROOT/assets/graphviz/agent_workflow.dot b/src/reference/modules/reference/assets/graphviz/agent_workflow.dot
similarity index 100%
rename from src/reference/modules/ROOT/assets/graphviz/agent_workflow.dot
rename to src/reference/modules/reference/assets/graphviz/agent_workflow.dot
diff --git a/src/reference/modules/ROOT/assets/graphviz/asset_management_workflow.dot b/src/reference/modules/reference/assets/graphviz/asset_management_workflow.dot
similarity index 100%
rename from src/reference/modules/ROOT/assets/graphviz/asset_management_workflow.dot
rename to src/reference/modules/reference/assets/graphviz/asset_management_workflow.dot
diff --git a/src/reference/modules/ROOT/assets/graphviz/concepts.dot b/src/reference/modules/reference/assets/graphviz/concepts.dot
similarity index 100%
rename from src/reference/modules/ROOT/assets/graphviz/concepts.dot
rename to src/reference/modules/reference/assets/graphviz/concepts.dot
diff --git a/src/reference/modules/ROOT/assets/graphviz/data_workflow.dot b/src/reference/modules/reference/assets/graphviz/data_workflow.dot
similarity index 100%
rename from src/reference/modules/ROOT/assets/graphviz/data_workflow.dot
rename to src/reference/modules/reference/assets/graphviz/data_workflow.dot
diff --git a/src/reference/modules/ROOT/assets/graphviz/packages.dot b/src/reference/modules/reference/assets/graphviz/packages.dot
similarity index 100%
rename from src/reference/modules/ROOT/assets/graphviz/packages.dot
rename to src/reference/modules/reference/assets/graphviz/packages.dot
diff --git a/src/reference/modules/ROOT/assets/images/password-field.png b/src/reference/modules/reference/assets/images/password-field.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/password-field.png
rename to src/reference/modules/reference/assets/images/password-field.png
diff --git a/src/reference/modules/ROOT/assets/images/rudder-inventory-workflow.png b/src/reference/modules/reference/assets/images/rudder-inventory-workflow.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/rudder-inventory-workflow.png
rename to src/reference/modules/reference/assets/images/rudder-inventory-workflow.png
diff --git a/src/reference/modules/ROOT/assets/images/rudder-inventory-workflow.svg b/src/reference/modules/reference/assets/images/rudder-inventory-workflow.svg
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/rudder-inventory-workflow.svg
rename to src/reference/modules/reference/assets/images/rudder-inventory-workflow.svg
diff --git a/src/reference/modules/reference/nav.list b/src/reference/modules/reference/nav.list
new file mode 100644
index 00000000..6bb40310
--- /dev/null
+++ b/src/reference/modules/reference/nav.list
@@ -0,0 +1,3 @@
+index.adoc
+generic_methods.adoc
+techniques.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/active-techniques.adoc b/src/reference/modules/reference/pages/_partials/glossary/active-techniques.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/active-techniques.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/active-techniques.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/applied-policy.adoc b/src/reference/modules/reference/pages/_partials/glossary/applied-policy.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/applied-policy.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/applied-policy.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/cf-execd.adoc b/src/reference/modules/reference/pages/_partials/glossary/cf-execd.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/cf-execd.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/cf-execd.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/cf-serverd.adoc b/src/reference/modules/reference/pages/_partials/glossary/cf-serverd.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/cf-serverd.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/cf-serverd.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/cfengine.adoc b/src/reference/modules/reference/pages/_partials/glossary/cfengine.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/cfengine.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/cfengine.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/directive.adoc b/src/reference/modules/reference/pages/_partials/glossary/directive.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/directive.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/directive.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/dynamic-group.adoc b/src/reference/modules/reference/pages/_partials/glossary/dynamic-group.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/dynamic-group.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/dynamic-group.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/port-443-nodes.adoc b/src/reference/modules/reference/pages/_partials/glossary/port-443-nodes.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/port-443-nodes.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/port-443-nodes.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/port-443-user.adoc b/src/reference/modules/reference/pages/_partials/glossary/port-443-user.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/port-443-user.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/port-443-user.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/port-514.adoc b/src/reference/modules/reference/pages/_partials/glossary/port-514.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/port-514.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/port-514.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/port-5309-node.adoc b/src/reference/modules/reference/pages/_partials/glossary/port-5309-node.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/port-5309-node.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/port-5309-node.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/port-5309.adoc b/src/reference/modules/reference/pages/_partials/glossary/port-5309.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/port-5309.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/port-5309.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/port-5310.adoc b/src/reference/modules/reference/pages/_partials/glossary/port-5310.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/port-5310.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/port-5310.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/port-80.adoc b/src/reference/modules/reference/pages/_partials/glossary/port-80.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/port-80.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/port-80.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/rudder-node.adoc b/src/reference/modules/reference/pages/_partials/glossary/rudder-node.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/rudder-node.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/rudder-node.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/rudder-relay-server.adoc b/src/reference/modules/reference/pages/_partials/glossary/rudder-relay-server.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/rudder-relay-server.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/rudder-relay-server.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/rudder-root-server.adoc b/src/reference/modules/reference/pages/_partials/glossary/rudder-root-server.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/rudder-root-server.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/rudder-root-server.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/rudder.adoc b/src/reference/modules/reference/pages/_partials/glossary/rudder.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/rudder.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/rudder.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/rule.adoc b/src/reference/modules/reference/pages/_partials/glossary/rule.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/rule.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/rule.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/static-group.adoc b/src/reference/modules/reference/pages/_partials/glossary/static-group.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/static-group.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/static-group.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/technique-library.adoc b/src/reference/modules/reference/pages/_partials/glossary/technique-library.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/technique-library.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/technique-library.adoc
diff --git a/src/reference/modules/ROOT/pages/_partials/glossary/technique.adoc b/src/reference/modules/reference/pages/_partials/glossary/technique.adoc
similarity index 100%
rename from src/reference/modules/ROOT/pages/_partials/glossary/technique.adoc
rename to src/reference/modules/reference/pages/_partials/glossary/technique.adoc
diff --git a/src/reference/modules/ROOT/pages/90_reference/55_generic_methods_best_practices.adoc b/src/reference/modules/reference/pages/generic_methods.adoc
similarity index 79%
rename from src/reference/modules/ROOT/pages/90_reference/55_generic_methods_best_practices.adoc
rename to src/reference/modules/reference/pages/generic_methods.adoc
index 6d2801ea..60f61a15 100644
--- a/src/reference/modules/ROOT/pages/90_reference/55_generic_methods_best_practices.adoc
+++ b/src/reference/modules/reference/pages/generic_methods.adoc
@@ -1,3 +1,9 @@
+=== Generic methods
+
+This section documents all the generic methods available in the xref:23_configuration_management/41_technique_editor.adoc#technique-editor[Technique Editor].
+
+include::{partialsdir}/dyn/generic_methods.adoc[]
+
 === Best Practices for generic methods
 
 ==== Naming convention
diff --git a/src/reference/modules/reference/pages/index.adoc b/src/reference/modules/reference/pages/index.adoc
new file mode 100644
index 00000000..9cd00c76
--- /dev/null
+++ b/src/reference/modules/reference/pages/index.adoc
@@ -0,0 +1,708 @@
+== Reference
+
+This chapter contains the reference Rudder configuration files
+
+=== Inventory workflow, from nodes to Root server
+
+One of the main information workflows in a Rudder-managed system is the node
+inventory workflow.
+
+Node inventories are generated on nodes, are sent to the node policy server (be
+it a Relay or the Root server) up to the Root server, and stored in the
+Rudder database (technically an LDAP server), waiting for later use.
+
+The goal of this section is to detail the different steps and explain how to
+spot and solve a problem in the inventory workflow. The following diagram sums
+up the whole process.
+
+image:rudder-inventory-workflow.png[Inventory workflow, from node to
+Root server]
+
+
+==== Processing inventories on node
+
+Inventories are generated daily during an agent run in the 00:00-06:00 time
+frame window local to the node. The exact time is randomly spread on the time
+frame for a set of nodes, but each node will always keep the same time (modulo
+the exact time of the run).
+
+User can request the generation and upload of inventory with the command:
+
+----
+
+$ rudder agent inventory
+
+----
+
+In details, generating inventory does:
+
+* ask the node policy server for its UUID with an HTTP GET on
+  `https://server/uuid`,
+* generate an inventory by scanning the node hardware and software components,
+* optionally make a digital signature of the generated inventory file,
+* send file(s) to the node's policy server on `https://POLICY-SERVER/inventory-updates/`
+
+The individual commands can be displayed with the `-i` option to `rudder agent
+inventory` command.
+
+
+==== Processing inventories on relays
+
+On the Relay server:
+
+* the inventory is received by a `webdav` endpoint,
+* the `webdav` service stores the file in the folder
+  `/var/rudder/inventories/incoming`
+* on each agent run, files in `/var/rudder/inventories/incoming` are
+  forwarded to the Relay's own policy server.
+
+==== Processing inventories on root server
+
+On the Root server, the start of the workflow is the same as on a relay:
+
+* the inventory is received by a `webdav` endpoint,
+* the `webdav` service stores the file in the folder
+  `/var/rudder/inventories/incoming`
+
+Then, on each run, the agent:
+
+* looks for inventory / signature pairs:
+** inventories without a corresponding signature file are processed only if
+   they are older than 2 minutes,
+* POSTs the inventory or inventory+signature pair to the local API of the
+  "inventory-endpoint" application on `http://localhost:8080/endpoint/upload/`
+* the API makes some quick checks on inventory (well formed, mandatory fields...)
+  and :
+** if checks are OK, *ACCEPTS* (HTTP code `200`) the inventory,
+** if signature is configured to be mandatory and is missing, or if the
+   signature is not valid, refuses with *UNAUTHORIZED* error (HTTP code `401`)
+** else fails with a *PRECONDITION FAILED* error (HTTP code `412`)
+* on error, inventory file is moved to `/var/rudder/inventories/failed`,
+* on success:
+** the inventory file is moved to `/var/rudder/inventories/received`,
+** in parallel, _inventory web_ parses and updates Rudder database.
+
+==== Queue of inventories waiting to be parsed
+
+The _inventory endpoint_ has a limited number of slots available for successfully
+uploaded inventories to be queued while waiting for parsing.
+That number can be configured in file `/opt/rudder/etc/inventory-web.properties`:
+
+----
+
+waiting.inventory.queue.size=50
+
+----
+
+Since Rudder 3.1.18 / 3.2.11 / 4.0.3, the number of currently waiting
+inventories can be obtained via a local REST API call to
+`http://localhost:8080/endpoint/api/info`:
+
+----
+
+$ curl http://localhost:8080/endpoint/api/info
+
+{
+  "queueMaxSize": 50,
+  "queueFillCount": 50,
+  "queueSaturated": true
+}
+
+----
+
+=== Rudder Server data workflow
+
+To have a better understanding of the Archive feature of Rudder, a description
+of the data workflow can be useful.
+
+All the logic of Rudder Techniques is stored on the filesystem in
++/var/rudder/configuration-repository/techniques+.
+The files are under version control, using git.
+The tree is organized as following:
+
+. At the first level, techniques are classified in categories: applications,
+fileConfiguration, fileDistribution,  jobScheduling,  system,  systemSettings.
+The description of the category is included in +category.xml+.
+
+. At the second and third level, Technique identifier and version.
+
+. At the last level, each technique is described with a +metadata.xml+ file and
+one or several CFEngine template files (name ending with +.st+).
+
+[source,python]
+
+.An extract of Rudder Techniques filesystem tree
+
+----
+
++-- techniques
+|   +-- applications
+|   |   +-- apacheServer
+|   |   |   +-- 1.0
+|   |   |       +-- apacheServerConfiguration.st
+|   |   |       +-- apacheServerInstall.st
+|   |   |       +-- metadata.xml
+|   |   +-- aptPackageInstallation
+|   |   |   +-- 1.0
+|   |   |       +-- aptPackageInstallation.st
+|   |   |       +-- metadata.xml
+|   |   +-- aptPackageManagerSettings
+|   |   |   +-- 1.0
+|   |   |       +-- aptPackageManagerSettings.st
+|   |   |       +-- metadata.xml
+|   |   +-- category.xml
+|   |   +-- openvpnClient
+|   |   |   +-- 1.0
+|   |   |       +-- metadata.xml
+|   |   |       +-- openvpnClientConfiguration.st
+|   |   |       +-- openvpnInstall.st
+
+----
+
+At Rudder Server startup, or after the user has requested a reload of the
+Rudder Techniques, each +metadata.xml+ is mapped in memory, and used to create
+the LDAP subtree of Active Techniques.
+The LDAP tree contains also a set of subtrees for Node Groups, Rules and Node
+Configurations.
+
+At each change of the Node Configurations, Rudder Server creates CFEngine draft policies
+(+Cf3PolicyDraft+) that are stored in memory, and then invokes +cf-clerk+.
++cf-clerk+ finally generates the CFEngine promises for the Nodes.
+
+image::graphviz/data_workflow.png[Rudder data workflow]
+
+=== Configuration files for Rudder Server
+
+* /opt/rudder/etc/htpasswd-webdav
+
+* /opt/rudder/etc/inventory-web.properties
+
+* /opt/rudder/etc/logback.xml
+
+* /opt/rudder/etc/openldap/slapd.conf
+
+* /opt/rudder/etc/reportsInfo.xml
+
+* /opt/rudder/etc/rudder-users.xml
+
+* /opt/rudder/etc/rudder-web.properties
+
+=== Rudder Agent workflow
+
+In this chapter, we will have a more detailed view of the Rudder Agent
+workflow. What files and processes are created or modified at the installation
+of the Rudder Agent? What is happening when a new Node is created? What are the
+recurrent tasks performed by the Rudder Agent? How does the Rudder Server handle
+the requests coming from the Rudder Agent? The Rudder Agent workflow diagram
+summarizes the process that will be described in the next pages.
+
+image::graphviz/agent_workflow.png[Rudder agent workflow]
+
+Let's consider the Rudder Agent is installed and configured on the new Node.
+
+The Rudder Agent is regularly launched and performs following tasks
+sequentially, in this order:
+
+==== Request data from Rudder Server
+
+The first action of Rudder Agent is to fetch the +tools+ directory from Rudder
+Server. This directory is located at +/opt/rudder/share/tools+ on the Rudder
+Server and at +/var/rudder/tools+ on the Node. If this directory is already
+present, only changes will be updated.
+
+The agent then tries to fetch new Applied Policies from Rudder Server. Only
+requests from valid Nodes will be accepted. At first run and until the Node has
+been validated in Rudder, this step fails.
+
+==== Launch processes
+
+Ensure that the CFEngine community daemons +cf-execd+ and +cf-serverd+ are
+running. Try to start these daemons if they are not already started.
+
+Daily between 5:00 and 5:05, relaunch the CFEngine Community daemons +cf-execd+
+and +cf-serverd+.
+
+Add a line in +/etc/crontab+ to launch +cf-execd+ if it's not running.
+
+Ensure again that the CFEngine community daemons +cf-execd+ and +cf-serverd+
+are running. Try to start these daemons if they are not already started.
+
+==== Identify Rudder Root Server
+
+Ensure the +curl+ package is installed. Install the package if it's not
+present.
+
+Get the identifier of the Rudder Root Server, necessary to generate reports.
+The URL of the identifier is http://Rudder_root_server/uuid
+
+
+==== Inventory
+
+If no inventory has been sent since 8 hours, or if a forced inventory has been
+requested (class +force_inventory+ is defined), do and send an inventory to the
+server.
+----
+
+rudder agent inventory
+
+----
+
+No reports are generated until the Node has been validated in Rudder Server.
+
+==== Syslog
+
+After validation of the Node, the system log service of the Node is configured
+to send reports regularly to the server. Supported system log providers are:
++syslogd+, +rsyslogd+ and +syslog-ng+.
+
+==== Apply Directives
+
+Apply other policies and write reports locally.
+
+=== Configuration files for a Node
+
+* /etc/default/rudder-agent
+
+=== Packages organization
+
+==== Packages
+
+Rudder components are distributed as a set of packages.
+
+image::graphviz/packages.png[Rudder packages and their dependencies]
+
++rudder-webapp+::
+
+Package for the Rudder Web Application. It is the graphical interface for
+Rudder.
+
++rudder-inventory-endpoint+::
+
+Package for the inventory reception service. It has no graphical interface. This
+service uses HTTP as its transport protocol. It receives and parses the files
+sent by FusionInventory and inserts the valuable data into the LDAP database.
+
++rudder-jetty+::
+
+Application server for +rudder-webapp+ and +rudder-inventory-endpoint+. Both
+packages are written in 'Scala'. At compilation time, they are converted into
++.war+ files. They need to be run in an application server. 'Jetty' is this
+application server. It depends on a compatible Java 7 Runtime Environment.
+
++rudder-techniques+::
+
+Package for the Techniques. They are installed in
++/opt/rudder/share/techniques+. At runtime, the Techniques are
+copied into a 'git' repository in +/var/rudder/configuration-repository+. Therefore, the package depends
+on the +git+ package.
+
++rudder-inventory-ldap+::
+
+Package for the database containing the inventory and configuration information
+for each pending and validated Node. This 'LDAP' database is built upon the
+'OpenLDAP' server. The 'OpenLDAP' engine is contained in the package.
+
++rudder-reports+::
+
+Package for the database containing the logs sent by each Node and the reports
+computed by Rudder. This is a 'PostgreSQL' database using the 'PostgreSQL'
+engine of the distribution. The package has a dependency on the +postgresl+
+package, creates the database named +rudder+ and installs the inialisation
+scripts for that database in +/opt/rudder/etc/postgresql/*.sql+.
+
++rudder-server-root+::
+
+Package to ease installation of all Rudder services. This package depends on
+all above packages. It also
+
+- installs the Rudder configuration script:
+
+----
+
+/opt/rudder/bin/rudder-init
+
+----
+
+- installs the initial promises for the Root Server in:
+
+----
+
+/opt/rudder/share/initial-promises/
+
+----
+
+- installs the init scripts (and associated +default+ file):
+
+----
+
+/etc/init.d/rudder
+
+----
+
+- installs the logrotate configuration:
+
+----
+
+/etc/logrotate.d/rudder-server-root
+
+----
+
++rudder-agent+::
+
+One single package integrates everything needed for the Rudder Agent. It
+contains CFEngine Community, FusionInventory, and the initial promises for a
+Node. It also contains an init script:
+
+----
+
+/etc/init.d/rudder
+
+----
+
+The +rudder-agent+ package depends on a few libraries and utilities:
+
+* +OpenSSL+
+* +libpcre+
+* +liblmdb+ (On platforms where it is available as a package - on others the rudder-agent package bundles it)
+* +uuidgen+
+
+==== Software dependencies and third party components
+
+The Rudder Web application requires the installation of 'Apache 2 httpd',
+'JRE 7+', and 'cURL'; the LDAP Inventory service needs 'rsyslog' and
+the report service requires 'PostgreSQL'.
+
+When available, packages from your distribution are used. These packages are:
+
+Apache::
+
+The Apache Web server is used as a proxy to give HTTP access to the Web
+Application. It is also used to give writable WebDAV access for the inventory.
+The Nodes send their inventory to the WebDAV service, the inventory is stored in
++/var/rudder/inventories/incoming+.
+
+PostgreSQL::
+
+The PostgreSQL database is used to store logs sent by the Nodes and
+reports generated by Rudder. Rudder 4.0 is tested for PostgreSQL 9.2 and higher. It still works with versions 8.4 to 9.1, but no warranties are made that it will hold in the future. It is really recommended to migrate to PostgreSQL 9.2 at least.
+
+rsyslog and rsyslog-pgsql::
+
+The rsyslog server receives the logs from the nodes and inserts them into a
+PostgreSQL database. On SLES, the +rsyslog-pgsql+ package is not part of the
+distribution, it can be downloaded alongside Rudder packages.
+
+Java 7+ JRE::
+
+The Java runtime is needed by the Jetty application server. Where possible, the
+package from the distribution is used, else a Java RE must be downloaded
+from Oracle's website (http://www.java.com).
+
+curl::
+
+This package is used to send inventory files from
++/var/rudder/inventories/incoming+ to the Rudder Endpoint.
+
+git::
+
+The running Techniques Library is maintained as a git repository in
++/var/rudder/configuration-repository/techniques+.
+
+[[_building_the_rudder_agent]]
+=== Building the Rudder Agent
+
+==== Get source
+
+Make sure you have network access and the git command.
+
+Go to your build directory and checkout rudder-packages
+
+----
+
+cd /usr/src
+git clone https://github.com/Normation/rudder-packages.git
+cd rudder-packages
+
+----
+
+Choose the branch to build
+
+----
+
+# For branch 4.1 (branches before 4.1 are not supported)
+git checkout branches/rudder/4.1
+cd rudder-agent
+
+----
+
+Now choose one of the 3 next chapter, depending on your case: dpkg (debian-like package), rpm (redhat-like package) or other.
+
+==== Build a dpkg package
+
+Set the version to build:
+
+* Update the debian/changelog file to make the first entry match the version you want to build.
+* Edit the SOURCES/Makefile file and set the value of RUDDER_VERSION_TO_PACKAGE: see http://www.rudder-project.org/archives/ for a complete list of available versions.
+
+Run the dpkg package builder:
+
+----
+
+dpkg-buildpackage
+
+----
+
+The package will be stored in the parent directory.
+
+==== Build an rpm package
+
+Set the version to build:
+
+* Edit the SOURCES/Makefile file and set the value of RUDDER_VERSION_TO_PACKAGE: see http://www.rudder-project.org/archives/ for a complete list of available versions.
+
+Run the rpm package builder:
+
+----
+
+# make sure you are in rudder-agent, then
+ln -s `pwd` /root/rpmbuild
+rpmbuild -ba --define 'real_version 4.1.0' SPECS/*.spec
+
+----
+
+The package will be stored in RPMS/
+
+==== Build an agent locally
+
+Before building the agent, you must decide on some environment variables:
+
+
+* RUDDER_VERSION_TO_PACKAGE: the version of the sources that will be used, see http://www.rudder-project.org/archives/ for a complete list. If a 'rudder-sources' directory exists in SOURCES it will be used instead of downloading sources. The Variable still needs to be defined though.
+
+* DESTDIR: where to put the installation, use / to install on the system and leave the default of ./target to prepare a package.
+
+* USE_SYSTEM_OPENSSL: (default true), use system openssl (depends on libssl-dev) or build it with the agent.
+
+* USE_SYSTEM_LMDB: (default false), use system lmdb (depends on liblmdb-dev) or build it with the agent.
+
+* USE_SYSTEM_PCRE: (default true), use system pcre (depends on libpcre3-dev) or build it with the agent.
+
+* USE_SYSTEM_PERL: (default false), use system perl (depends on perl) or build it with the agent.
+
+* USE_SYSTEM_FUSION: (default false), use system fusion (depends on fusioninventory-agent), or build it with the agent. We advise you to use the Rudder version since it contains some patches.
+
+----
+
+# example
+env="RUDDER_VERSION_TO_PACKAGE=4.1.0 DESTDIR=/ USE_SYSTEM_PERL=true"
+make $env
+make install $env
+
+----
+
+
+// Man page
+
+=== Man pages
+
+include::{partialsdir}/dyn/rudder.adoc[leveloffset=+3]
+
+=== Package format
+
+Rudder has a specific package format for plugins.
+
+You can manage Rudder packages with the rudder-pkg command. This is the documentation of how they are created.
+
+==== File description
+
+A Rudder package file ends with the `.rpkg` extension.
+
+A Rudder package file is an archive file and can be managed with the 'ar' command.
+
+The archive contains:
+
+* A metadata file in JSON format named metadata
+* A tarball file in txz format named scripts.txz that contains package setup utility scripts
+* One or more tarball files in txz format that contain the package files
+
+The metadata file is a JSON file and is named 'metadata':
+
+----
+
+{
+  # the only currently supported type is "plugin" (mandatory)
+  "type": "plugin",
+  # the package name must consist of ascii characters without whitespace (mandatory)
+  "name": "myplugin",
+  # the package version has the form "rudder_major-version_major.version_minor" for a plugin (mandatory)
+  "version": "4.1-1.0",
+  # these are purely informative (optional)
+  "build-date": "2017-02-22T13:58:23Z",
+  "build-commit": "34aea1077f34e5abdaf88eb3455352aa4559ba8b",
+  # the list of jar files to enable if this is a webapp plugin (optional)
+  "jar-files": [ "test.jar" ],
+  # the list of packages or other plugins that this package depends on (optional)
+  # this is currently only informative
+  "depends": {
+    # dependency on a specific binary that must be in the PATH
+    "binary": [ "zip" ]
+    # dependencies on dpkg based systems
+    "dpkg": [ "apache2" ],
+    "rpm": [ ],
+    # dependency specific to debian-8
+    "debian-8": [ ],
+    "sles-11": [ ],
+    # rudder dependency, ie this is a Rudder format package
+    "rudder": [ "new-plugin" ]
+  },
+  # the plugin content (mandatory)
+  "content": {
+    # this will put the content of the extracted files.txz into /opt/rudder/share
+    "files.txz": "/opt/rudder/share",
+    "var_rudder.txz": "/var/rudder"
+  }
+}
+
+----
+
+To see a package metadata file use:
+
+----
+
+ar p package.rpkg metadata
+
+----
+
+The scripts.txz is a tarball that can contain zero or more executable files named:
+
+* preinst that will be run before installing the package files
+* postinst that will be run after installing the package files
+* prerm that will be run before removing the package files
+* postrm that will be run after removing the package files
+
+preinst and postinst take one parameter that can be 'install' or 'upgrade'. The value 'upgrade' is used when a previous version of the package is already installed.
+
+To create the scripts.txz file use:
+
+----
+
+tar cvfJ scripts.txz preinst postinst prerm postrm
+
+----
+
+To create a Rudder package file use the ar command:
+
+----
+
+ar r mypackage-4.1-3.0.rpkg metadata scripts.txz files.txz
+
+----
+
+Note that ar r inserts or replaces files so you can create your package with incremental inserts.
+
+To extract files, use 'ar x' instead.
+
+
+=== Rudder relay API
+
+The `rudder-server-relay` package provides an HTTP API.
+It is available on simple relays and root servers.
+It is an internal API, not exposed to users, and used
+to provide various Rudder features.
+
+==== Remote Run
+
+The remote run API is available at `https://relay/rudder/relay-api/remote-run`.
+It allows triggering a run on nodes (like with the `rudder remote run` command).
+
+===== Description
+
+The remote run API applies to all nodes that are below the target relay server, which means:
+
+* All nodes directly connected to the server
+* All the relays that have the target node as policy server, and all nodes that are below them
+
+In particular, it does not act on the target relay itself (except for the root server which is its own policy server).
+
+There are different methods, whether you want to trigger all nodes, or only a part of them.
+
+===== Security
+
+The remote run calls are not authenticated, but restricted to:
+
+* Local calls on the relay
+* The relay's policy server
+
+They require allowing the policy server to connect to its nodes on port 5309.
+
+===== Usage
+
+This API provides the following methods:
+
+* *POST* `/rudder/relay-api/remote-run/all`: Trigger a run on all nodes below the target relay.
+* *POST* `/rudder/relay-api/remote-run/nodes/`__node-id__: Trigger a run on the _node-id_ node (which must be under the target relay).
+* *POST* `/rudder/relay-api/remote-run/nodes` Trigger a run on the given nodes (which must be under the target relay), see the `nodes` parameter below.
+
+The general parameters are:
+
+* `keep_output` = *true* or *false*: Should the agent output be returned (default: *false*)
+* `asynchronous` = *true* or *false*: Should the server return immediately after triggering the agent or wait for remote runs to end (default: *false*)
+* `classes` = *class* or *class1,class2,etc.* for multiple classes: Classes to pass to the agent (default: none)
+
+And, only for the `/rudder/relay-api/remote-run/nodes` call:
+
+* `nodes` = *node_uuid* or *node_uuid1,node_uuid2,etc.* for multiple nodes: Nodes to trigger (default: none)
+
+==== Shared Files
+
+===== Description
+
+The goal of this API is to share a file from node to node. The source nodes uses an API call to send the file,
+and the destination node will get the file using the same protocol as files shared from the policy server.
+
+The relay that receives an API call to share a file (*PUT*) will:
+
+* share the file directly if the target node is one of its managed nodes.
+* send the file to a sub-relay if the target is somewhere under it
+* forward the file to its policy server if the target is nowhere under it
+
+The relay that receives a *HEAD* call will:
+
+* If the file exists, compare the provided hash with the hash of the stored file, and return the result (*true* or *false*).
+* If the file does not exist, return *false*.
+
+The ttl is stored along with the file, and a clean task will regularly run and check for outdated files to remove.
+
+There are ncf generic methods that allow easy access to those methods from the nodes.
+
+===== Security
+
+This call is open to all nodes in the allowed networks of the target relay.
+The sent files are signed with the node's key, and the signature is checked before the file is processed.
+
+===== Usage
+
+This API provides the following methods:
+
+* *PUT* `/shared-files/`__target-uuid__`/`__source-uuid__`/`__file-id__
+* *HEAD* `/shared-files/`__target-uuid__`/`__source-uuid__`/`__file-id__`?hash=`_file-hash_
+
+The common URL parameters are:
+
+* `target-uuid` = *destination_node_uuid*:  where to send the file to
+* `source-uuid` = *source_node_uuid*: who sent the file
+* `file-id` = *my_file_id*: under which name to store the file, this needs to be unique
+
+The URL parameters specific to the *HEAD* call are:
+
+* `file-hash` = *value of the hash*: hash of the shared file
+
+The following are only needed for the *PUT* call:
+
+* `hash_value` = *value of the hash*: hash of the shared file
+* `algorithm` = *sha1*, *sha256* or *sha512*: algorithm used to hash the file
+* `digest` = **: signature of the file
+* `pubkey` = **: public key
+* `ttl` = **: can be a number of second or a string of the long form "1day 2hours 3minute 4seconds" or abbreviated in the form "5h 3s"
+* `header` = *rudder-signature-v1*: signing format (for now, only one possible value)
+
diff --git a/src/reference/modules/ROOT/pages/90_reference/70_techniques.adoc b/src/reference/modules/reference/pages/techniques.adoc
similarity index 52%
rename from src/reference/modules/ROOT/pages/90_reference/70_techniques.adoc
rename to src/reference/modules/reference/pages/techniques.adoc
index 6ba05aba..759f59ed 100644
--- a/src/reference/modules/ROOT/pages/90_reference/70_techniques.adoc
+++ b/src/reference/modules/reference/pages/techniques.adoc
@@ -62,7 +62,7 @@ All the tag name in the .xml are in upper case, all the attributes are in camel
   Deprecation message                
   true/false                               
                                                   
-    OS Name                           
+    OS Name                          
     cfengine-community         
   
   true/false                   
@@ -128,7 +128,7 @@ A SECTION has the following attributes:
 
 NOTE: A multivalued section can only contain variable, and cannot contain section
 
-NOTE: If there are no SECTION defined with 'component="true"', a default SECTION for reporting will be generated, named after the id of the Technique (the folder name of the Technique) 
+NOTE: If there are no SECTION defined with 'component="true"', a default SECTION for reporting will be generated, named after the id of the Technique (the folder name of the Technique)
 
 ===== Variables definitions in the 
tags @@ -147,7 +147,7 @@ There are three tags to create a variable: true/false value - + defaultValue @@ -350,7 +350,7 @@ Hooks have only one parameter, which is a JSON entry, in the format { "parameters": - { + { "parameterName1":"parameterValue1", "parameterName2":"parameterValue2", }, @@ -384,3 +384,463 @@ For the moment, there is only one TRACKINGKEY, so it is not possible to have sev It is a side effect of the previous limitation. +=== Reports reference + +This page describes the concept behind the reporting in Rudder, and specifically how to write the Techniques to get proper reporting in Rudder + +==== Concepts + +Each Technique, when converted into a Directive and applied to a Node, must generate reports for Rudder to get proper compliance reports. This reports must contains specific information : + + * The Report type, that can be logs for information purpose or result to express a compliance + * The Rule Id (autogenerated) + * The Directive Id (autogenerated) + * The Version Id (revision of the Rule) (autogenerated) + * The name of the component the report is related to + * The value of the key variable in the component (or None if not available) + * The Execution Timestamp, to know in which execution of the agent the promise has been generated + +These reports are sent via Syslog to the Rudder Root Server, parsed and put in a database, that is queried to generate the reporting + +==== Report format + +A report has the following format : + +---- + +@@Technique@@Type@@RuleId@@DirectiveId@@VersionId@@Component@@Key@@ExecutionTimeStamp##NodeId@#HumanReadableMessage + +---- + + * Technique : Human readable Technique name + * Type : type of report (see bellow) + * RuleId : The Id of the Configuration Rule, autogenerated + * DirectiveId : The Id of the Directive, autogenerated + * VersionId : the revision of the ConfigurationRule, autogenerated + * Component : the name of the component this Directive is related to (if no component are defined in the 
metadata.xml, then the Technique name is used) + * Key : the value of the reference variable. If there is no reference variable, then the value None should be used + * ExecutionTimeStamp : the timestamp of the current CFEngine execution + * NodeId : the id of the node + * HumanReadableMessage : a message than a Human can understand + +===== Valid report types + +[cols="1,1,1,1,4", options="header"] +.Report Types +|=== +| Name +| Type +| Mode +| Max number +| Details + +| log_trace +| log +| any +| infinity +| Should be used for advanced debuging purpose only. + +| log_debug +| log +| any +| infinity +| Should be used for debug purpose only. + +| log_info +| log +| any +| infinity +| Use for standard logging purposes. + +| log_warn +| log +| any +| infinity +| Used for logging only for the moment. Should be used when something unexpected happens. + +| log_repaired +| log +| enforce +| infinity +| Used for logging purposes, to list all that is repaired by the promises. + +| result_na +| result +| enforce +| one per component/key +| Defines the status of the Component to Not Applicable (if there are no result_success, result_repaired, result_error). Should be used only when the component is not applicable because it does not match the target context. + +| result_success +| result +| enforce +| one per component/key +| Defines the status of the Component to Success (if there are no result_repaired or result_error). Should be used only when everything is already in the correct state in this component for this key. + +| result_repaired +| result +| enforce +| one per component/key +| Defines the status of the Component to Repaired (if there are no result_error). Should be used only when something was not in the correct state, but could be corrected. + +| result_error +| result +| enforce +| infinity per component/key +| Defines the status of the Component to Error. Should be used when something was not in the correct state, and could not be corrected. 
+ +| audit_na +| result +| audit +| one per component/key +| Defines the status of an Component to Not Applicable (if there are no result_success, result_repaired, result_error). Should be used only when the component is not applicable because it does not match the target context. + +| audit_compliant +nent was not applicable to the node. +| result +| audit +| one per component/key +| Defines the status of the Component to Compliant (if there are no audit_noncompliant or audit_error). Should be used only when everything is already in the correct state in this component for this key. + +| audit_noncompliant +| result +| audit +| one per component/key +| Defines the status of the Component to Non Compliant (if there are no audit_error). Should be used only when something was not in the correct state. + +| audit_error +| result +| audit +| infinity per component/key +| Defines the status of the Component to Error. Should be used when the audit could not be done or was interrupted. + +|=== + +Variables used to generate the reports + +Some facilities have been created to help putting the right values at the right place + + * `&TRACKINGKEY&`: this is an auto generated variable, put in the technique file, that Rudder will replace when writing the promises by + +---- + +
RuleId@@DirectiveId@@VersionId
+
+----
+
+   * `$(g.execRun)`: this is replaced at runtime by CFEngine 3 to the current execution time
+   * `$(g.uuid)`: this is replaced at runtime by CFEngine 3 to the Node Id
+
+
+=== Syntax of the Techniques
+
+==== Generalities
+
+The Techniques use the http://www.stringtemplate.org/[StringTemplate] engine. A Technique *must* have the .st extension to be extended by Rudder (have some variables replaced, some part removed or added given some parameters).
+
+==== Variable replacement
+
+Note: Rudder uses a StringTemplate grammar slightly different from the default one. Rather than using "$" as a variable identifier, the Techniques use "&" to avoid collision with the CFEngine variables
+
+===== Single-valued variable replacement
+
+----
+
+&UUID&
+
+----
+
+   * Will be replaced by the value of the variable UUID
+
+===== Replacement of a variable with one or more values
+
+----
+
+&DNS_RESOLVERS: { "&it&" };separator=", "&
+
+----
+
+   * Will be replaced by `"8.8.8.8", "8.8.4.4"`
+   * Here, `&it&` is an alias for the current item in the list (with no confusion, because there is only one variable)
+
+----
+
+&POLICYCHILDREN, CHILDRENID : {host, uuid |
+"/var/rudder/share/&uuid&/"
+maproot => { host2ip("&host&"), escape("&host&") },
+admit => { host2ip("&host&"), escape("&host&") };
+
+} &
+
+----
+
+   * `host` is an alias for the current value of POLICYCHILDREN
+   * `uuid` is an alias for the current value of CHILDRENID
+   * Both items are iterated at the same time, so both lists must have the same length
+
+===== Replacement of a variable with one or more values, writing an index all along
+
+----
+
+&FILE_AND_FOLDER_MANAGEMENT_PATH:{path |"file[&i&][path]" string => "&path&";
+}&
+
+----
+
+   * _i_ is an iterator, starting at 1
+
+The result would be:
+
+----
+
+"file[1][path]" string => "/var";
+"file[2][path]" string => "/bin";
+
+----
+
+===== Conditional writing of a section
+
+----
+
+&if(INITIAL)&
+
+something
+
+&endif&
+
+----
+
+The variable must either be:
+
+   * A boolean: If its value is true, then the section will be displayed
+   * A variable with the parameter `MAYBEEMPTY="true"`: If the value is not set, then the section won't be displayed, otherwise it will be displayed
+
+More information can be found here: https://theantlrguy.atlassian.net/wiki/display/ST/ST+condensed+--+Templates+and+expressions
+
+===== Unique identifier of Directive for Techniques with separated policy generation
+
+As of Rudder 4.3, Techniques with separated policy generation (see tag POLICYGENERATION in metadata.xml) need to have a way to identify uniquely their generated files, and bundles and methods. The special placeholder RudderUniqueID is replaced at generation by the identifier of the Directive. It can be used anywhere in the .st files, or even in the OUTPATH.
+
+=== Syntax of the Techniques
+
+==== Generalities
+
+The Techniques use the http://www.stringtemplate.org/[StringTemplate] engine. A Technique *must* have the .st extension to be extended by Rudder (have some variables replaced, some part removed or added given some parameters).
+
+==== Variable replacement
+
+Note: Rudder uses a StringTemplate grammar slightly different from the default one. Rather than using "$" as a variable identifier, the Techniques use "&" to avoid collision with the CFEngine variables
+
+===== Single-valued variable replacement
+
+----
+
+&UUID&
+
+----
+
+   * Will be replaced by the value of the variable UUID
+
+===== Replacement of a variable with one or more values
+
+----
+
+&DNS_RESOLVERS: { "&it&" };separator=", "&
+
+----
+
+   * Will be replaced by `"8.8.8.8", "8.8.4.4"`
+   * Here, `&it&` is an alias for the current item in the list (with no confusion, because there is only one variable)
+
+----
+
+&POLICYCHILDREN, CHILDRENID : {host, uuid |
+"/var/rudder/share/&uuid&/"
+maproot => { host2ip("&host&"), escape("&host&") },
+admit => { host2ip("&host&"), escape("&host&") };
+
+} &
+
+----
+
+   * `host` is an alias for the current value of POLICYCHILDREN
+   * `uuid` is an alias for the current value of CHILDRENID
+   * Both items are iterated at the same time, so both lists must have the same length
+
+===== Replacement of a variable with one or more values, writing an index all along
+
+----
+
+&FILE_AND_FOLDER_MANAGEMENT_PATH:{path |"file[&i&][path]" string => "&path&";
+}&
+
+----
+
+   * _i_ is an iterator, starting at 1
+
+The result would be:
+
+----
+
+"file[1][path]" string => "/var";
+"file[2][path]" string => "/bin";
+
+----
+
+===== Conditional writing of a section
+
+----
+
+&if(INITIAL)&
+
+something
+
+&endif&
+
+----
+
+The variable must either be:
+
+   * A boolean: If its value is true, then the section will be displayed
+   * A variable with the parameter `MAYBEEMPTY="true"`: If the value is not set, then the section won't be displayed, otherwise it will be displayed
+
+More information can be found here: https://theantlrguy.atlassian.net/wiki/display/ST/ST+condensed+--+Templates+and+expressions
+
+===== Unique identifier of Directive for Techniques with separated policy generation
+
+As of Rudder 4.3, Techniques with separated policy generation (see tag POLICYGENERATION in metadata.xml) need to have a way to identify uniquely their generated files, and bundles and methods. The special placeholder RudderUniqueID is replaced at generation by the identifier of the Directive. It can be used anywhere in the .st files, or even in the OUTPATH.
+
+=== Best Practices for Techniques
+
+==== Naming convention
+
+   * The name of bundle and classes should be written with underscore (i.e: this_is_a_good_example) instead of CamelCase (i.e: ThisIsABadExample)
+   * All variable, class and bundle names should be prefixed by "rudder_"
+   * The bundle entry point for the Technique should be named rudder_
+   * The bundles which makes all the actions should be suffixed by a meaningful name ( "rudder__installation", "rudder__configuration", "rudder__reporting", ..). This rule applies even if there is only one bundle
+   * The prefix of classes should all be "rudder__"
+   * The classes defined as an outcome should be named:
+
+      * `rudder___kept`
+      * `rudder___repaired`
+      * `rudder___failed`
+      * `rudder___denied`
+      * `rudder___timeout`
+      * `rudder___error` (error include failed, denied and timeout)
+
+   * The name of the bodies written in the Rudder Library should be prefixed: `rudder_common_`
+
+==== Raising classes
+
+   * `rudder___error` should be raised simultaneously as `rudder___failed`, `rudder___denied` or `rudder___timeout`.
+   * The body *rudder_common_classes* automatically abide by this rule
+
+==== Writing convention
+
+===== Technique naming guidelines
+
+The following rules should be followed when naming a new Technique:
+
+   * Try to keep names as short as possible, to improve readability
+   * Read the existing technique list, and particularly techniques related to what you are writing. The new names should be consistent with existing ones.
+   * The name should be a nominal group, use "File content" and "Service state" but never "Manage file content" or "Set Service state". It describes the target of the action, not the action itself.
+   * The name should look like: General Concept (package, file, etc.) + Source (from file, etc.) + Implementation details (platform, software name, etc.)
+     * Package sources (Zypper)
+     * HTTP server (Apache)
+     * Variable from local file (string)
+   * The general idea is to go from the most general information to the most precise.
+   * Use "directory" and never "folder"
+   * Use "settings" and never "configuration"
+   * Use *sentence case*, only the first word is capitalised, like in a normal sentence ("Variable from local file" and not "Variable from Local File").
+
+===== In the Technique
+
+   * We try to follow CFEngine conventions but with some exceptions like using brackets "{}" instead of parentheses "()"
+   * When defining bundles or bodies, the opening bracket should be on a dedicated line. Example:
+
+----
+
+bundle common control
+{
+  bundlesequence => { "exemple" };
+}
+
+----
+
+   * Indentation should be made by spaces. An indentation increment is equal to two spaces
+   * The promise type should be indented by two spaces (instead of being at the same indentation level than the bundle name)
+   * The class expression should be indented by four spaces (two spaces after the promise type)
+   * The promiser should be indented by six spaces (two spaces after the class expression or four spaces after the promise type if no class expression is defined)
+   * Attributes of promises should be indented by eight spaces (two spaces after the promiser) and it should be only one attribute by line.
+   * Attribute's arrows '=>' should all be at the same level, one character after the largest attribute name
+
+----
+
+bundle agent example
+{
+  type:
+      "promiser"
+        attribute  => "value1";
+
+    class::
+      "promiser2"
+        attribute2 => "value2";
+}
+
+----
+
+   * Attributes of promise type "vars" and "classes" should be on only one line except if there are more than one attribute.
+   * For promise type "vars" and "classes" on one line, attribute names and the arrows should be aligned
+   * A list should be written multilines if it needs more than 80 characters in one line
+   * Multilines list should have comma after each element, except the last one.
+   * Multilines list should begin with only a bracket "{"
+
+----
+
+    vars:
+        "value" slist =>
+          {
+            "one",
+            "two",
+            "three"
+          };
+
+----
+
+   * The name of the variable in argument of the bundle should be named "params"
+   * The call of the variables should be made by using brackets `${var_correctly_called}` instead of parentheses `$(var_wrongly_called)`
+   * Alternation of brackets and parentheses is tolerated when many variables are nested, for more readability: `${var_lv1[$(var_lvl2[${var_lvl3}])]}`
+   * A Technique should have its bundle written with parameters
+   * All the bundles should have as first argument "prefix" which contains the prefix to use for all the classes made from an outcome. This prefix should never be hardcoded in the bundle.
+   * Always write comments with # when a promise needs more than 30 seconds of thought.
+   * If classes should be created in order to iterate, to work around the normal ordering (i.e: "iteration_1", "iteration_2", "iteration_3"), they should always be defined at the end of the promise type "classes".
+   * The order to the promise type must always be in the order of the normal ordering : https://docs.cfengine.com/docs/3.10/reference-language-concepts-normal-ordering.html
+   * StringTemplate variables should always be written in UPPERCASE
+   * StringTemplate variables should be written with underscore
+   * StringTemplate variables should always be prefixed by the Technique name in uppercase too. i.e: `CHECK_GENERIC_FILE_FILE_NAME`
+
+===== In the metadata.xml
+
+   * Name of sections should always be written in literary English (no CamelCase or underscores).
+   * The value of variable "Don't change" should always be "dontchange" or "" if easier.
+
+==== Files convention
+
+   * File names in a Technique should not be prefixed by the name of the Technique
+   * When a Technique needs specific bodies, the bodies should be written in a bodies.st file
+   * The file containing the bundle which makes all the actions (and containing the bundle "run") should be named "main.cf"
+   * The file containing all the variables and calling the bundle "run" should be named config.st
+   * Initialization of a new Technique should always be made from the file "technique-metadata-sample.xml" which is present on the root of the "rudder-techniques" repository
+   * Rudder standard library should be located in "common" Technique
+
+==== Maintenance
+
+   * These rules were introduced after the 2.5 release of Rudder and before the 2.6 release. Therefore, they were enforced as of rudder-techniques-2.6.*.
+   * Always follow the conventions above when Techniques are updated but only for the lines edited. This rule concerns the Techniques on all the branches of git.
+   * On any branches that have released versions on them, we only allow minimal modifications. No lines should be modified if not to fix a bug (respecting these best practices is not currently considered a bug).
+
+==== Testing
+
+   * There is a test suite in scripts/check-techniques.sh that checks metadata.xml and normal ordering in code
+   * The list of all maintained techniques (techniques and versions) is in maintained-techniques file, and should be updated when new techniques or versions are created.
+
+
diff --git a/src/reference/modules/ROOT/assets/graphviz/generate_policy_workflow.dot b/src/reference/modules/usage/assets/graphviz/generate_policy_workflow.dot
similarity index 100%
rename from src/reference/modules/ROOT/assets/graphviz/generate_policy_workflow.dot
rename to src/reference/modules/usage/assets/graphviz/generate_policy_workflow.dot
diff --git a/src/reference/modules/ROOT/assets/images/Directive_management.png b/src/reference/modules/usage/assets/images/Directive_management.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/Directive_management.png
rename to src/reference/modules/usage/assets/images/Directive_management.png
diff --git a/src/reference/modules/ROOT/assets/images/Global_run_settings.png b/src/reference/modules/usage/assets/images/Global_run_settings.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/Global_run_settings.png
rename to src/reference/modules/usage/assets/images/Global_run_settings.png
diff --git a/src/reference/modules/ROOT/assets/images/Node_settings.png b/src/reference/modules/usage/assets/images/Node_settings.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/Node_settings.png
rename to src/reference/modules/usage/assets/images/Node_settings.png
diff --git a/src/reference/modules/ROOT/assets/images/Rule_compliance.png b/src/reference/modules/usage/assets/images/Rule_compliance.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/Rule_compliance.png
rename to src/reference/modules/usage/assets/images/Rule_compliance.png
diff --git a/src/reference/modules/ROOT/assets/images/Rule_config.png b/src/reference/modules/usage/assets/images/Rule_config.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/Rule_config.png
rename to src/reference/modules/usage/assets/images/Rule_config.png
diff --git a/src/reference/modules/ROOT/assets/images/Rule_management.png b/src/reference/modules/usage/assets/images/Rule_management.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/Rule_management.png
rename to src/reference/modules/usage/assets/images/Rule_management.png
diff --git a/src/reference/modules/ROOT/assets/images/audit_mode_general_overview.png b/src/reference/modules/usage/assets/images/audit_mode_general_overview.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/audit_mode_general_overview.png
rename to src/reference/modules/usage/assets/images/audit_mode_general_overview.png
diff --git a/src/reference/modules/ROOT/assets/images/configuration_concepts.svg b/src/reference/modules/usage/assets/images/configuration_concepts.svg
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/configuration_concepts.svg
rename to src/reference/modules/usage/assets/images/configuration_concepts.svg
diff --git a/src/reference/modules/ROOT/assets/images/event_log.png b/src/reference/modules/usage/assets/images/event_log.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/event_log.png
rename to src/reference/modules/usage/assets/images/event_log.png
diff --git a/src/reference/modules/ROOT/assets/images/groups.png b/src/reference/modules/usage/assets/images/groups.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/groups.png
rename to src/reference/modules/usage/assets/images/groups.png
diff --git a/src/reference/modules/ROOT/assets/images/node-compliance.png b/src/reference/modules/usage/assets/images/node-compliance.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/node-compliance.png
rename to src/reference/modules/usage/assets/images/node-compliance.png
diff --git a/src/reference/modules/ROOT/assets/images/node-lifecycle-accept-state.png b/src/reference/modules/usage/assets/images/node-lifecycle-accept-state.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/node-lifecycle-accept-state.png
rename to src/reference/modules/usage/assets/images/node-lifecycle-accept-state.png
diff --git a/src/reference/modules/ROOT/assets/images/node-lifecycle-nodelist.png b/src/reference/modules/usage/assets/images/node-lifecycle-nodelist.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/node-lifecycle-nodelist.png
rename to src/reference/modules/usage/assets/images/node-lifecycle-nodelist.png
diff --git a/src/reference/modules/ROOT/assets/images/node-lifecycle-settings.png b/src/reference/modules/usage/assets/images/node-lifecycle-settings.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/node-lifecycle-settings.png
rename to src/reference/modules/usage/assets/images/node-lifecycle-settings.png
diff --git a/src/reference/modules/ROOT/assets/images/node_workflow.svg b/src/reference/modules/usage/assets/images/node_workflow.svg
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/node_workflow.svg
rename to src/reference/modules/usage/assets/images/node_workflow.svg
diff --git a/src/reference/modules/ROOT/assets/images/nodes.png b/src/reference/modules/usage/assets/images/nodes.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/nodes.png
rename to src/reference/modules/usage/assets/images/nodes.png
diff --git a/src/reference/modules/ROOT/assets/images/objects-used-in-generation.png b/src/reference/modules/usage/assets/images/objects-used-in-generation.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/objects-used-in-generation.png
rename to src/reference/modules/usage/assets/images/objects-used-in-generation.png
diff --git a/src/reference/modules/ROOT/assets/images/policy_generation.png b/src/reference/modules/usage/assets/images/policy_generation.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/policy_generation.png
rename to src/reference/modules/usage/assets/images/policy_generation.png
diff --git a/src/reference/modules/ROOT/assets/images/rudder-admin-settings.png b/src/reference/modules/usage/assets/images/rudder-admin-settings.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/rudder-admin-settings.png
rename to src/reference/modules/usage/assets/images/rudder-admin-settings.png
diff --git a/src/reference/modules/ROOT/assets/images/rudder-home.png b/src/reference/modules/usage/assets/images/rudder-home.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/rudder-home.png
rename to src/reference/modules/usage/assets/images/rudder-home.png
diff --git a/src/reference/modules/ROOT/assets/images/rudder-parameters.png b/src/reference/modules/usage/assets/images/rudder-parameters.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/rudder-parameters.png
rename to src/reference/modules/usage/assets/images/rudder-parameters.png
diff --git a/src/reference/modules/ROOT/assets/images/rudder-quicksearch.png b/src/reference/modules/usage/assets/images/rudder-quicksearch.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/rudder-quicksearch.png
rename to src/reference/modules/usage/assets/images/rudder-quicksearch.png
diff --git a/src/reference/modules/ROOT/assets/images/rudder-rule-compliance-history.png b/src/reference/modules/usage/assets/images/rudder-rule-compliance-history.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/rudder-rule-compliance-history.png
rename to src/reference/modules/usage/assets/images/rudder-rule-compliance-history.png
diff --git a/src/reference/modules/ROOT/assets/images/rudder-rule-compliance.png b/src/reference/modules/usage/assets/images/rudder-rule-compliance.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/rudder-rule-compliance.png
rename to src/reference/modules/usage/assets/images/rudder-rule-compliance.png
diff --git a/src/reference/modules/ROOT/assets/images/technique_editor/1-rudder-technique-editor.png b/src/reference/modules/usage/assets/images/technique_editor/1-rudder-technique-editor.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/technique_editor/1-rudder-technique-editor.png
rename to src/reference/modules/usage/assets/images/technique_editor/1-rudder-technique-editor.png
diff --git a/src/reference/modules/ROOT/assets/images/technique_editor/2-list-techniques.png b/src/reference/modules/usage/assets/images/technique_editor/2-list-techniques.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/technique_editor/2-list-techniques.png
rename to src/reference/modules/usage/assets/images/technique_editor/2-list-techniques.png
diff --git a/src/reference/modules/ROOT/assets/images/technique_editor/3-ntp-configuration.png b/src/reference/modules/usage/assets/images/technique_editor/3-ntp-configuration.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/technique_editor/3-ntp-configuration.png
rename to src/reference/modules/usage/assets/images/technique_editor/3-ntp-configuration.png
diff --git a/src/reference/modules/ROOT/assets/images/technique_editor/4-list-generics-method.png b/src/reference/modules/usage/assets/images/technique_editor/4-list-generics-method.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/technique_editor/4-list-generics-method.png
rename to src/reference/modules/usage/assets/images/technique_editor/4-list-generics-method.png
diff --git a/src/reference/modules/ROOT/assets/images/technique_editor/5-configure-generic-method.png b/src/reference/modules/usage/assets/images/technique_editor/5-configure-generic-method.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/technique_editor/5-configure-generic-method.png
rename to src/reference/modules/usage/assets/images/technique_editor/5-configure-generic-method.png
diff --git a/src/reference/modules/usage/assets/images/technique_editor/rudder-technique-editor-filter.png b/src/reference/modules/usage/assets/images/technique_editor/rudder-technique-editor-filter.png
new file mode 100644
index 00000000..170fc68d
Binary files /dev/null and b/src/reference/modules/usage/assets/images/technique_editor/rudder-technique-editor-filter.png differ
diff --git a/src/reference/modules/ROOT/assets/images/workflows/Conflict.png b/src/reference/modules/usage/assets/images/workflows/Conflict.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/workflows/Conflict.png
rename to src/reference/modules/usage/assets/images/workflows/Conflict.png
diff --git a/src/reference/modules/ROOT/assets/images/workflows/Details.png b/src/reference/modules/usage/assets/images/workflows/Details.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/workflows/Details.png
rename to src/reference/modules/usage/assets/images/workflows/Details.png
diff --git a/src/reference/modules/ROOT/assets/images/workflows/Diff.png b/src/reference/modules/usage/assets/images/workflows/Diff.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/workflows/Diff.png
rename to src/reference/modules/usage/assets/images/workflows/Diff.png
diff --git a/src/reference/modules/ROOT/assets/images/workflows/Enabling.png b/src/reference/modules/usage/assets/images/workflows/Enabling.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/workflows/Enabling.png
rename to src/reference/modules/usage/assets/images/workflows/Enabling.png
diff --git a/src/reference/modules/ROOT/assets/images/workflows/History.png b/src/reference/modules/usage/assets/images/workflows/History.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/workflows/History.png
rename to src/reference/modules/usage/assets/images/workflows/History.png
diff --git a/src/reference/modules/ROOT/assets/images/workflows/Informations.png b/src/reference/modules/usage/assets/images/workflows/Informations.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/workflows/Informations.png
rename to src/reference/modules/usage/assets/images/workflows/Informations.png
diff --git a/src/reference/modules/ROOT/assets/images/workflows/Management.png b/src/reference/modules/usage/assets/images/workflows/Management.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/workflows/Management.png
rename to src/reference/modules/usage/assets/images/workflows/Management.png
diff --git a/src/reference/modules/ROOT/assets/images/workflows/Notification.png b/src/reference/modules/usage/assets/images/workflows/Notification.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/workflows/Notification.png
rename to src/reference/modules/usage/assets/images/workflows/Notification.png
diff --git a/src/reference/modules/ROOT/assets/images/workflows/Popup.png b/src/reference/modules/usage/assets/images/workflows/Popup.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/workflows/Popup.png
rename to src/reference/modules/usage/assets/images/workflows/Popup.png
diff --git a/src/reference/modules/ROOT/assets/images/workflows/Rule_Update_Diff.png b/src/reference/modules/usage/assets/images/workflows/Rule_Update_Diff.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/workflows/Rule_Update_Diff.png
rename to src/reference/modules/usage/assets/images/workflows/Rule_Update_Diff.png
diff --git a/src/reference/modules/ROOT/assets/images/workflows/States.png b/src/reference/modules/usage/assets/images/workflows/States.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/workflows/States.png
rename to src/reference/modules/usage/assets/images/workflows/States.png
diff --git a/src/reference/modules/ROOT/assets/images/workflows/Table.png b/src/reference/modules/usage/assets/images/workflows/Table.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/workflows/Table.png
rename to src/reference/modules/usage/assets/images/workflows/Table.png
diff --git a/src/reference/modules/ROOT/assets/images/workflows/Validation.png b/src/reference/modules/usage/assets/images/workflows/Validation.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/workflows/Validation.png
rename to src/reference/modules/usage/assets/images/workflows/Validation.png
diff --git a/src/reference/modules/ROOT/assets/images/workflows/Warning.png b/src/reference/modules/usage/assets/images/workflows/Warning.png
similarity index 100%
rename from src/reference/modules/ROOT/assets/images/workflows/Warning.png
rename to src/reference/modules/usage/assets/images/workflows/Warning.png
diff --git a/src/reference/modules/usage/nav.list b/src/reference/modules/usage/nav.list
new file mode 100644
index 00000000..d329d0b6
--- /dev/null
+++ b/src/reference/modules/usage/nav.list
@@ -0,0 +1,6 @@
+web_interface.adoc
+node_management.adoc
+advanced_node_management.adoc
+configuration_management.adoc
+advanced_configuration_management.adoc
+security.adoc
diff --git a/src/reference/modules/usage/pages/advanced_configuration_management.adoc b/src/reference/modules/usage/pages/advanced_configuration_management.adoc
new file mode 100644
index 00000000..b6d1befa
--- /dev/null
+++ b/src/reference/modules/usage/pages/advanced_configuration_management.adoc
@@ -0,0 +1,911 @@
+=== Policy generation
+
+Each time a change occurs in the Rudder interface, having an impact on the
+policy needed by a node, it is necessary to regenerate the modified
+promises for every impacted node. By default this process is launched after each
+change.
+
+The process of policy generation:
+
+* Use configured policies and information about the nodes to generate
+  the files defining the policy that reflects the desired state
+* Compute and store expected reports that will be produced when executing these policies
+* Check the validity of the generated policies
+* Replace the old version of the policies by the new one for impacted node
+* Restart the policy server on the Rudder central server if authorizations have changed
+
+image::objects-used-in-generation.png[Objects and parameters used in policy generation]
+
+You can customize some of these actions and add new ones using the xref:42_advanced_configuration_management/60_server_event_hooks.adoc#_server_event_hooks[Server Event Hooks].
+
+image::policy_generation.png[Status of policy generation]
+
+==== +Update policies+ button
+
+The button +Update policies+ on the top right of the screen, in the +Status+ menu, allows you to force
+the regeneration of the policies. As changes in the inventory of the nodes are
+not automatically taken into account by Rudder, this feature can be useful
+after some changes impacting the inventory information.
+
+==== +Regenerate all policies+ button
+
+The button +Regenerate all policies+ on the top right of the screen, in the +Status+ menu, allows you to force
+the regeneration of all policies. It will clear all internal caches, and force a complete
+computation of the policies. This is generally useful to make sure everything is correct after a problem
+on the central server.
+
+=== Technique creation
+
+Rudder provides a set of pre-defined Techniques that cover some basic
+configuration and system administration needs. You can also create your own
+Techniques, to implement new functionalities or configure new services. This
+paragraph will walk you through this process.
+
+There are two ways to configure new Techniques, either with the web
+Technique Editor in Rudder or by coding them by hand.
+
+The use of the Technique Editor (code name: http://www.ncf.io/pages/ncf-builder.html[ncf-builder])
+is the easiest way to create new Techniques and is fully integrated with Rudder. On the other hand,
+it does not allow the same level of complexity and expressiveness as coding a Technique by hand.
+Of course, coding new Techniques by hand is a more involved process that requires learning how the
+Technique description language and Technique reporting work.
+
+We advise always starting with the Technique Editor when creating new Techniques, and switching to
+hand-coded creation only if you discover specific needs not addressed that way.
+
+==== Recommended solution: Technique Editor
+
+The easiest way to create your own Techniques is to use the Technique editor,
+a web interface to create and manage Techniques based on the ncf framework.
+
+Creating a technique in the Technique Editor will generate a Technique for Rudder automatically.
+You can then use that Technique to create a Directive that will be applied on your Nodes thanks
+to a Rule.
+
+For more information about ncf and the Technique editor, you can visit: http://www.ncf.io/
+
+===== Using the Technique Editor
+
+The Technique Editor is available in the Directive screen or directly in the Utilities menu.
+Once on the Technique Editor, creating a Technique simply consists of adding the desired "Generic Method"
+building blocks and configuring them.
+
+When the Technique matches your expectations, hitting save will automatically add it to the available
+Techniques in the Directive screen of Rudder (in the "User Technique" category).
+
+
+===== Logs
+
+In case of any issue with the Technique Editor, the first step should always be to look for its log messages.
+These logs are sent to Apache system error logs:
+
+- On Debian, by default: `/var/log/apache2/error.log`
+- On RHEL, by default: `/var/log/httpd/error_log`
+
+
+==== Understanding how Technique Editor works
+
+In this chapter, we are giving an overview about how the Technique Editor works and how it is
+integrated with the main Rudder application.
+
+===== Directory layout
+
+As explained in http://www.ncf.io/, ncf uses a structured directory tree composed of several layers of logic,
+from internal libraries to Techniques and user services. All the files and logic in these folders will be named
+"library" for simplicity.
+
+ncf directory structure exists in two root folders:
+
+* `/usr/share/ncf/tree`
+** This is the standard library installation folder. It is created and updated by
+   the ncf package. This folder will be completely overwritten when you update the ncf
+   package, so you should never modify anything here: it will be lost at some point.
+
+* `/var/rudder/configuration-repository/ncf`
+** This is where you add your own ncf Generic Methods and Techniques.
+   Techniques created with the Technique Editor will be located here, and both
+   Generic and Techniques in that place will be accessible in the Technique Editor
+   alongside what is provided by the standard library.
+
+==== Sharing ncf code with nodes
+
+To share those folders to all nodes, Rudder makes a copy of these folders in two
+places:
+
+* `/var/rudder/ncf`, for part common to all nodes - so NOT techniques,
+  ** `/var/rudder/ncf/local` is a copy of node-independent directories from
+     `/var/rudder/configuration-repository/ncf`, so almost everything *BUT*
+     `/var/rudder/configuration-repository/ncf/50_techniques`.
+  ** `/var/rudder/ncf/common` is a copy of `/usr/share/ncf/tree`
+* `/var/rudder/share/xxxx-yyyy-node-id-zzzz/rules/cfengine-community/Technique_Name/1.0/Technique_Name.cf`
+  for techniques, with one directory for each technique applied to the node.
+* `/var/rudder/share/xxxx-yyyy-node-id-zzzz/rules/cfengine-community/rudder_expected_reports.csv`
+  contains information about report expected for all ncf techniques applied to that node.
+
+
+
+Files in `/var/rudder/ncf` are synchronized automatically by the "rudder agent update"
+command when the agent runs on the server. So any modification done in files
+in these directories will be lost at the next synchronization.
+
+Files under `/var/rudder/share/` are updated during policy generation.
+
+A node updates its ncf local library by copying the content of these two folders
+during its promise update phase.
+
+===== From ncf Technique Editor to Rudder Techniques and back
+
+Here we will explain how the Technique Editor integration to Rudder is done to
+transform ncf techniques into full-fledged Rudder ones. We will also get the
+big picture of the web flow and the resulting events triggered on the Rudder server
+side.
+
+Each action in the Technique Editor interface produces requests to an API defined over ncf.
+
+All of the requests are authenticated thanks to a token passed in the JSESSIONID header.
+The token is generated when an authenticated user is connected to the Rudder interface
+(typically thanks to his browser).
+
+That token is shared to the Technique Editor interface, which itself passes the
+JSESSIONID header to all requests.
+
+If you have authentication issue, check that your Rudder session is not expired.
+
+Get request::
+
+Get request will get all Techniques and Generic Methods in a path passed as parameters of the
+request in the "path" javascript variable:
+
+https://your.rudder.server/ncf-builder/#!?path=/var/rudder/configuration-repository/ncf
+
+Get requests are triggered when accessing Technique editor.
+
+The ncf API will parse all files in the parameter path by running "cf-promises -pjson" on all Techniques,
+checking that all Techniques are correctly formed.
+
+The ncf API will also look at all Generic Methods description data to build the catalog of available
+Generic Methods.
+
+The resulting information is sent back to the Technique Editor for display.
+
+Post requests::
+
+Post requests are issued when a Technique is created, modified or deleted.
+They will only work on Techniques available in the path given in parameter.
+
+They are triggered when clicking on save/delete button.
+
+The main difference with get requests is that hooks are launched before and after the action is made.
+
+We will see all hooks behavior in the following dedicated hooks section.
+
+===== Hooks
+
+On each POST request, pre- and post- hooks are executed by the Technique Editor.
+These hooks are used for the Rudder integration to help transform pure ncf Techniques into Rudder ones.
+
+- pre-hooks are located in: `/var/rudder/configuration-repository/ncf/pre-hooks.d`
+- post-hooks are located in: `/var/rudder/configuration-repository/ncf/post-hooks.d`
+
+As of March 2015, we have two post-hooks defined and no pre-hooks:
+
+* `post.write_technique.commit.sh`
+** It commits the Technique newly created into Rudder Git configuration repository
+located in `/var/rudder/configuration-repository`.
+* `post.write_technique.rudderify.sh`
+** It generates a valid Rudder Technique from the newly created Technique and reloads Rudder
+Technique Library so that updates are taken into account.
+
+If you want to run post hooks by hand, you can use the following command:
+
+      /var/rudder/configuration-repository/ncf/post-hooks.d/post.write_technique.commit.sh /var/rudder/configuration-repository bundle_name
+
+==== Create Technique manually
+
+===== Prerequisite
+
+To create a Technique, you'll need a few things:
+
+CFEngine knowledge:: Rudder's Techniques are implemented using CFEngine.
+Rudder takes care of a lot of the work of using CFEngine, but you'll need to
+have a reasonable understanding of the CFEngine syntax.
+
+Rudder installation for testing:: To be able to test your new Technique,
+you'll need a working Rudder installation (at least a server and a node).
+
+Text editor:: The only other tool you need is your favorite text editor!
+
+===== Define your objective
+
+Before starting to create a new Technique, have you checked that it doesn't
+already exist in Rudder? The full list of current Techniques is available from
+GitHub, at http://github.com/normation/rudder-techniques/[GitHub rudder-techniques repository].
+
+OK, now we've got that over with, let's go on.
+
+A Technique should be an abstract configuration. This means that your Technique
+shouldn't just configure something one way, but instead it should implement
+*how* to configure something, and offer options for users to choose what way
+they want it configured. Before starting, make sure you've thought through what
+you want to create.
+
+Here's a quick checklist to help:
+
+* Do you need to install packages?
+* Do you need to create or edit configuration files?
+* Do you need to copy files from a central location?
+* Do you need to launch processes or check that they're running?
+* Do you need to run commands to get things working?
+
+Once you've made a list of what needs doing, consider what options could be
+presented in the user interface, when you create a Directive from your new
+Technique. Intuitively, the more variables there are, the more flexible your
+Technique will be. However, experience shows that making the Technique *too*
+configurable will actually make it harder to use, so a subtle balance comes in
+to play here.
+
+At this stage, make a list of all the variables that should be presented to
+users configuring a Directive from your Technique.
+
+===== Initialize your new Technique
+
+The simplest way to create a new Technique and be able to test it as you work is
+to start on a Rudder server. Open a terminal and connect to your Rudder server
+by ssh, and cd into the directory where Techniques are stored:
+
+----
+
+cd /var/rudder/configuration-repository/techniques
+
+----
+
+Under this directory, you'll find a set of categories, and sub-categories.
+Before creating your Technique, choose a category to put it in, and change to
+that directory. For example:
+
+----
+
+cd applications
+
+----
+
+You can consult the description of each category by looking at the
++category.xml+ file in each directory. For this example:
+
+----
+
+cat category.xml
+
+----
+
+Will output:
+
+----
+
+
+    Application management
+    This category contains Techniques designed to install,
+	configure and manage applications
+
+
+----
+
+Once you've decided on a category, it's time to create the basic skeleton of
+your Technique. The technical name for your Technique is its directory name, so
+choose wisely:
+
+----
+
+mkdir sampleTechnique
+
+----
+
+All directories under this one are version numbers. Let's start with a simple
+1.0 version. From now on, we'll work in this directory.
+
+----
+
+mkdir sampleTechnique/1.0
+cd sampleTechnique/1.0
+
+----
+
+Now, you need a minimum of two files to get your Technique working:
+
+metadata.xml:: This file describes the Technique, and configures how it will be
+displayed in the web interface.
+
+st files:: These files are templates for CFEngine configuration files. You need
+at least one, but can have as many as you like. Rudder processes them to
+generate .cf files ready to be used by CFEngine.
+
+To get started, copy and paste these sample files, or download them from GitHub:
+
++metadata.xml+ (original file:
+https://github.com/normation/rudder-techniques/blob/master/technique-metadata-sample.xml[+technique-metadata-sample.xml+])
+
+----
+
+include::technique-metadata-sample.xml
+
+----
+
++sample_technique.st+ (original file:
+https://github.com/normation/rudder-techniques/blob/master/technique-st-sample.xml[+technique-st-sample.xml+])
+
+----
+
+include::technique-st-sample.xml
+
+----
+
+[[_node_properties]]
+=== Node properties
+
+Node properties can be found in the "properties" tab of each node in Rudder.
+
+Node properties can be modified using Rudder's API, see http://www.rudder-project.org/rudder-api-doc/#api-Nodes-updateNodeProperties
+
+Properties can also be defined on the node itself, to locally override properties.
+
+Each property is a key=value pair. The value can be a string or a well-formatted JSON data structure.
+
+Some examples:
+`datacenter=Paris`
+`datacenter= { "id": "FRA1", "name": "Colo 1, Paris", "location": "Paris, France", "dns_suffix": "paris.example.com" }`
+
+
+==== Using properties
+
+You can use node properties almost everywhere in Rudder:
+
+* in directive parameters
+* in the technique editor
+* in your own techniques and generic methods
+
+To use a property, simply use the variable node.properties with the variable call syntax.
+
+Example with a property named 'datacenter':
+
+----
+
+${node.properties[datacenter]}
+
+----
+
+WARNING: Before Rudder 3.1.14 and 3.2.7, node properties could not be used in JavaScript expressions (see following section), since they are evaluated during policy generation and node properties were only made available to agents at runtime. Since Rudder 3.1.14, 3.2.7 and 4.0.0 and later, you can enable a feature switch in "Administration/Settings" to enable node properties expansion in directive parameters. More details are available at xref:42_advanced_configuration_management/45_node_properties_in_directives.adoc#_node_properties_expansion_in_directives[Node properties expansion in directives].
+
+In a mustache template, use:
+
+----
+
+{{{vars.node.properties.datacenter}}}
+
+----
+
+==== Local override
+
+The agent searches for optional properties files `/var/rudder/local/properties.d/*.json`, and will override existing properties.
+
+As a result, if you have node properties defined server side as
+`"sysctls_postgresql":{"kernel.shmall":"903330","kernel.shmmax":"3700041320"}` and
+`"vm":{"vm.dirty_ratio":"10"}`
+
+and a local property file `/var/rudder/local/properties.d/postgresql_config.json` as
+
+----
+
+{
+  "properties":
+  {
+    "sysctls_postgresql": {
+      "kernel.shmmax":"5368709120"
+    }
+  }
+
+}
+
+----
+
+The resulting properties will be:
+
+`"sysctls_postgresql":{"kernel.shmmax":"5368709120"}` and
+`"vm":{"vm.dirty_ratio":"10"}`
+
+`sysctls_postgresql` has been overridden by the local property, and `vm` has been left untouched.
+Note that it is an override, as the semantics of merging are not deterministic with literal values, and it does not allow to unset values. If you need to merge, please refer to the next paragraph.
+
+
+==== Merging properties
+
+If you want to merge server defined properties with local defined properties, rather than override them, you will need to use the generic method variable_dict_merge_tolerant to define which variables you need to merge, and define the local variables in a different namespace than properties.
+
+For instance, if you have defined in the node properties the following properties
+
+`"sysctls_postgresql":{"kernel.shmall":"903330","kernel.shmmax":"3700041320"}`
+
+and you wish to merge these values on a node with locally defined variables, to change the value of kernel.shmmax and set the value of kernel.shmmni, you can define the file /var/rudder/local/properties.d/postgresql_config.json with the following content
+
+----
+
+{
+    "local_properties":
+    {
+        "sysctls_postgresql": {
+            "kernel.shmmax":"5368709120",
+            "kernel.shmmni":"4096"
+        }
+    }
+
+}
+
+----
+
+and use the generic method `variable_dict_merge_tolerant` to merge `node.properties[sysctls_postgresql]` and `node.local_properties[sysctls_postgresql]`, and set the result in merged_properties.sysctls_postgresql (for instance): `variable_dict_merge_tolerant("merged_properties", "sysctls_postgresql", "node.properties[sysctls_postgresql]", "node.local_properties[sysctls_postgresql]")`
+
+As a result, merged_properties.sysctls_postgresql will contain
+
+----
+
+"sysctls_postgresql": {
+    "kernel.shmall":"903330",
+    "kernel.shmmax":"5368709120",
+    "kernel.shmmni":"4096"
+}
+
+
+----
+
+
+
+==== Under the hood
+
+On the server, one or more properties files are written for each node in the
+`/var/rudder/share//rules/cfengine-community/properties.d/` directory.
+This directory is then copied to each node by the agent with all other promise files.
+
+In the agent, properties are made available in the `node.` container that contains the values.
+Those values are read from
+`/var/rudder/cfengine-community/inputs/properties/*.json`. All files are taken
+in order and override the previous ones - the last one wins.
+
+The agent searches for optional properties files `/var/rudder/local/properties.d/*.json`, and will define variables
+or override existing properties.
+
+Each file must contain at least 2 levels of JSON content, the first level is the namespace level
+and the second level is the key level.
+
+The namespace name must be an ASCII name that doesn't start with `_` and must
+match the following regex: `[a-zA-Z0-9][a-zA-Z0-9_]*`
+
+For example:
+
+----
+
+{
+  "properties":
+  {
+    "datacenter": "Paris",
+    "environment": "production",
+    "customer": "Normation"
+  }
+}
+
+----
+
+The merge is a first level merge done at the namespace level. This means that:
+
+* a key in a namespace is fully overridden by the same key in the same namespace in a later file.
+* a key in a namespace is never overridden by the same key in a different namespace
+* a key that is overridden never retains original data even if it is a data container itself
+
+The result key is available in the `node.` data variable. A usage
+example:
+
+----
+${node.properties[datacenter]}
+----
+
+To get the original data (for debug only) there is the
+`properties.property_` variable. A usage example:
+
+----
+${properties.property__var_rudder_cfengine_community_inputs_properties_d_properties_json[properties][datacenter]}
+----
+
+[[_node_properties_expansion_in_directives]]
+=== Node properties expansion in directives
+
+It is possible to use properties defined on nodes to build Directive values. The
+resulting values will be computed during policy generation, and can therefore
+provide unique values for each node or be used in JavaScript expressions.
+
+Properties on nodes are defined using Rudder's REST API, with the 'Update Node properties' API call.
+More details in our http://www.rudder-project.org/rudder-api-doc[API documentation].
+
+Properties can also be defined directly on the nodes, by creating properties files
+`/var/rudder/local/properties.d/*.json`
+
+
+==== Feature availability
+
+This feature was introduced in Rudder 3.1.14, Rudder 3.2.7 and Rudder 4.0.0.
+
+If you upgraded to 3.1.14 (or a later 3.1.x version) or 3.2.7 (or a later 3.2.x
+version) from a previous Rudder version, this feature is disabled by default
+in order to mitigate any risk of undesired side effects on existing
+installations. You can enable it in the Administration/Settings page, using the
+*Enable node properties expansion in Directives* switch.
+
+Rudder installations from 4.0.0 onwards have this feature enabled by default.
+
+==== Usage
+
+In any directive text field, you can access properties defined on nodes using the following syntax:
+
+----
+
+${node.properties[property_name][key_one][key_two]}
+
+----
+
+
+where:
+
+- `property_name` is the name of the property defined via the API
+- `key_one` and `key_two` are keys in the JSON structure
+- the value obtained is the string representation, in compact mode, of the entire node property or sub-structure of the JSON value
+- if the key is not found, an error will be raised that will stop policy generation
+- spaces are authorized around separators ([,],|,}..)
+
+===== Providing a default value
+
+Most of the time, you will need to provide a default value to node properties expansion to avoid a policy generation
+error due to missing node properties.
+This is also a good case to allow a simple override mechanism for a parameter where only some nodes have a specific value.
+
+You can also use other node properties, or other Rudder parameters as defaults, using the same syntax as above.
+
+Some examples:
+
+----
+
+${node.properties[datacenter][id] | default = "LON2" }
+${node.properties[datacenter][name] | default = """Co-location with "Hosting Company" in Paris (allows quotes)""" }
+${node.properties[datacenter][id] | default = ${rudder.param.default_datacenter} }
+${node.properties[netbios_name] | default = ${rudder.node.hostname} }
+${node.properties[dns_suffix] | default = ${node.properties[datacenter][dns_suffix] | default = "${rudder.node.hostname}.example.com" } }
+
+#or even use cfengine variables in the default
+${node.properties[my_override] | default = "${cfengine.key}"}
+
+----
+
+===== Forcing expansion on the node
+
+In some cases, you will want to use a `${node.properties[key]}` in a directive parameter, but you don't want to expand it during
+policy generation on the Rudder server, but instead let the value be expanded during the agent run on the node. Typically if the value is to be used by a templating
+tool, or if the value is known only on the node.
+
+For these cases, you can add the "node" option to the property expression:
+
+----
+
+${node.properties[datacenter][id] | node }
+
+----
+
+This will be rewritten during policy generation into:
+
+----
+
+${node.properties[datacenter][id]}
+
+----
+
+Which will be considered as a standard variable by the agent, which will replace this expression by its value if it's defined, or keep it as is if it's unknown.
+
+The variable content is read from `/var/rudder/cfengine-community/inputs/properties.d/properties.json`, and from the optionally defined `/var/rudder/local/properties.d/*.json` files.
+You can find more information on node properties in xref:42_advanced_configuration_management/40_node_properties.adoc#_node_properties[node properties documentation].
+
+
+=== JavaScript evaluation in Directives
+
+It is possible to use JavaScript expressions to build Directive values. The
+resulting values will be computed during policy generation, and can therefore
+provide unique values for each node.
+
+==== Feature availability
+
+This feature was introduced in Rudder 3.1.12, Rudder 3.2.5  for password fields
+only, and generalized for all fields in Rudder 3.1.14, Rudder 3.2.7 and Rudder 4.0.
+
+If you upgraded to 3.1.12 (or a later 3.1.x version) or 3.2.5 (or a later 3.2.x
+version) from a previous Rudder version, this feature is disabled by default
+in order to mitigate any risk of undesired side effects on existing
+installations. You can enable it in the Administration/Settings page, using the
+*Enable script evaluation in Directives* parameter.
+
+Rudder installations from 4.0 onwards have this feature enabled by default.
+
+==== Usage
+
+All standard JavaScript methods are available, and a Rudder-specific
+library, prefixed with `rudder.` also provides some extra utilities. This
+library is documented below.
+
+For example, to get the first 3 letters of each node's hostname, you can write:
+----
+"${rudder.node.hostname}".substring(0,3)
+----
+
+[TIP]
+
+[[limits-of-scripts, Limitation of the scripting language]]
+
+.Limitation of the scripting language
+
+====
+
+JavaScript expressions are evaluated in a sandboxed JavaScript environment. It has some
+limitations, such as:
+
+* It cannot write on the filesystem
+* Scripts are killed after 5 seconds of execution, to prevent overloading the system
+
+====
+
+==== Rudder utility library
+
+===== Standard hash methods
+
+The following methods allow to simply hash a value using standard algorithms:
+
+* `rudder.hash.md5(string)`
+* `rudder.hash.sha256(string)`
+* `rudder.hash.sha512(string)`
+
+These methods do not use a salt for hashing, and as such are not suitable for
+distributing passwords for user accounts on UNIX systems. See below for a
+preferable approach for this.
+
+===== UNIX password-compatible hash methods
+
+The following methods are specially designed to provide hashes that can be
+used as user passwords on UNIX systems (in `/etc/shadow`, for example). Use
+these if you want to distribute hashes of unique passwords for each of your
+nodes, for example.
+
+Two different cases exist: support for generic Unix-like systems (Linux, BSD,
+...) and support for AIX systems (which use a different hash algorithm).
+
+Available methods are:
+
+* `rudder.password.auto(algorithm, password [, salt])`
+* `rudder.password.unix(algorithm, password [, salt])`
+* `rudder.password.aix(algorithm, password [, salt])`
+
+The parameters are:
+
+* `algorithm` can be "MD5", "SHA-512", "SHA512", "SHA-256", "SHA256" (case insensitive)
+* `password` is the plain text password to hash
+* `salt` is the optional salt to use in the password (we *strongly* recommend providing this value - see warning below)
+
+The `unix` method generates Unix crypt password compatible hashes (for use on
+Linux, BSD, etc), while the `aix` method generates AIX password compatible
+hashes. The `auto` method automatically uses the appropriate algorithm for
+each node type (AIX nodes will have a AIX compatible hash, others will
+have a Unix compatible hash). We recommend always using `auto` for simplicity.
+
+For example, to use the first 8 letters of each node's hostname as a password,
+you could write:
+----
+rudder.password.auto("SHA-256", "${rudder.node.hostname}".substring(0,8), "abcdefg")
+----
+
+[WARNING]
+
+.Providing a salt
+
+====
+
+It is strongly recommended to provide a *salt* to the methods above. If no
+salt is provided, a random salt is created, and will be recreated at each
+policy generation, causing the resulting hashes to change each time. This, in
+turn, will generate an unnecessary "repaired" status for the password component on all nodes
+at each policy generation.
+
+====
+
+[TIP]
+
+.JVM requirements
+
+====
+
+This feature is tested only on HotSpot 1.7 and 1.8, OpenJDK 1.7 and 1.8,
+IBM JVM 1.7 and 1.8.
+
+====
+
+[TIP]
+
+.JVM requirements for AIX password hashes
+
+====
+
+AIX password generation depends on the availability of *PBKDF2WithHmacSHA256* and
+*PBKDF2WithHmacSHA512* in the JVM. These algorithms are included by default on
+HotSpot 1.8 and OpenJDK 1.8 and upward. In the case where your JVM does not support these
+algorithms, typically on an IBM JDK or a JVM 1.7 version of HotSpot and OpenJDK, the hashing
+algorithm falls back to *SHA1* with *PBKDF2WithHmacSHA1*, and an error message will be
+logged. You can also check your JVM editor manual to add support for these algorithms.
+
+====
+
+==== Status and future support
+
+In a future version of Rudder, JavaScript evaluation will be supported in all
+fields in Directives.
+
+In the meantime, you can already test this functionality out by entering a JavaScript
+expression in any Directive field, prefixed by "evaljs:". Please be aware that
+this is unsupported and untested, so do this at your own risk.
+
+If you do encounter any issues, please get in touch or open a ticket - we'd
+love to hear about them!
+
+There is currently no plan to extend this support to the fields in the
+Technique editor.
+
+[[_server_event_hooks]]
+=== Server Event Hooks
+
+Rudder 4.1 introduces the possibility to execute files (hooks), typically scripts,
+when some predefined event occurs on Rudder.
+
+include::{partialsdir}/dyn/hooks.adoc[leveloffset=+3]
+
+
+=== New directives default naming scheme
+
+When a new directive is created, by default the 'Name' field is filled
+with the Technique name. For example, if you create a new Directive from
+the 'Users' Technique, the Name field will get the value: "Users".
+
+This not always what you want, especially for your custom Techniques. So you
+have the possibility to define new default values for Name, at Technique or
+at Technique and Version granularity.
+
+This is done by adding or updating the file:
+`/var/rudder/configuration-repository/techniques/default-directive-names.conf`.
+
+That file needs to be committed in git, and the Technique library reloaded
+to take effect:
+
+----
+cd /var/rudder/configuration-repository/techniques/
+vi default-directive-names.conf
+ ....
+git add default-directive-names.conf
+git commit -m "Change default names for new directives"
+rudder server reload-techniques
+----
+
+The file format is a simple `techniqueId[/optionalVersion]: default name to use` format.
+The Technique ID is the name of the directory containing the Technique version directory
+in `/var/rudder/configuration-repository/techniques`.
+
+For example, if we imagine that in your company, you have the internal
+convention to create one directive by user role with the login in the
+name, you would prefer to have a default value to:
+
+----
+Role : 
+----
+
+And then, for Users Technique version 7, you changed your mind and now
+use the scheme:
+
+----
+Role: [user-role] (with login [login])
+----
+
+Then the file will look like:
+
+----
+# Default pattern for new directive from "userManagement" technique:
+userManagement: Role : 
+
+# For userManagement version 7.0, prefer that pattern in new Directives:
+userManagement/7.0: Role: [user-role] (with login [login])
+----
+
+
+=== Directives ordering
+
+Configuration in Rudder are based on desired states, describing the expected state of the system. However, there are cases where having order is desirable (like ensuring that a JVM is present before deploying an Application server, or ensuring a user is present before setting it sudoers), even if it will converge over the course of several agent runs.
+
+In Rudder, there are two separate ways to order things, depending on the type of Technique. So, before that, we need to explain how Policies are generated on the
+agent from Directives based on the same Technique.
+
+==== Policy generation and Directive merge
+
+In Rudder, Policies are generated from Directives, but several Directives based on the same Technique always lead to *one* Policy on the agent.
+For unique (non multi-instance) Technique, the one with the highest priority is selected. For multi-instance Technique, the different Directive values are *merged*
+into one Policy after having been sorted.
+
+.Separated Policy Generation in Rudder 4.3
+[TIP]
+=====
+ In Rudder 4.3, that limitation is lifted and Technique can be made to generate ONE Policy for each Directive. That capacity is controled by the
+ `POLICYGENERATION` tag, where the value `merged` is the pre-4.3 default behavior, and values `separated` or `separated-with-param` lead to one Policy per Directive.
+
+ See https://www.rudder-project.org/redmine/issues/10625[Don't merge directive from same technique on generation] for more information.
+=====
+
+
+==== Sorting Directives based on the *same* Technique
+
+For Directive based on the same Technique, the sort order is based on the *Priority* value of the Directive. Between two Directive, the one with the highest *Priority*
+is the first:
+
+- for a *non* multi-instance Technique, it means that there is only one that is chosen in the resulting Policies (the others are discarded),
+- for a multi-instance Technique, it means that the variables in the Policy will be declared and checked in the sorting order of Directives (so the first Directive's
+  variables will be declared in first position and checked first during an agent run).
+
+If several *Directives* have the same *Priority*, the *Rule name*, and then the *Directive name* are used for sorting in alphanumeric order.
+
+.Priority field value and meaning
+[WARNING]
+======
+The *Priority* field of a Directive used to be a number, from 0 to 10, where 0 means "highest priority".
+This changed with https://www.rudder-project.org/redmine/issues/11725 but if you knew Rudder before that change, please
+use "0" whenever the documentation says "highest priority".
+======
+
+
+===== Special use case: overriding generic_variable_definition
+
+You can use the merging of Directive to define variable override with the "Generic Variable Definition" Technique.
+
+For example, let say you want to define a *DNS* variable with default value *[default dns]* and on some node case,
+a value *[overrided dns]*:
+
+- Create a Directive [1] with *high* priority: it will be your *default* case, so set *DNS* to *[default dns]*.
+- Create another Directive [2] with *lower* priority: it will be your specialized case, so set *DNS* to *[overrided dns]*.
+
+Then, a node with only Directive [1] will have the default value defined, and a node with both Directives will have the overriding one.
+
+It works because on the agent, you can redeclare a variable name and reassign a new value to it: the last one wins (so in our case, the one with the *lowest* priority).
+
+
+==== Sorting Policies
+
+Rudder uses a best-effort method for ordering Policies, based on alphanumeric ordering of the corresponding Rule, then Directive name.
+
+When several Directive were merged, Rudder choose the first (Rule name, Directive name) as the ordering value to use for the resulting Policy.
+
+
+.Best practice
+[TIP]
+=====
+You should always start Rules and Directives names with 2 (or 3) digits to be able to easily reorder Policy evaluation if the need arises:
+
+Do not use: "My general security rule" and "Check ssh configuration"
+
+But use: "05. My general security rule" and "40. Check ssh configuration"
+=====
+
+==== Example
+
+- given three Techniques A, B and C
+- directives A1 and A2 based on Technique A, directives B1 and B2 based on B, directives C1 and C2 based on C
+- all Directives have the same priority,
+- rule R0 having [C1], R1 having [A1, B2] and rule R2 having [A2, B1, C2], all applied on a same node,
+- merging (R0, C1) and (R2, C2) => [C1, C2] and keep (R0, C1) as Policy order
+- merging (R1, A1) and (R2, A2) => [A1, A2] and keep (R1, A1) as Policy order,
+- merging (R1, B2) and (R2, B1) => [B2, B1] (because R1 < R2) and keep (R1, B2) for policy order,
+- so policies are sort: (R0, C1) then (R1, A1) then (R1, B2)
+- resulting ordering of directive's values will be: [C1, C2] then [A1, A2] then [B1, B2]
+
diff --git a/src/reference/modules/usage/pages/advanced_node_management.adoc b/src/reference/modules/usage/pages/advanced_node_management.adoc
new file mode 100644
index 00000000..8a45a216
--- /dev/null
+++ b/src/reference/modules/usage/pages/advanced_node_management.adoc
@@ -0,0 +1,458 @@
+=== Node management
+
+==== Reinitialize policies for a Node
+
+To reinitialize the policies for a Node, delete the local copy of the Applied
+Policies fetched from the Rudder Server, and create a new local copy of the
+initial promises.
+
+----
+
+rudder agent reset
+
+----
+
+At next run of the Rudder Agent (it runs every five minutes), the initial promises will be used.
+
+[CAUTION]
+
+====
+
+Use this procedure with caution: the Applied Policies of a Node should never get
+broken, unless some major change has occurred on the Rudder infrastructure, like
+a full reinstallation of the Rudder Server.
+
+====
+
+==== Completely reinitialize a Node
+
+You may want to completely reinitialize a Node to make it seen as a new node
+on the server, for example after cloning a VM.
+
+[WARNING]
+
+====
+
+This command will permanently delete your node uuid and keys, and no configuration will
+be applied before re-accepting and configuring the node on the server.
+
+====
+
+The command to reinitialize a Node is:
+
+----
+
+rudder agent reinit
+
+----
+
+This command will delete all local agent data, including its uuid and keys, and
+also reset the agent internal state. The only configuration kept is the server
+hostname or ip configured in +policy_server.dat+. It will also send an inventory
+to the server, which will treat it as a new node inventory.
+
+[[_change_the_agent_run_schedule]]
+==== Change the agent run schedule
+
+By default, the agent runs on all nodes every 5 minutes. You can modify this value in
+ *Settings* -> *General* page in *Agent Run Schedule* section, as well as the "splay time"
+across nodes (a random delay that alters scheduled run time, intended to spread
+load across nodes).
+
+image::Global_run_settings.png[]
+
+These settings can also be modified Node by Node, allowing you to customize the agent behavior (for Nodes with limited resources like a Raspberry Pi, or with limited bandwidth). To do that, go into the Node details in the *Settings* tab
+
+image::Node_settings.png[]
+
+
+[WARNING]
+
+====
+
+When reducing notably the run interval length, reporting can be in 'No report' state
+until the next run of the agent, which can take up to the previous (longer) interval.
+
+====
+
+
+==== Installation of the Rudder Agent
+
+===== Static files
+
+At installation of the Rudder Agent, files and directories are created in
+following places:
+
++/etc+:: Scripts to integrate Rudder Agent in the system (init, cron).
+
++/opt/rudder/share/initial-promises+:: Initialization promises for the Rudder
+Agent. These promises are used until the Node has been validated in Rudder. They
+are kept available at this place afterwards.
+
++/opt/rudder/lib/perl5+:: The FusionInventory Inventory tool and its Perl
+dependencies.
+
++/opt/rudder/bin/run-inventory+:: Wrapper script to launch the inventory.
+
++/opt/rudder/sbin+:: Binaries for CFEngine Community.
+
++/var/rudder/cfengine-community+:: This is the working directory for CFEngine
+Community.
+
+===== Generated files
+
+At the end of installation, the CFEngine Community working directory is
+populated for first use, and unique identifiers for the Node are generated.
+
++/var/rudder/cfengine-community/bin/+:: CFEngine Community binaries are copied
+there.
+
++/var/rudder/cfengine-community/inputs+:: Contains the actual working CFEngine
+Community promises. Initial promises are copied here at installation. After
+validation of the Node, Applied Policies, which are the CFEngine promises
+generated by Rudder for this particular Node, will be stored here.
+
++/var/rudder/cfengine-community/ppkeys+:: A unique SSL key generated for the
+Node at installation time.
+
++/opt/rudder/etc/uuid.hive+:: A unique identifier for the Node is generated
+into this file.
+
+===== Services
+
+After all of these files are in place, the CFEngine Community daemons are
+launched:
+
+include::{partialsdir}/glossary/cf-execd.adoc[]
+
+include::{partialsdir}/glossary/cf-serverd.adoc[]
+
+===== Configuration
+
+At this point, you should configure the Rudder Agent to actually enable the
+contact with the server. Type in the IP address of the Rudder Root Server in the
+following file:
+
+----
+
+echo *root_server_IP_address* > /var/rudder/cfengine-community/policy_server.dat
+
+----
+
+==== Rudder Agent interactive
+
+You can force the Rudder Agent to run from the console and observe what happens.
+
+----
+
+rudder agent run
+
+----
+
+[CAUTION]
+
+.Error: the name of the Rudder Root Server can't be resolved
+
+====
+
+If the Rudder Root Server name is not resolvable, the Rudder Agent will issue
+this error:
+
+----
+
+rudder agent run
+
+Unable to lookup hostname (rudder-root) or cfengine service: Name or service not known
+
+----
+
+To fix it, either you set up the agent to use the IP address of the Rudder root
+server instead of its Domain name, either you set up accurately the name
+resolution of your Rudder Root Server, in your DNS server or in the hosts file.
+
+The Rudder Root Server name is defined in this file
+
+----
+
+echo *IP_of_root_server* > /var/rudder/cfengine-community/policy_server.dat
+
+----
+
+====
+
+[CAUTION]
+
+.Error: the CFEngine service is not responding on the Rudder Root Server
+
+====
+
+If the CFEngine is stopped on the Rudder Root Server you will get this error:
+
+----
+
+# rudder agent run
+ !! Error connecting to server (timeout)
+ !!! System error for connect: "Operation now in progress"
+ !! No server is responding on this port
+Unable to establish connection with rudder-root
+
+----
+
+Restart the CFEngine service:
+
+----
+
+service rudder-agent restart
+
+----
+
+====
+
+==== Processing new inventories on the server
+
+===== Verify the inventory has been received by the Rudder Root Server
+
+There is some delay between the time when the first inventory of the Node is
+sent, and the time when the Node appears in the New Nodes of the web interface.
+For the brave and impatient, you can check if the inventory was sent by listing
+incoming Nodes on the server:
+
+----
+
+ls /var/rudder/inventories/incoming/
+
+----
+
+===== Process incoming inventories
+
+On the next run of the CFEngine agent on Rudder Root Server, the new inventory
+will be detected and sent to the Inventory Endpoint. The inventory will be then
+moved to the directory of received inventories. The Inventory Endpoint does
+its job and the new Node appears in the interface.
+
+You can force the execution of CFEngine agent on the console:
+
+----
+
+rudder agent run
+
+----
+
+===== Validate new Nodes
+
+User interaction is required to validate new Nodes.
+
+===== Prepare policies for the Node
+
+Policies are not shared between the Nodes for obvious security and
+confidentiality reasons. Each Node has its own set of policies. Policies are
+generated for Nodes according to the following states:
+
+. Node is new;
+
+. Inventory has changed;
+
+. Technique has changed;
+
+. Directive has changed;
+
+. Group of Node has changed;
+
+. Rule has changed;
+
+. Regeneration was forced by the user.
+
+image::graphviz/generate_policy_workflow.png[Generate policy workflow]
+
+
+==== Agent execution frequency on nodes
+
+===== Checking configuration (CFEngine)
+
+By default, Rudder is configured to check and repair configurations using the CFEngine
+agent every 5 minutes, at 5 minutes past the hour, 10 minutes past the hour,
+etc.
+
+The exact run time on each machine will be delayed by a random interval, in
+order to "smooth" the load across your infrastructure (also known as "splay
+time"). This reduces simultaneous connections on relay and root servers (both
+for the CFEngine server and for sending reports).
+
+See xref:41_advanced_node_management/10_node_management.adoc#_change_the_agent_run_schedule[Change the agent run schedule] Section to see how to configure it
+
+
+===== Inventory (FusionInventory)
+
+The FusionInventory agent collects data about the node it's running on such as
+machine type, OS details, hardware, software, networks, running virtual
+machines, running processes, environment variables...
+
+This inventory is scheduled once every 24 hours, and will happen in between
+0:00 and 5:00 AM. The exact time is randomized across nodes to "smooth" the
+load across your infrastructure.
+
+
+[[extend-nodes-inventory, Extend node inventory]]
+
+=== Extend node inventory
+
+
+It is quite common to need to gather information on your nodes that are not present
+in the standard Rudder inventory information.
+
+As of Rudder 4.3.0, you can get more information about a node thanks to
+`inventory hooks`. These information will be available as standard *node properties*.
+
+
+==== Overview
+
+On the node, you create `inventory hooks` executable and place them in `/var/rudder/hooks.d`.
+These binaries are executed in the alphanumerical order, only if executable, and their output is checked to
+ensure that it is proper JSON.
+
+For example, one hook can output:
+
+----
+
+{
+    "my_prop1": ["a", "json", "array"],
+    "my_prop2": {"some": "more", "key": "value"}
+}
+
+----
+
+When the node inventory is processed server side, the node properties will get new values, one per
+first-level key of all hooks.
+
+These node properties are marked as "provided by inventory" and cannot be deleted or overwritten.
+Apart from that characteristic, they are normal node properties that can be used to create groups, or as
+variables in Directives parameters.
+
+==== Creating a node inventory hook
+
+An inventory hook can be any kind of executable that can be called without parameters, from a shell script to a C program.
+
+Hooks are located in directory `/var/rudder/hooks.d`. You may need to create that directory the first time you want to add hooks:
+
+----
+
+mkdir /var/rudder/hooks.d
+
+----
+
+They need to be executable by rudder agent.
+
+For example, this hook will create a new "hello_inventory" node property:
+
+----
+
+% cd /var/rudder/hooks.d
+
+% cat > hello-world << EOF
+#!/bin/sh
+echo '{"hello_inventory": "a simple string value from inventory"}'
+EOF
+
+% chmod +x hello-world
+
+% rudder agent inventory
+
+----
+
+And then, after the server has processed the inventory, the node (here with ID '74d10806-b41d-4575-ab86-8becb419949b') has the corresponding property:
+
+----
+% curl -k -H "X-API-Token: ......" -H "Content-Type: application/json" -X GET 'https://..../rudder/api/latest/nodes/74d10806-b41d-4575-ab86-8becb419949b?include=minimal,properties' | jq '.'
+{
+  "action": "nodeDetails",
+  "id": "74d10806-b41d-4575-ab86-8becb419949b",
+  "result": "success",
+  "data": {
+    "nodes": [
+      {
+        "id": "74d10806-b41d-4575-ab86-8becb419949b",
+        ....
+        "properties": [
+          {
+            "name": "hello_inventory",
+            "value": "a simple string value from inventory",
+            "provider": "inventory"
+          }
+        ]
+      }
+    ]
+  }
+}
+----
+
+
+==== Overriding
+
+If two hooks provide the same first-level key, then the last executed hook values for that key are kept.
+
+You should always use the first level keys as a namespace for your hooks to avoid unwanted overriding.
+
+
+==== Inventory XML format
+
+Properties coming from inventory hooks are stored in a tag named `<CUSTOM_PROPERTIES>`. The tag contains a
+JSON array with all the inventory hook properties merged:
+
+----
+
+[{ "key1" : "values"},{ "key2" : "values"}]
+
+----
+
+
+[[node-lifecycle, Node Lifecycle]]
+
+=== Node Lifecycle
+
+Imagine you have a node that you must disconnect for a maintenance period.
+You know what is happening on the node, and during the maintenance period,
+you don't want Rudder to show the node as `Not responding`
+and trigger alerts on the global compliance level.
+
+Another common use case is to be able to set specific policies for nodes
+just after acceptation that are used for provisioning, or just before
+node end of life to clean it up.
+
+In Rudder 4.3, we introduced a way to manage the Node lifecycle, for both of these use cases:
+
+* nodes disconnected from Rudder Server can be excluded from policy generation and Compliance with the `Ignored` state,
+* the main states of a system life can be applied with the 4 states `Initializing`, `Enabled`,
+`Preparing End of Life` and `Empty policies`.
+
+
+image::node-lifecycle-settings.png[]
+
+
+States `Ignored` and `Empty policies` automatically change the policy generation and compliance:
+
+* `Ignored` prevents any new policy generation for the Nodes in this state.
+* `Empty policies` generates a minimal set of policies, only to manage the Rudder Agent itself.
+
+Both states remove the nodes from the compliance.
+
+Nodes with non-default state appears with a label next to their name in the nodes list to show their
+states, and their compliance doesn’t show up in `Ignored` nor `Empty policies` mode. You can filter by
+node lifecycle state in that list with the common `Filter` input field.
+
+image::node-lifecycle-nodelist.png[]
+
+
+Node with a given lifecycle state can be searched thanks to the quicksearch tool in Rudder status
+bar. That state can also be used to construct groups (`Node state` attribute of `Node summary`)
+and they also show up in the API responses concerning node information.
+
+Finally, the default state for a Node can be configured in the Settings page, to define which
+state newly accepted Nodes use.
+
+image::node-lifecycle-settings.png[]
+
+
+In the future, these states will be configurable on a per node basis at acceptation, and the
+lifecycle states list will be configurable by users.
+
+
diff --git a/src/reference/modules/usage/pages/configuration_management.adoc b/src/reference/modules/usage/pages/configuration_management.adoc
new file mode 100644
index 00000000..1897e7f3
--- /dev/null
+++ b/src/reference/modules/usage/pages/configuration_management.adoc
@@ -0,0 +1,644 @@
+== Configuration concepts
+
+We adopted the following terms to describe the configurations in Rudder:
+
+====
+
+include::{partialsdir}/glossary/technique.adoc[]
+
+include::{partialsdir}/glossary/directive.adoc[]
+
+include::{partialsdir}/glossary/rule.adoc[]
+
+include::{partialsdir}/glossary/applied-policy.adoc[]
+
+====
+
+As illustrated in this summary diagram, the rules are linking the
+functions of inventory management and configuration management.
+
+.Concepts diagram
+
+image::configuration_concepts.svg[]
+
+=== Techniques
+
+==== Concepts
+
+A Technique defines a set of operations and configurations to reach the
+desired behaviour. This includes the initial set-up, but also a regular check on
+the parameters, and automatic repairs (when possible).
+
+All the Techniques are built with the possibility to change only part of a
+service configuration: each parameter may be either active or set to the
+"Don't change" value, which will leave the default values in place. This allows
+for a progressive deployment of the configuration management.
+
+Finally, the Techniques will generate a set of reports which are sent to
+the Rudder Root Server, which will let you analyse the percentage of compliance
+of your policies, and soon, detailed reports on their application.
+
+==== Manage the Techniques
+
+The Techniques shipped with Rudder are presented in a library that you can
+reorganize in *Configuration > Techniques*. The library
+is organized in two parts: the available Techniques, and the selection
+made by the user.
+
+include::{partialsdir}/glossary/technique-library.adoc[]
+
+include::{partialsdir}/glossary/active-techniques.adoc[]
+
+==== Create new Techniques
+
+The standard library only provides the most common Techniques. You can create
+new Techniques with the xref:23_configuration_management/41_technique_editor.adoc#technique-editor[Technique Editor].
+
+=== Directives
+
+Once you have selected and organized your Techniques, you can create your
+configurations in the *Configuration Management > Directives* section.
+
+include::{partialsdir}/glossary/directive.adoc[]
+
+The screen is divided in three parts:
+
+- on the left, The list of Directives, grouped by Technique
+
+- on the right, The selected Directive form.
+
+Click on the name of a Technique to show its description, and how to create a Directive based on it.
+
+Click on the name of a Directive to see the Directive Summary containing the
+description of the Technique it's derived from, and the configuration items
+of the Directive.
+
+
+image::Directive_management.png[]
+
+.Create a Directive for Name resolution
+
+====
+
+Use the Technique 'Name resolution' to create a new Directive called
++Google DNS Servers+, and shortly described as 'Use Google DNS Server'. Check in
+the options 'Set nameservers' and 'Set DNS search suffix'.  Set the value of the
+variable 'DNS resolver' to +8.8.8.8+ and of 'Domain search suffix' according to
+your organization, like +rudder-project.org+.
+
+====
+
+=== Rules
+
+include::{partialsdir}/glossary/rule.adoc[]
+
+image::Rule_management.png[]
+
+When a Rule is created or modified, the promises for the target nodes are generated. Rudder computes all the promises each node must have, and makes them available for the nodes. This process can take up to several minutes, depending on the number of managed nodes and the Policy Server configuration. During this time, the status icon on the top of the page turns to grey, with moving arrows.
+If you feel the generated promises should be modified (for instance, if you changed the configuration of Rudder), you can click on the status menu in the top bar and click on "Regenerate policies".
+
+image::Rule_config.png[]
+
+
+
+=== Variables
+
+==== User defined parameters
+
+Rudder provides a simple way to add common and reusable variables in either plain Directives, or techniques created using the Technique editor: the parameters.
+
+image::rudder-parameters.png[Parameters]
+
+The parameters enable the user to specify a content that can be put anywhere, using the following syntax:
+
+* In Directives: '${rudder.param.name}' will expand the content of the "name" parameter.
+* In the Technique Editor: '${rudder_parameters.name}' will do the same.
+
+Using this, you can specify common file headers (this is the default parameter, "rudder_file_edit_header"), common DNS or domain names, backup servers,
+site-specific elements...
+
+==== System variables
+
+Rudder also provides system variables that contain information about nodes
+and their policy server. You can use them like user defined parameters.
+
+The information about a Node:
+
+* '${rudder.node.id}' returns the Rudder generated id of the Node
+* '${rudder.node.hostname}' returns the hostname of the Node
+* '${rudder.node.admin}' returns the administrator login of the Node
+
+The information about a Node's policy server.
+
+* '${rudder.node.policyserver.id}' returns the Rudder generated id of the Policy Server
+* '${rudder.node.policyserver.hostname}' returns the hostname of the Policy Server
+* '${rudder.node.policyserver.admin}' returns the administrator login of the Policy Server
+
+
+[[compliance-and-drift-assessment]]
+=== Compliance and Drift Assessment
+
+
+==== Overview in Rudder
+
+
+Rudder is built to continuously assess drift compared to defined policies, with or without auto-healing.
+
+By auto-healing, we mean that optionally, Rudder can continuously enforce correct configuration over time, correcting the assessed drift so that
+your configuration converges towards desired states. This behavior is optional, and Rudder can also only report drift without changing configuration.
+That policy enforce or audit mode can be configured by node, rule or directive (see xref:23_configuration_management/40_policy_mode.adoc#_policy_mode_audit_enforce[policy mode documentation] for more details).
+
+Rudder is able to adapt to complex processes and only do the minimal required work so that the server converges to the desired state,
+whatever the starting state was. Rudder works as a GPS would, adapting the path to your destination depending on the path
+you actually took. This process is much more resilient to changes than a step by step, procedural description of the commands to execute.
+
+Compliance and drift from expected configurations are then reported with possibility to drill down in non-compliance issues to identify the root problem.
+
+Of course, one can always correct a drift error by hand by updating the configuration target and changing policy mode from "audit" to "enforce" mode.
+
+===== Compliance and drift reporting
+
+Compliance drifts (non compliances, enforcement errors, repairs) are reported in Rudder by several means:
+
+- Compliance are reported in aggregated format globally in the dashboard, and by rules or nodes (example for Rule below)
+- they are stored in the Rudder compliance database, and each Rule displays a history of changes as depicted in "Changes history on a Rule" below.
+- each drift fires an event which is logged in file /var/log/rudder/compliance/non-compliant-reports.log and can be used
+  to integrate with log aggregation engines like Logstash, or hooks (typically to send notification to IRC or Slack, send email, etc)
+  - see for example the Slack connector here: https://github.com/Normation/rudder-tools/blob/master/scripts/rudder-notification/forward-non-compliance-to-slack.sh
+- compliance and drift are also available from Rudder API to provide deeper integration with your IT Infrastructure.
+
+
+
+.Compliance on a Rule
+
+image::Rule_compliance.png[Rule compliance]
+
+The Rule detailed compliance screen will also graph compliance deviations on
+a recent period as well as display a deviation log history for this period.
+
+
+
+.Changes history on a Rule
+
+image::rudder-rule-compliance-history.png[Changes compliance history]
+
+
+
+
+==== How compliance is calculated ?
+
+As previously seen, in Rudder you define Rules which target groups of Nodes, and are composed of configuration Directives.
+
+A Directive contains one or multiple sub-configuration elements which generates reports.
+For example, for a Sudoers Directive, each user can be such an element.
+
+Reports have states explaining what is the drift between the expected configuration and the actual configuration.
+Some states depend on whether the user chose to automatically enforce drift correction
+or to only report on drift.
+
+Finally, a node can get a global state if reports don't come at the expected frequency or for the expected policy configuration version.
+
+Below you will find all details about the possible states and their meaning with the actual compliance calculus method.
+
+*Checking that the node is correctly reporting, at correct frequency*
+
+At the node level, we are checking that the node is sending reports according to the
+expected frequency, and for the currently defined version of the configuration for it.
+
+Based on this information, we get one of the following states:
+
+Applying::
+
+When a new set of policies are defined for a node (or any update to existing one), Rudder waits during a grace period
+for reports so that the node has time to apply the new policies.
+During this period, the configuration is said 'Applying'.
+
+No report::
+
+The system didn't send any reports for a time incompatible with the agent frequency run interval. Most
+likely, the node is not online or there is an ongoing network issue between the node and Rudder server.
+
+
+
+*At directive level: checking for drift and auto-healing*
+
+
+Success or Compliant::
+
+The system is already in the desired state. No change is needed. Conformity is reached.
+
+Repaired::
+
+When a configuration policy is "enforced", that state means that the system was not in the desired state.
+Rudder applied some change and repaired what was not correct. Now the system is in the desired state.
+
+Error::
+
+When configuration is enforced, it means that the system is not in the desired state and Rudder wasn't able to repair the system.
+
+Non compliant::
+
+When configuration is not enforced, it means that the system is not in the desired state. A drift is reported.
+
+Not applicable::
+
+A specific configuration may not be applicable on a given node because some precondition
+are not met. For example, the specified configuration is only relevant for Linux nodes, and
+thus is Not applicable on a Windows server.
+
+Unexpected::
+
+We have a special kind of report for unexpected states (both for enforce and audit mode). These
+reports generally mean that the node is sending reports for unexpected configuration components. It
+may be due to bad parameters for the configuration, or an error in the Technique.
+
+
+*Compliance calculus*
+
+Based on these facts, the compliance of a Rule is calculated like this:
+
+Number of Nodes for which conformity is reached for every Directive of the
+Rule / Total number of Nodes on which the Rule has been applied
+
+
+=== Validation workflow in Rudder
+
+The validation workflow is a feature whose purpose is to hold any change (Rule, Directive, Group) made by users in the web interface,
+to be reviewed first by other users with the adequate privileges before actual deployment.
+
+The goal is to improve safety and knowledge sharing in the team that is using Rudder.
+
+To enable it, you only have to tick "Enable Change Requests" in the Administration - Settings tab of the web interface. (This feature
+is optional and can be disabled at any time without any problem, besides risking the invalidation of yet-unapproved changes)
+
+image::workflows/Enabling.png[]
+
+==== What is a Change request ?
+
+A Change request represents a modification of a Rule/Directive/Group from an old state to a new one.
+The change is not saved and applied to the configuration before it has been reviewed and approved by other members of the team.
+
+A Change request has:
+
+- An Id (an integer > 0)
+- A title.
+- A description.
+- A creator.
+- A status.
+- Its own history.
+
+This information can be updated on the change request detail page.
+For now, a Change request is linked to one change at a time.
+
+===== Change request status
+
+There are 4 Change request statuses:
+
+Pending validation::
+- The change has to be reviewed and validated.
+- Can be sent to: Pending deployment, Deployed, Cancelled.
+
+Pending deployment::
+- The change was validated, but still needs to be deployed.
+- Can be sent to: Deployed, Cancelled.
+
+Deployed::
+- The change is deployed.
+- This is a final state, it can't be moved anymore.
+
+Cancelled::
+- The change was not approved.
+- This is a final state, it can't be moved anymore.
+
+Here is a diagram about all those states and transitions:
+
+image::workflows/States.png[]
+
+==== Change request management page
+
+All Change requests can be seen on the /secure/utilities/changeRequests page.
+There is a table containing all requests, you can access to each of them by clicking on their id.
+You can filter change requests by status and only display what you need.
+
+image::workflows/Management.png[]
+
+===== Change request detail page
+
+Each Change request is reachable on the /secure/utilities/changeRequest/id.
+
+image::workflows/Details.png[]
+
+The page is divided into two sections:
+
+Change request information::
+
+display common information (title, description, status, id) and a form to edit them.
+
+image::workflows/Informations.png[]
+
+Change request content::
+
+In this section, there are two tabs:
+- History about that change request
+
+image:workflows/History.png[]
+
+- Display the change proposed
+
+image:workflows/Rule_Update_Diff.png[]
+
+
+==== How to create a Change request ?
+
+If Change requests are enabled in Rudder, every change will make you create a Change request.
+You will have a popup to enter the name of your change request and a change message.
+
+The change message will be used as the description for your Change Request, so we advise filling it in anyway to keep an explanation about your change.
+
+image::workflows/Popup.png[]
+
+Change request are not available for Rule/Directive/Groups creation, they are only active if the Rule/Directive/Groups existed before:
+
+Here is a small table about all possibilities:
+
+image::workflows/Table.png[]
+
+==== How to validate a Change request ?
+
+===== Roles
+
+Not every user can validate or deploy change in Rudder.
+Only those with one of the following roles can act on Change request:
+
+Validator::
+Can validate Change request
+
+Deployer::
+To deploy Change Request
+
+Both of those roles:
+
+- Give you access to pending Change requests
+- Allow you to perform actions on them (validate or cancel)
+
+You have to change users in */opt/rudder/etc/rudder-users.xml* and include those rights.
+Without one of those roles, you can only access Change Request in 'Deployed' or 'Cancelled' and those you opened before.
+
+You can deploy directly if you have both the validator and deployer roles.
+The *administrator* Role gives you both the deployer and validator roles.
+
+There is also the possibility to access Change requests in Read only mode by using the role 'validator_read' or 'deployer_read'.
+
+image::workflows/Validation.png[]
+
+===== Self Validations
+
+Using Change requests means that you want your team to share knowledge, and validate each other change.
+So by default:
+
+- *Self validation* is disabled.
+- *Self deployment* is enabled.
+
+Those two behaviours can be changed in the property file */opt/rudder/etc/rudder-web.properties*.
+'rudder.workflow.self.validation' and 'rudder.workflow.self.deployment' are the properties that define this behaviour.
+
+==== Change request and conflicts
+
+When the initial state of a Change request has changed (i.e.: you want to modify a Directive, but someone else's change to that Directive has been accepted before yours), your change can't be validated anymore.
+
+image::workflows/Conflict.png[]
+
+For now, we decided to reduce the possibility of an error or inconsistency when there are concurrent changes.
+In a future version of Rudder, there will be a system to handle those conflicts, and make sure actual changes are not overwritten.
+
+==== Notifications:
+
+In several parts of Rudder webapp there are some Notifications about Change requests.
+
+===== Pending change requests
+
+This notification is displayed only if the validator/deployer role is active on your user account.
+It shows you how many Change requests are waiting to be reviewed/deployed.
+Clicking on it will lead you to the Change request management page, with a filter already applied.
+
+image::workflows/Notification.png[]
+
+===== Change already proposed on Rule/Directive/Group
+
+When there is a change about the Rule/Directive/Group already proposed but not deployed/cancelled, you will be notified that there are some pending Change requests about that element.
+You will be provided a link to those change requests, so you can check if the change is already proposed.
+
+image::workflows/Warning.png[]
+
+
+[[_policy_mode_audit_enforce]]
+=== Policy Mode (Audit/Enforce)
+
+Rudder 4.0 includes a policy mode setting, that allows two distinct behaviors:
+
+* *Audit*: Test if the system is in the desired state, and report about it
+* *Enforce*: Test if the system is in the desired state, if not, try to act to get to this state, and report about actions taken and final state
+
+This allows for example xref:26_manage_your_it/5_usecases/0_usecases_intro.adoc#_using_rudder_as_an_audit_tool[to use Rudder as an audit tool] or xref:26_manage_your_it/5_usecases/0_usecases_intro.adoc#_using_audit_mode_to_validate_a_policy_before_applying_it[to test a policy before enforcing it].
+
+image:audit_mode_general_overview.png[]
+
+This mode can be set:
+
+* Globally on the Rudder root server. In this case there are two options: allow to override this mode on specific items, or use the global configuration everywhere.
+* On a directive.
+* On a node.
+
+A lot of attention and several safeguards have been put in place to ensure that if you choose to use "Audit"
+for a target, nothing will be changed on the node for that target (except Rudder's own configuration under `/var/rudder`), and only some harmless
+commands will be run (like listing installed packages or refreshing package lists).
+
+Nodes are fully aware of exactly what directives need to be executed in Audit or in Enforce mode, and the "rudder agent" command line has been enhanced to let you see the result with a glimpse: the first column in `rudder agent run` output is now the mode (*A* for *Audit* and *E* for *Enforce*), and the compliance summary is split by audit mode.
+In addition to pre-existing technical reports, new ones have been added to report on "audit-compliant" (the check was OK), "audit-non-compliant" (the check was done, but the result is not the one expected), "audit-not-applicable" (the check is not applicable for that node, for example because of a limitation on the OS type), "audit-error" (the check wasn't able to finish correctly) status.
+
+==== How is the effective mode computed?
+
+We will here explain what is the computation made during generation to
+decide which mode to apply to a directive on a node, based on the current settings.
+
+The short rule is: *Override wins, then Audit wins*
+
+For a given directive on a given node at a given time, we have three different policy mode
+settings:
+
+* The global mode, called *G*, which can be *Audit* or *Enforce*
+* The node mode called *N*, which can be *Global* (if not overridden), *Audit*, or *Enforce*
+* The directive mode, called *D*, which can be *Global* (if not overridden), *Audit*, or *Enforce*
+
+The result is:
+
+* If override is not allowed, the policy mode is *always* the global mode *G*.
+* If override is allowed:
+
+** If *N* and *D* are set to use the *Global* default value (i.e. no override), the policy mode is the global mode *G*.
+** If *N* uses the *global* value and *D* is overridden to *Audit* or *Enforce*, the *D* value is used.
+** If *D* uses the *global* value and *N* is overridden to *Audit* or *Enforce*, the *N* value is used.
+** If *N* and *D* are overridden to *Audit* or *Enforce*, the value is *Audit* if at least one of *N* or *D* is *Audit*, *Enforce* if both are in *Enforce* mode
+
+
+[[technique-editor]]
+=== Technique editor
+
+==== Introduction
+
+===== First, what is a Technique ?
+
+A technique is a description in code form of what the agent has to do on the node.
+This code is actually composed of a series of Generic method calls.
+These different Generic method calls are conditional.
+
+===== What is a Generic method?
+
+A generic method is a description of an elementary state independent of the operating system (ex: a package is installed, a file contains such line, etc...).
+Generic methods are independent of the operating system (It has to work on any operating system).
+Generic methods calls are conditioned by condition expressions, which are boolean expression combining basic conditions with classic boolean operators (ex : operating system is Debian, such generic method produced a modification, did not produce any modification, produced an error, etc…)
+
+
+==== Technique Editor
+
+===== Utility
+
+Rudder provides a set of pre-defined Techniques that cover some basic configuration and system administration needs. Of course, this set of techniques cannot respond to all of the specific needs of each client. That’s why Rudder integrates the *Technique _editor_*, a tool to create advanced Techniques.
+Directly accessible from the Rudder menu (_Utilities > Technique editor_), this tool has an easy-to-use interface, which doesn’t require any programming skills but nevertheless allows to create complex Techniques.
+
+===== Interface
+
+Here is an overview of its interface :
+
+image::technique_editor/1-rudder-technique-editor.png[]
+
+The interface is divided into 3 columns:
+
+
+- A column listing custom Techniques
+
+image::technique_editor/2-list-techniques.png[]
+
+Here, we can see our previously created Techniques. We can click on them to see their details/edit them, or create a new one by clicking on the “New” button. These Techniques are visible in the *ncf techniques* category in the *Directives _tree_*, so they can be used to create new Directives.
+
+- A column with the Technique content
+
+When we create a new Technique, or when we edit an existing one, the configuration form appears at the center of the interface, instead of the title and the description of the tool.
+
+image::technique_editor/3-ntp-configuration.png[]
+
+Then we can see the name, the description, the Bundle name, the version and the Generic methods list of the current Technique. Only the name and the description are editable, the Bundle name and the version are automatically defined during the Technique creation.
+
+- A column listing Generic methods / displaying generic method details
+
+To the right of the interface is the list of Generic methods available for Technique configuration.
+This list is made up of about a hundred Generic methods, grouped according to their category to make them easier to use. (An exhaustive list of them available at any time in the online product documentation can be found on the following link: http://www.rudder-project.org/doc/_generic_methods.html)
+
+image::technique_editor/4-list-generics-method.png[]
+
+You just need to click on a Generic method or drag'n drop it in the area provided for such purpose to add it to the current Technique. Once it's done, you can configure it by clicking on it. Then a new display containing the method details appears instead of the Generic methods list:
+
+image::technique_editor/5-configure-generic-method.png[]
+
+The Generic method details are divided into 3 blocks :
+
+. Conditions
+  - Conditions allow user to restrict the execution of the method.
+. Parameters
+  - Parameters are in mono or multi line text format. They can contain variables which will be expanded at the time of the execution.
+. Result conditions
+  - One result condition of three will be defined following the execution of a generic method:
+    * Success, when the configuration is correct and no action are needed
+    * Repaired, when the configuration is wrong and actions to fix it were executed with success
+    * Error, when the configuration is wrong but actions to fix it failed
+
+These conditions can be used in another Generic method's conditions, i.e., you can execute a command if a previous one failed or was repaired.
+
+
+==== Create your first Technique
+
+Now we are going to see how to create a simple technique to configure a ntp server, step by step.
+
+===== 1. General information
+
+Let's start from the beginning. Click on the "_New_ Technique" button and start filling in the General information fields (only name is required).
+
+In our case:
+
+- *Name*: _Configure NTP_
+- *Description*: _Install, configure and ensure the ntpd is running. Uses a template file to configuration._
+
+===== 2. Add and configure generic methods
+
+Now, we have to find and add the generic methods which correspond to the actions we want to execute. In our case, we want to add the following methods:
+
+* Package install (You can find it in the *Package category*)
+  - This method only takes one parameter, the name of the package to install. So here, fill in the *package_name* field with the value _ntp_.
+
+* File from template (You can find it in the *File category*)
+  - This method takes two parameters. The first one corresponds to the absolute path of the source file containing a template to be expanded. We are going to use a Rudder variable here to get the correct path. Fill in the *source_template* field with the value _$\{path_technique\}/templates/ntp.conf_.
+  - The second corresponds to the absolute path of the destination file. Fill in with the value _/etc/ntp.conf_.
+
+* Service restart (You can find it in the *Service category*)
+  - This method only takes one parameter, the name of the service we want to restart. So here, fill in the *service_name* field with the value _ntp_.
+  - Also, we want to restart the service only if it has just been installed, so only if the result condition defined following the execution of the *Package install* method is *Repaired* (package_install_ntp_repaired). So here, fill in the *Other conditions* field in the Conditions panel with the value _package_install_ntp_repaired_.
+
+* Service ensure running (You can find it in the *Service category*)
+  - This method only takes one parameter, the name of the service we want to check. Again, here, fill in the *service_name* field with the value _ntp_.
+
+===== 3. Save and apply your technique
+
+And… It’s already done. Rather fast, right? Don't forget to save. Now you can see it in the *Directives _tree_*, and  use it to create a Directive that will be applied on your _Nodes_ thanks to a _Rule_.
+
+
+
+=== Usecases
+
+This chapter gives a few examples for using Rudder. We have no doubt that you'll
+have your own ideas, that we're impatient to hear about...
+
+==== Dynamic groups by operating system
+
+Create dynamic groups for each operating system you administer, so that you can
+apply specific policies to each type of OS. When new nodes are added to Rudder,
+these policies will automatically be enforced upon them.
+
+==== Library of preventive policies
+
+Why not create policies for emergency situations in advance? You can then put
+your IT infrastructure in "panic" mode in just a few clicks.
+
+For example, using the provided Techniques, you could create a Name
+resolution Directive to use your own internal DNS servers for normal situations,
+and a second, alternative Directive, to use Google's public DNS servers, in case
+your internal DNS servers are no longer available.
+
+==== Standardizing configurations
+
+You certainly have your own best practices (let's call them good habits) for
+setting up your SSH servers.
+
+But is that configuration the same on all your servers? Enforce the settings
+you really want using an OpenSSH server policy and apply it to all your Linux
+servers. SSH servers can then be stopped or reconfigured manually many times,
+Rudder will always restore your preferred settings and restart the SSH server in
+less than 5 minutes.
+
+[[_using_rudder_as_an_audit_tool]]
+==== Using Rudder as an Audit tool
+
+Using Rudder as an Audit tool is useful if you do not want to make any changes on the system,
+temporarily (freeze period, etc.) or permanently.
+
+To use Rudder as an Audit tool without modifying any configuration on your systems,
+set the Policy Mode to *Audit* in the Settings, and do not allow overriding.
+
+==== Using Audit mode to validate a policy before applying it
+
+Before applying a configuration policy to some systems (a new policy or a new system),
+you can switch the policy mode of the directive defining this policy or of the nodes
+it is applied to to *Audit*.
+
+This is particularly useful when adding rules to enforce policies that are supposed to be already applied:
+you can measure the gap between expected and actual state, and check what changes would be made before applying them.
+
diff --git a/src/reference/modules/ROOT/pages/21_node_management/22_search_nodes.adoc b/src/reference/modules/usage/pages/node_management.adoc
similarity index 59%
rename from src/reference/modules/ROOT/pages/21_node_management/22_search_nodes.adoc
rename to src/reference/modules/usage/pages/node_management.adoc
index 41087c3f..f6c9f344 100644
--- a/src/reference/modules/ROOT/pages/21_node_management/22_search_nodes.adoc
+++ b/src/reference/modules/usage/pages/node_management.adoc
@@ -1,12 +1,93 @@
+[[_node_management_2]]
+= Node management
 
-[[search-nodes, Search Nodes]]
+[[inventory, Node Inventory]]
+
+image::node_workflow.svg[]
+
+== Node inventory
+
+image::nodes.png[]
+
+Rudder integrates a node inventory tool which harvest useful information
+about the nodes. This information is used by Rudder to handle the nodes, and
+you can use the inventory information for Configuration Management purposes:
+search Nodes, create Groups of Nodes, determine some configuration management
+variables. 
+
+In the Rudder Web Interface, each time you see a Node name, you can click on it
+and display the collection of information about this Node. The inventory is
+organized as following: first tab is a 'summary' of administrative information
+about the Node; other tabs are specialized for 'hardware', 'network' interfaces,
+and 'software' for every Node; tabs for 'reports' and 'logs' are added on
+Rudder managed Nodes.
+
+The 'Node Summary' presents administrative information like the Node
+'Hostname', 'Operating System', 'Rudder Client name', 'Rudder ID' and 'Date'
+when the inventory was 'last received'. When the Node has been validated, some
+more information is displayed like the Node 'Name' and the 'Date first
+accepted in Rudder'.
+
+The 'hardware' information is organized as following: 'General', 'File
+systems', 'Bios', 'Controllers', 'Memory', 'Port', 'Processor', 'Slot', 'Sound',
+'Storage', 'Video'.
+
+'Network' connections are detailed as following: 'Name' of the interface on the
+system, 'IP address', 'Network Mask', usage of 'DHCP' or static configuration,
+'MAC address', 'Type' of connection, 'Speed' of the connection and 'Status'.
+
+And finally, you get the list of every 'software' package present on the
+system, including version and description.
+
+On Nodes managed by Rudder, the 'Compliance Reports' tab displays information about the
+status of the latest run of Rudder Agent, whereas the 'Technical Logs' tab displays
+information about changes for the Node.
+
+image::node-compliance.png[]
+
+
+
+[[accept-new-nodes, Accept new Nodes]]
+== Accept new Nodes
+
+At the starting point, the Rudder Server doesn't know anything about the Nodes.
+After the installation of the Rudder Agent, each Node registers itself to the
+Rudder Server, and sends a first inventory. Every new Node must be manually
+validated in the Rudder Web Interface to become part of Rudder Managed Nodes.
+This task is performed in the *Node Management > Accept new Nodes* section of
+the application. You can select Nodes waiting for an approval, and determine
+whether you consider them as valid or not. Click on each Node name to display
+the extended inventory. Click on the magnifying glass icon to display the
+policies which will be applied after the validation.
+
+.Accept the new Node +debian-node.rudder-project.org+
+
+====
+
+. Install and configure the Rudder Agent on the new Node
++debian-node.rudder-project.org+
+
+. Wait a few minutes for the first run of the Rudder Agent.
+
+. Navigate to *Node Management > Accept new Nodes*.
+
+. Select the new Node in the list.
+
+. Validate the Node.
+
+. The Node is now integrated in Rudder, you can search it using the search
+tools.
+
+====
 
-=== Search Nodes
+
+[[search-nodes, Search Nodes]]
+== Search Nodes
 
 You can navigate to *Node Management > Search Nodes* to display information
 about the Nodes which have been already validated, and are managed by Rudder.
 
-==== General behavior
+=== General behavior
 
 In the Advanced Search tool, you can create complex searches based on Node
 Inventory information. The benefit of the Advanced Search tool is to save the
@@ -28,7 +109,7 @@ among the list of fields concerning this theme.
 The matching rule can be selected between following possibilities: 'Is defined',
 'Is not defined', '=', '≠' or 'Regex'  followed by the term you are searching for presence or
 absence. Depending on the field, the list of searchable terms is either an free
-text field, either the list of available terms. 
+text field, or the list of available terms.
 
 - 3. Add another rule
 
@@ -49,22 +130,22 @@ create this 2 lines request:
 . First search line: 'Node', 'Operating System', +=+, 'Linux'.
 
 . Second search line: 'Software', 'Name', +=+, +ssh+.
- 
+
 ====
 
-==== Search numbers with units
+=== Search numbers with units
 
 Some parameters for the advanced search tool allow using units. For example, in
 the search criterion for RAM size, you can type +512MB+ instead of a value in
 bytes. This paragraph describes supported units by parameter type.
 
-===== Bytes and multiples
+==== Bytes and multiples
 
 All criteria using a memory size (RAM, hard disk capacity, etc) is by default
 expected in bytes. If no other unit is specified, all values will be assumed to
 be in bytes.
 
-===== Convenience notation
+==== Convenience notation
 
 All memory sizes can be written using spaces or underscores (+_+) to make the
 numbers easier to read. Numbers must begin with a digit. For example, the
@@ -87,7 +168,7 @@ _1234
 
 ----
 
-===== Supported units
+==== Supported units
 
 Units used are non binary units, and a multiplication factor of 1024 is applied
 between each unit. Units are case-insensitive. Therefore, +Mb+ is identical to
@@ -101,7 +182,7 @@ above):
 [options="header"]
 
 |====
-| Notation | Alternate | Value 
+| Notation | Alternate | Value
 | +b+ | +o+ | bytes (equivalent to not specifying a unit)
 | +kb+ | +ko+ | 1024 bytes
 | +mb+ | +mo+ | 1024^2 bytes
@@ -113,7 +194,7 @@ above):
 | +yb+ | +yo+ | 1024^8 bytes
 |====
 
-==== Regex matching rule
+=== Regex matching rule
 
 You can use regular expressions to find whatever you want in Node inventories.
 A search request using a regexp will look for every node that match the pattern you
@@ -126,19 +207,19 @@ for more details.
 
 ====
 
-Assuming you want to search every node using an ip address match 192.168.x.y, where x<10 
+Assuming you want to search every node using an ip address match 192.168.x.y, where x<10
+and y could be everything. You will have to add that line to your search request:
 
 * 'Node summary', 'Ip address', +Regex+, '192\ .168\ .\d\ . .*'
- 
+
 ====
 
-==== Composite search (name=value)
+=== Composite search (name=value)
 
 Some fields allow you to look for more than one piece of information at a time.
-That's the case for environment variable. For those fields you have to enter 
+That's the case for environment variables. For those fields you have to enter
 the first element then the separator then following elements.
-The name of the fields tells you about what is expected. It would look like 
+The name of the fields tells you about what is expected. It would look like
+firstelement<separator>secondelement+ assuming that <separator> is the separator.
 
 .Search Environment Variable +LANG=C+.
@@ -149,20 +230,20 @@ Assuming you want to search every node having the environment variable LANG set
 You will have to add that search line to your request:
 
 * 'Environment variable', 'Name=Value', +=+, 'LANG=C'.
- 
+
 ====
 
-==== Node properties search
+=== Node properties search
 
-Node properties are special because they support both `key=value` and `key=JSON` content. 
-As of Rudder 4.3.2, we have the possibility to choose among three operator givent the use case, 
-so let's see which is the best on each case. 
+Node properties are special because they support both `key=value` and `key=JSON` content.
+As of Rudder 4.3.2, we have the possibility to choose among three operators given the use case,
+so let's see which is the best in each case.
 
-===== Check for property existence: [Name equals]
+==== Check for property existence: [Name equals]
 
 The +Name equals+ operator allows to find all nodes which have the property with the given name
 defined. That operator only look for the property name and don't care if the value is a string or
-a JSON one. 
+a JSON one.
 
 .Search for nodes with the property +datacenter+ defined
 
@@ -173,13 +254,13 @@ a JSON one.
 ====
 
 
-===== Lookup node by property name=value pair
+==== Lookup node by property name=value pair
 
-If you want to lookup nodes by a property name and its value, you can use the "name=value" operator, 
-as explained in paragraph "composite search" above. 
+If you want to lookup nodes by a property name and its value, you can use the "name=value" operator,
+as explained in paragraph "composite search" above.
 
-Please note that if the value is JSON, you will need to use the exact serialization of the JSON 
-value (in a compact formating: no spaces, etc). It is generally not what you want to do with 
+Please note that if the value is JSON, you will need to use the exact serialization of the JSON
+value (in a compact format: no spaces, etc). It is generally not what you want to do with
 JSON value, and for them it is better to use JSON path query (see below).
 
 .Search for the node property "datacenter" with value "Paris"
@@ -190,7 +271,7 @@ JSON value, and for them it is better to use JSON path query (see below).
 
 ====
 
-===== Lookup node by property name=value pair with regex
+==== Lookup node by property name=value pair with regex
 
 You can also use regex on node properties name and value. The regex can be done on each part, so
 the following examples allow to fill different purposes:
@@ -221,12 +302,12 @@ the following examples allow to fill different purposes:
 ====
 
 [[search-nodes-properties-json-path, JSON Path queries on Node Properties]]
-===== JSON Path queries on Node Properties
+==== JSON Path queries on Node Properties
 
 Since Rudder 4.3.2, you can use a JSON path query to lookup nodes based of specific feature
-of the JSON value of a node property. 
+of the JSON value of a node property.
 
-The operator is +Name:JSON Path+ and the general usage is: 
+The operator is +Name:JSON Path+ and the general usage is:
 
 .Search for nodes with a property by JSON path query
 
@@ -238,7 +319,7 @@ The operator is +Name:JSON Path+ and the general usage is:
 
 Where +json-path-query+ is JSON path selector from https://github.com/json-path/JsonPath.
 Nodes are selected if the JSON path selector result is not empty, i.e if the JSON value
-contains selected elements. 
+contains selected elements.
 
 For example, let's say you have a node with a property whose name is +datacenter+ and value is:
 
@@ -262,7 +343,7 @@ The following query would all select the node:
 
 .Example of JSON path queries
 
-==== 
+====
 
 //select based on the value of a JSON leaf - boolean type
 'Properties', 'Name=Value', +Name:JSON Path+, 'datacenter:$.[?(@.accepted==true)]'
@@ -281,3 +362,38 @@ The following query would all select the node:
 
 ====
 
+
+[[groups, Groups]]
+
+== Group of Nodes
+
+You can create Group of Nodes based on search criteria to ease attribution of
+Rules in Configuration Management. The creation of groups can be done from the
+'Node Management > Search Nodes' page, or directly from the Groups list in
+'Node Management > Groups'. A group can be either Dynamic or Static.
+
+include::{partialsdir}/glossary/dynamic-group.adoc[]
+
+include::{partialsdir}/glossary/static-group.adoc[]
+
+image::groups.png[]
+
+.Create a dynamic group for Linux Nodes with +ssh+ having an ip address in 192.168.42.x.
+
+====
+
+To create that dynamic group as described above, you first have to create a new group
+with group type set to +Dynamic+. Then you have to set its search request to:
+
+. Operator: +AND+.
+
+. First search line: 'Node', 'Operating System', +=+, 'Linux'.
+
+. Second search line: 'Software', 'Name', +=+, +ssh+.
+
+. Third search line: 'Node summary', 'Ip address', +Regex+, '192\.168\.42\..*'.
+
+Finally, you have to click on Search to populate the group and click on Save to actually save it.
+
+====
+
diff --git a/src/reference/modules/ROOT/pages/26_manage_your_it/30_security/10_policy_security.adoc b/src/reference/modules/usage/pages/security.adoc
similarity index 92%
rename from src/reference/modules/ROOT/pages/26_manage_your_it/30_security/10_policy_security.adoc
rename to src/reference/modules/usage/pages/security.adoc
index bc1ca6d8..1c859092 100644
--- a/src/reference/modules/ROOT/pages/26_manage_your_it/30_security/10_policy_security.adoc
+++ b/src/reference/modules/usage/pages/security.adoc
@@ -1,4 +1,6 @@
-==== Data confidentiality
+= Security considerations
+
+== Data confidentiality
 
 Rudder is designed to strictly separate policies between nodes,
 and to only let a node access its own policies.
@@ -6,7 +8,7 @@ and to only let a node access its own policies.
 This section will give details about how the policies are secured, and which
 content is node-specific or global.
 
-===== Private data
+=== Private data
 
 All confidential information should be stored in private data, namely:
 
@@ -16,7 +18,7 @@ All confidential information should be stored in private data, namely:
 
 There are:
 
-* always transfered encrypted between nodes (using agent copy protocol or https for the interface and the API)
+* always transferred encrypted between nodes (using agent copy protocol or https for the interface and the API)
 * only available to the nodes that need it
 * only accessible locally by the users that need it
 
@@ -30,7 +32,7 @@ More precisely:
 * relay: only the data needed for the served nodes and the relay itself are available and stored locally, only accessible to the root user
 * node: only the data needed to configure the node is available and stored locally, only accessible to the root user
 
-===== Common data
+=== Common data
 
 This refers to content available from all nodes in the authorized networks, readable from all users
 on the nodes (and that can be transfered withtout encryption when using initial promises of a pre-4.0 node).
@@ -41,11 +43,11 @@ These unprotected contents are:
 * the common ncf part (`/var/rudder/ncf/common`), which includes all the content distibuted in the `ncf` package
 * the Rudder techniques sources (without parameters), which includes all the content distibuted in the `rudder-techniques` package
 
-==== Node-Server communication security
+== Node-Server communication security
 
 This section gives more details about the different flows between nodes and servers.
 
-===== File copy
+=== File copy
 
 File copy is used to get policies and files copied during policy execution (named *shared-files*).
 
@@ -68,7 +70,7 @@ The access policy is:
 The old hostame and IP ACL are still generated for node-specific files to ensure compatibility with older nodes,
 but will be removed in the future.
 
-===== Inventory
+=== Inventory
 
 Nodes send an inventory to the server after installation or upgrade, and once a day.
 
@@ -79,7 +81,7 @@ This inventory contains various information, including:
 
 The inventory security policy is:
 
-* Inventories are sent by the node to its configured policy_server over HTTPS, currently whithout certificate validation.
+* Inventories are sent by the node to its configured policy_server over HTTPS, currently without certificate validation.
 * Inventories are signed by the node using its private key, which allows the server to check this signature using
   the public key coming from previous inventory and to ensure it really comes from the right node.It avoids treating a
   malicious (or bogus) inventory coming from another node, and you should check the public key when accepting a new node.
diff --git a/src/reference/modules/ROOT/pages/20_usage/20_quick_search_anything.adoc b/src/reference/modules/usage/pages/web_interface.adoc
similarity index 62%
rename from src/reference/modules/ROOT/pages/20_usage/20_quick_search_anything.adoc
rename to src/reference/modules/usage/pages/web_interface.adoc
index c4d00395..652e09c3 100644
--- a/src/reference/modules/ROOT/pages/20_usage/20_quick_search_anything.adoc
+++ b/src/reference/modules/usage/pages/web_interface.adoc
@@ -1,7 +1,104 @@
+= Web interface usage
+
+This chapter is a general presentation of the Rudder Web Interface. You will
+find how to authenticate in the application, a description of the design of the
+screen, and some explanations about usage of common user interface items like
+the search fields and the reporting screens.
+
+== Authentication
+
+When accessing the Rudder web interface, a login / password is required.  The
+default account is "admin" (Password: admin).
+
+You can change the user accounts by following the xref:administration:user_management.adoc#user-management[User management]
+procedure.
+
+== Presentation of Rudder Web Interface
+
+The web interface is organised according to the concepts described earlier. It
+is divided in three logical parts: Node Management, Configuration Management
+and Administration.
+
+=== Rudder Home
+
+The home page summarizes the content of the other parts and provides quick links
+for the most common actions.
+
+.Rudder Homepage
+
+image::rudder-home.png[Home menu]
+
+=== Node Management
+
+In the Node Management section, you will find the list of all Nodes, the validation tool for new
+Nodes, a search engine for validated Nodes, and the management tool for groups
+of Nodes.
+
+.List of Nodes
+
+image::nodes.png[Nodes]
+
+.Node compliance
+
+image::node-compliance.png[Node Compliance]
+
+.Groups
+
+image::groups.png[Groups]
+
+=== Configuration Management
+
+In the Configuration Management section, you can select the Techniques,
+configure the Directives and manage the Rules and check their compliance.
+
+.Rules screen
+
+image::Rule_config.png[Rules list]
+
+.Rule compliance
+
+image::Rule_compliance.png[Rule compliance]
+
+.Directive list
+
+image::Directive_management.png[Directives]
+
+=== Utilities
+
+This section contains tools useful for your everyday usage of Rudder.
+This is where you will find the technique editor, the event logs table
+or the change requests if you have enabled that feature.
+
+.Event Logs
+
+image::event_log.png[Event logs]
+
+.Technique Editor
+
+image::technique_editor/1-rudder-technique-editor.png[Technique editor]
+
+.Technique details
+
+image::technique_editor/5-configure-generic-method.png[Technique details]
+
+=== Settings
+
+The Settings section provides you a way to modify your Rudder setup: you can setup the
+available networks for the Policy Server, configure agent run and policy mode,
+enable web interface options and manage installed plugins.
+
+.Settings screen
+
+image::rudder-admin-settings.png[Settings]
+
+.Changing global agent run
+
+image::Global_run_settings.png[Global run settings]
+
 
 [[quick-search-anything, Quick Search Anything]]
 
-=== Quick search anything
+== Quick search anything
 
 You might have noticed the small text area at the top of the Rudder interface:
 it is the Quick Search bar. Its purpose is to enable a user to easily search for
diff --git a/src/reference/tools/generate-nav.py b/src/reference/tools/generate-nav.py
index cb815b0c..adf213bc 100755
--- a/src/reference/tools/generate-nav.py
+++ b/src/reference/tools/generate-nav.py
@@ -2,8 +2,13 @@
 
 import os
 import re
+import sys
 
-PAGESDIR = "modules/ROOT/pages/"
+MODULE = sys.argv[1]
+NAME = sys.argv[2]
+
+MODULEDIR = "modules/"+MODULE
+PAGESDIR = MODULEDIR+"/pages/"
 TITLE = re.compile("^(=+) (.+)$")
 ID = re.compile(r"^[[(.+),?.*]]$")
 
@@ -28,13 +33,17 @@ def slugify(s):
     s = s.rstrip('_')
     return s
 
-os.chdir(PAGESDIR)
-
 # Get all standalone .adoc pages, sorted alphanumerically
 # We exclude files in root (the index of the doc), _partials which are not actual pages
-files = sorted([root.split('/', 1)[-1]+"/"+file for root, dirs, files in os.walk('.') for file in files if file.endswith(".adoc") and not "_partials" in root and not root == "."])
 
-result = ["// Automatically generated list of content - do not edit"]
+with open(MODULEDIR + "/nav.list") as f:
+    files = f.read().splitlines()
+#files = sorted([file for root, dirs, files in os.walk('.') for file in files if file.endswith(".adoc") and not "_partials" in root])
+#files = sorted([root.split('/', 1)[-1]+"/"+file for root, dirs, files in os.walk('.') for file in files if file.endswith(".adoc") and not "_partials" in root])
+
+result = ["// Automatically generated list of content - do not edit", "* "+NAME]
+
+os.chdir(PAGESDIR)
 
 for file in files:
     with open(file) as f:
@@ -44,14 +53,15 @@ def slugify(s):
     for line in content:
         search_title = TITLE.search(line)
         if search_title:
-            level = search_title.group(1).count("=")
+            level = search_title.group(1).count("=") + 1
             title = search_title.group(2)
             search_id = ID.search(prev)
             if search_id:
                 page_id = search_id.group(1)
             else:
                 page_id = slugify(title)
-            result.append("*" * level + " xref:" + file + "#" + page_id + "[" + title + "]")
+            if level < 6:
+                result.append("*" * level + " xref:" + file + "#" + page_id + "[" + title + "]")
         prev = line
 
 print("\n".join(result))