diff --git a/.gitattributes b/.gitattributes
index 24d63fe05dd5..5153c0907c9e 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -5,8 +5,6 @@ scripts/unittest eol=lf
*.groovy eol=lf
*.csv binary
*.json eol=lf
-Documentation/Books/SummaryBlacklist.txt eol=lf
-Documentation/Examples/*.generated merge=ours
VERSION merge=ours
STARTER_REV merge=ours
lib/V8/v8-json.cpp merge=ours
diff --git a/CHANGELOG b/CHANGELOG
index a85b4392034d..aebd26009540 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,6 +1,12 @@
devel
-----
+* removed content from Documentation/Books, keeping only the subfolders.
+ The documentation now lives in a separate repository (except DocuBlocks and Scripts):
+ https://github.com/arangodb/docs.git
+
+* TOKENS function updated to deal with primitive types and arrays
+
* MinReplicationFactor:
Collections can now be created with a minimal replication factor (minReplicationFactor) default 1.
If minReplicationFactor > 1 a collection will go into "read-only" mode as soon as it has less then minReplicationFactor
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4d6de8908483..3d11f79f43ca 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -296,18 +296,18 @@ set(ARANGODB_PACKAGE_VENDOR "ArangoDB GmbH")
set(ARANGODB_PACKAGE_CONTACT "info@arangodb.com")
set(ARANGODB_DISPLAY_NAME "ArangoDB")
set(ARANGODB_URL_INFO_ABOUT "https://www.arangodb.com")
-set(ARANGODB_HELP_LINK "https://docs.arangodb.com/${ARANGODB_VERSION_MAJOR}.${ARANGODB_VERSION_MINOR}/")
+set(ARANGODB_HELP_LINK "https://www.arangodb.com/docs/${ARANGODB_VERSION_MAJOR}.${ARANGODB_VERSION_MINOR}/")
set(ARANGODB_CONTACT "hackers@arangodb.com")
set(ARANGODB_FRIENDLY_STRING "ArangoDB - the native multi-model NoSQL database")
# MSVC
-set(ARANGO_BENCH_FRIENDLY_STRING "arangobench - stress test program")
-set(ARANGO_DUMP_FRIENDLY_STRING "arangodump - export")
-set(ARANGO_RESTORE_FRIENDLY_STRING "arangrestore - importer")
-set(ARANGO_EXPORT_FRIENDLY_STRING "arangoexport - datae xporter")
-set(ARANGO_IMPORT_FRIENDLY_STRING "arangoimport - TSV/CSV/JSON importer")
-set(ARANGOSH_FRIENDLY_STRING "arangosh - commandline client")
-set(ARANGO_VPACK_FRIENDLY_STRING "arangovpack - vpack printer")
+set(ARANGO_BENCH_FRIENDLY_STRING "arangobench - stress test tool")
+set(ARANGO_DUMP_FRIENDLY_STRING "arangodump - dump data and configuration")
+set(ARANGO_RESTORE_FRIENDLY_STRING "arangorestore - restore data and configuration")
+set(ARANGO_EXPORT_FRIENDLY_STRING "arangoexport - data exporter")
+set(ARANGO_IMPORT_FRIENDLY_STRING "arangoimport - data importer")
+set(ARANGOSH_FRIENDLY_STRING "arangosh - command-line client")
+set(ARANGO_VPACK_FRIENDLY_STRING "arangovpack - VelocyPack pretty-printer")
# libraries
set(LIB_ARANGO arango)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index bf74ab7d90ad..f65590aa1d87 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -33,12 +33,8 @@ Contributing features, documentation, tests
commit message so the issues will get updated automatically with comments.
* If the modifications change any documented behavior or add new features,
- document the changes. The documentation can be found in arangod/Documentation
- directory. To recreate the documentation locally, run make doxygen. This will
- re-create all documentation files in the Doxygen directory in your
- repository. You can inspect the documentation in this folder using a text
- editor or a browser. We recently agreed that future documentation should be
- written in American English (AE).
+ document the changes. The documentation should be written in American English
+ and can be found at https://github.com/arangodb/docs
* When done, run the complete test suite and make sure all tests pass. You can
check [README_maintainers.md](README_maintainers.md) for test run instructions.
diff --git a/Documentation/Books/.gitignore b/Documentation/Books/.gitignore
deleted file mode 100644
index 6be6cbffe0ab..000000000000
--- a/Documentation/Books/.gitignore
+++ /dev/null
@@ -1,8 +0,0 @@
-*/manual.epub
-*/manual.mobi
-*/manual.pdf
-*/node_modules/
-books
-repos
-ppbooks
-allComments.txt
diff --git a/Documentation/Books/AQL/.gitkeep b/Documentation/Books/AQL/.gitkeep
new file mode 100644
index 000000000000..936ca3adc4e3
--- /dev/null
+++ b/Documentation/Books/AQL/.gitkeep
@@ -0,0 +1,5 @@
+Git cannot track empty directories.
+This file ensures that the directory is kept.
+
+Some of the old documentation build scripts, which copy files
+into this folder, are still used by the new system.
\ No newline at end of file
diff --git a/Documentation/Books/AQL/Advanced/ArrayOperators.md b/Documentation/Books/AQL/Advanced/ArrayOperators.md
deleted file mode 100644
index 5a8f61f5a4ec..000000000000
--- a/Documentation/Books/AQL/Advanced/ArrayOperators.md
+++ /dev/null
@@ -1,302 +0,0 @@
-Array Operators
-===============
-
-
-Array expansion
----------------
-
-In order to access a named attribute from all elements in an array easily, AQL
-offers the shortcut operator [\*] for array variable expansion.
-
-Using the [\*] operator with an array variable will iterate over all elements
-in the array, thus allowing access to a particular attribute of each element. It is
-required that the expanded variable is an array. The result of the [\*]
-operator is again an array.
-
-To demonstrate the array expansion operator, let's use the following three
-example *users* documents:
-
-```json
-[
- {
- name: "john",
- age: 35,
- friends: [
- { name: "tina", age: 43 },
- { name: "helga", age: 52 },
- { name: "alfred", age: 34 }
- ]
- },
- {
- name: "yves",
- age: 24,
- friends: [
- { name: "sergei", age: 27 },
- { name: "tiffany", age: 25 }
- ]
- },
- {
- name: "sandra",
- age: 40,
- friends: [
- { name: "bob", age: 32 },
- { name: "elena", age: 48 }
- ]
- }
-]
-```
-
-With the [\*] operator it becomes easy to query just the names of the
-friends for each user:
-
-```js
-FOR u IN users
- RETURN { name: u.name, friends: u.friends[*].name }
-```
-
-This will produce:
-
-```json
-[
- { "name" : "john", "friends" : [ "tina", "helga", "alfred" ] },
- { "name" : "yves", "friends" : [ "sergei", "tiffany" ] },
- { "name" : "sandra", "friends" : [ "bob", "elena" ] }
-]
-```
-
-This is a shortcut for the longer, semantically equivalent query:
-
-```js
-FOR u IN users
- RETURN { name: u.name, friends: (FOR f IN u.friends RETURN f.name) }
-```
-
-Array contraction
------------------
-
-In order to collapse (or flatten) results in nested arrays, AQL provides the [\*\*]
-operator. It works similarly to the [\*] operator, but additionally collapses nested
-arrays.
-
-How many levels are collapsed is determined by the number of asterisk characters used.
-[\*\*] collapses one level of nesting - just like `FLATTEN(array)` or `FLATTEN(array, 1)`
-would do -, [\*\*\*] collapses two levels - the equivalent to `FLATTEN(array, 2)` - and
-so on.
-
-Let's compare the array expansion operator with an array contraction operator.
-For example, the following query produces an array of friend names per user:
-
-```js
-FOR u IN users
- RETURN u.friends[*].name
-```
-
-As we have multiple users, the overall result is a nested array:
-
-```json
-[
- [
- "tina",
- "helga",
- "alfred"
- ],
- [
- "sergei",
- "tiffany"
- ],
- [
- "bob",
- "elena"
- ]
-]
-```
-
-If the goal is to get rid of the nested array, we can apply the [\*\*] operator on the
-result. But simply appending [\*\*] to the query won't help, because *u.friends*
-is not a nested (multi-dimensional) array, but a simple (one-dimensional) array. Still,
-the [\*\*] can be used if it has access to a multi-dimensional nested result.
-
-We can extend the above query as follows and still create the same nested result:
-
-```js
-RETURN (
- FOR u IN users RETURN u.friends[*].name
-)
-```
-
-Now, by appending the [\*\*] operator at the end of the query...
-
-```js
-RETURN (
- FOR u IN users RETURN u.friends[*].name
-)[**]
-```
-
-... the query result becomes:
-
-```json
-[
- [
- "tina",
- "helga",
- "alfred",
- "sergei",
- "tiffany",
- "bob",
- "elena"
- ]
-]
-```
-
-Note that the elements are not de-duplicated. For a flat array with only unique
-elements, a combination of [UNIQUE()](../Functions/Array.md#unique) and
-[FLATTEN()](../Functions/Array.md#flatten) is advisable.
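-
-For example, a sketch of such a combination, assuming the same *users* documents as above:
-
-```js
-RETURN UNIQUE(FLATTEN(
- FOR u IN users RETURN u.friends[*].name
-))
-```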
-
-Inline expressions
-------------------
-
-It is possible to filter elements while iterating over an array, to limit the number
-of returned elements and to create a projection using the current array element.
-Sorting is not supported by this shorthand form.
-
-These inline expressions can follow array expansion and contraction operators
-[\* ...] , [\*\* ...] etc. The keywords *FILTER*, *LIMIT* and *RETURN*
-must occur in this order if they are used in combination, and can only occur once:
-
-`anyArray[* FILTER conditions LIMIT skip,limit RETURN projection]`
-
-Example with nested numbers and array contraction:
-
-```js
-LET arr = [ [ 1, 2 ], 3, [ 4, 5 ], 6 ]
-RETURN arr[** FILTER CURRENT % 2 == 0]
-```
-
-All even numbers are returned in a flat array:
-
-```json
-[
- [ 2, 4, 6 ]
-]
-```
-
-Complex example with multiple conditions, limit and projection:
-
-```js
-FOR u IN users
- RETURN {
- name: u.name,
- friends: u.friends[* FILTER CONTAINS(CURRENT.name, "a") AND CURRENT.age > 40
- LIMIT 2
- RETURN CONCAT(CURRENT.name, " is ", CURRENT.age)
- ]
- }
-```
-
-No more than two computed strings based on *friends* with an `a` in their name and
-older than 40 years are returned per user:
-
-```json
-[
- {
- "name": "john",
- "friends": [
- "tina is 43",
- "helga is 52"
- ]
- },
- {
- "name": "sandra",
- "friends": [
- "elena is 48"
- ]
- },
- {
- "name": "yves",
- "friends": []
- }
-]
-```
-
-### Inline filter
-
-To return only the names of friends that have an *age* value
-higher than the user herself, an inline *FILTER* can be used:
-
-```js
-FOR u IN users
- RETURN { name: u.name, friends: u.friends[* FILTER CURRENT.age > u.age].name }
-```
-
-The pseudo-variable *CURRENT* can be used to access the current array element.
-The *FILTER* condition can refer to *CURRENT* or any variables valid in the
-outer scope.
-
-### Inline limit
-
-The number of elements returned can be restricted with *LIMIT*. It works the same
-as the [limit operation](../Operations/Limit.md). *LIMIT* must come after *FILTER*
-and before *RETURN*, if they are present.
-
-```js
-FOR u IN users
- RETURN { name: u.name, friends: u.friends[* LIMIT 1].name }
-```
-
-The above example returns one friend each:
-
-```json
-[
- { "name": "john", "friends": [ "tina" ] },
- { "name": "sandra", "friends": [ "bob" ] },
- { "name": "yves", "friends": [ "sergei" ] }
-]
-```
-
-A number of elements can also be skipped and up to *n* returned:
-
-```js
-FOR u IN users
- RETURN { name: u.name, friends: u.friends[* LIMIT 1,2].name }
-```
-
-The example query skips the first friend and returns two friends at most
-per user:
-
-```json
-[
- { "name": "john", "friends": [ "helga", "alfred" ] },
- { "name": "sandra", "friends": [ "elena" ] },
- { "name": "yves", "friends": [ "tiffany" ] }
-]
-```
-
-### Inline projection
-
-To return a projection of the current element, use *RETURN*. If a *FILTER* is
-also present, *RETURN* must come later.
-
-```js
-FOR u IN users
- RETURN u.friends[* RETURN CONCAT(CURRENT.name, " is a friend of ", u.name)]
-```
-
-The above will return:
-
-```json
-[
- [
- "tina is a friend of john",
- "helga is a friend of john",
- "alfred is a friend of john"
- ],
- [
- "sergei is a friend of yves",
- "tiffany is a friend of yves"
- ],
- [
- "bob is a friend of sandra",
- "elena is a friend of sandra"
- ]
-]
-```
\ No newline at end of file
diff --git a/Documentation/Books/AQL/Advanced/README.md b/Documentation/Books/AQL/Advanced/README.md
deleted file mode 100644
index fc81c451b1df..000000000000
--- a/Documentation/Books/AQL/Advanced/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-Advanced features
-=================
-
-This section covers additional, powerful AQL features, which you may want to look
-into once you have made yourself familiar with the basics of the query language.
-
-- [Array operators](ArrayOperators.md): Shorthands for array manipulation
diff --git a/Documentation/Books/AQL/CommonErrors.md b/Documentation/Books/AQL/CommonErrors.md
deleted file mode 100644
index 7c9c8966899f..000000000000
--- a/Documentation/Books/AQL/CommonErrors.md
+++ /dev/null
@@ -1,118 +0,0 @@
-Common Errors
-=============
-
-Trailing semicolons in query strings
-------------------------------------
-
-Many SQL databases allow sending multiple queries at once. In this case, the
-queries are separated using the semicolon character. Often it is also possible to
-execute a single query that has a semicolon at its end.
-
-AQL does not support this, and it is a parse error to use a semicolon at the end
-of an AQL query string.
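-
-For example, the trailing semicolon makes this query string invalid:
-
-```js
-RETURN 1; // parse error due to the trailing semicolon
-```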
-
-
-String concatenation
---------------------
-
-In AQL, strings must be concatenated using the [CONCAT()](Functions/String.md#concat)
-function. Joining them together with the `+` operator is not supported. Especially
-as a JavaScript programmer, it is easy to walk into this trap:
-
-```js
-RETURN "foo" + "bar" // [ 0 ]
-RETURN "foo" + 123 // [ 123 ]
-RETURN "123" + 200 // [ 323 ]
-```
-
-The arithmetic plus operator expects numbers as operands, and will try to implicitly
-cast them to numbers if they are of a different type. `"foo"` and `"bar"` are cast
-to `0` and then added together (still zero). If an actual number is added, that
-number will be returned (adding zero doesn't change the result). If the string is a
-valid string representation of a number, then it is cast to a number. Thus, adding
-`"123"` and `200` results in the two numbers being added up to `323`.
-
-To concatenate elements (with implicit casting to string for non-string values), do:
-
-```js
-RETURN CONCAT("foo", "bar") // [ "foobar" ]
-RETURN CONCAT("foo", 123) // [ "foo123" ]
-RETURN CONCAT("123", 200) // [ "123200" ]
-```
-
-Unexpected long running queries
--------------------------------
-
-Slow queries can have various causes and may be legitimate for queries with a high
-computational complexity, or for queries that touch a lot of data. Use the *Explain*
-feature to inspect execution plans and verify that appropriate indexes are
-utilized. Also check for mistakes such as references to the wrong variables.
-
-A literal collection name, which is not part of constructs like `FOR`,
-`UPDATE ... IN` etc., stands for an array of all documents of that collection
-and can cause an entire collection to be materialized before further
-processing. It should thus be avoided.
-
-Check the execution plan for `/* all collection documents */` and verify that
-it is intended. You should also see a warning if you execute such a query:
-
-> collection 'coll' used as expression operand
-
-For example, instead of:
-
-```js
-RETURN coll[* LIMIT 1]
-```
-
-... with the execution plan ...
-
-```
-Execution plan:
- Id NodeType Est. Comment
- 1 SingletonNode 1 * ROOT
- 2 CalculationNode 1 - LET #2 = coll /* all collection documents */[* LIMIT 0, 1] /* v8 expression */
- 3 ReturnNode 1 - RETURN #2
-```
-
-... you can use the following equivalent query:
-
-```js
-FOR doc IN coll
- LIMIT 1
- RETURN doc
-```
-
-... with the (better) execution plan:
-
-```
-Execution plan:
- Id NodeType Est. Comment
- 1 SingletonNode 1 * ROOT
- 2 EnumerateCollectionNode 44 - FOR doc IN Characters /* full collection scan */
- 3 LimitNode 1 - LIMIT 0, 1
- 4 ReturnNode 1 - RETURN doc
-```
-
-Similarly, make sure you have not confused any variable names with collection
-names by accident:
-
-```js
-LET names = ["John", "Mary", ...]
-// supposed to refer to variable "names", not collection "Names"
-FOR name IN Names
- ...
-```
-
-
diff --git a/Documentation/Books/AQL/DataQueries.md b/Documentation/Books/AQL/DataQueries.md
deleted file mode 100644
index 21e2d7de6660..000000000000
--- a/Documentation/Books/AQL/DataQueries.md
+++ /dev/null
@@ -1,333 +0,0 @@
-Data Queries
-============
-
-Data Access Queries
--------------------
-
-Retrieving data from the database with AQL always includes a **RETURN**
-operation. It can be used to return a static value, such as a string:
-
-```js
-RETURN "Hello ArangoDB!"
-```
-
-The query result is always an array of elements, even if only a single element was
-returned; in that case, the array contains exactly one element: `["Hello ArangoDB!"]`
-
-The function `DOCUMENT()` can be called to retrieve a single document via
-its document handle, for instance:
-
-```js
-RETURN DOCUMENT("users/phil")
-```
-
-*RETURN* is usually accompanied by a **FOR** loop to iterate over the
-documents of a collection. The following query executes the loop body for all
-documents of a collection called *users*. Each document is returned unchanged
-in this example:
-
-```js
-FOR doc IN users
- RETURN doc
-```
-
-Instead of returning the raw `doc`, one can easily create a projection:
-
-```js
-FOR doc IN users
- RETURN { user: doc, newAttribute: true }
-```
-
-For every user document, an object with two attributes is returned. The value
-of the attribute *user* is set to the content of the user document, and
-*newAttribute* is a static attribute with the boolean value *true*.
-
-Operations like **FILTER**, **SORT** and **LIMIT** can be added to the loop body
-to narrow and order the result. Instead of the call to `DOCUMENT()` shown above,
-one can also retrieve the document that describes user *phil* like so:
-
-```js
-FOR doc IN users
- FILTER doc._key == "phil"
- RETURN doc
-```
-
-The document key is used in this example, but any other attribute could equally
-be used for filtering. Since the document key is guaranteed to be unique, no
-more than a single document will match this filter. For other attributes this
-may not be the case. To return a subset of active users (determined by an
-attribute called *status*), sorted by name in ascending order, you can do:
-
-```js
-FOR doc IN users
- FILTER doc.status == "active"
- SORT doc.name
- LIMIT 10
- RETURN doc
-```
-
-Note that operations do not have to occur in a fixed order and that their order
-can influence the result significantly. Limiting the number of documents
-before a filter is usually not what you want, because it can easily miss a lot
-of documents that would fulfill the filter criterion but are ignored because
-of a premature *LIMIT* clause. For these reasons, *LIMIT*
-is usually placed at the very end, after *FILTER*, *SORT* and other operations.
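-
-As a sketch of what is usually *not* intended (assuming the same *users* collection): placing the
-*LIMIT* before the *FILTER* inspects only 10 arbitrary documents, so active users outside of
-those 10 are never considered:
-
-```js
-FOR doc IN users
- LIMIT 10
- FILTER doc.status == "active"
- RETURN doc
-```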
-
-See the [High Level Operations](Operations/README.md) chapter for more details.
-
-Data Modification Queries
--------------------------
-
-AQL supports the following data-modification operations:
-
-- **INSERT**: insert new documents into a collection
-- **UPDATE**: partially update existing documents in a collection
-- **REPLACE**: completely replace existing documents in a collection
-- **REMOVE**: remove existing documents from a collection
-- **UPSERT**: conditionally insert or update documents in a collection
-
-Below you find some simple example queries that use these operations.
-The operations are detailed in the chapter [High Level Operations](Operations/README.md).
-
-
-### Modifying a single document
-
-Let's start with the basics: `INSERT`, `UPDATE` and `REMOVE` operations on single documents.
-Here is an example that inserts a document into an existing collection *users*:
-
-```js
-INSERT {
- firstName: "Anna",
- name: "Pavlova",
- profession: "artist"
-} IN users
-```
-
-You may provide a key for the new document; if not provided, ArangoDB will create one for you.
-
-```js
-INSERT {
- _key: "GilbertoGil",
- firstName: "Gilberto",
- name: "Gil",
- city: "Fortalezza"
-} IN users
-```
-
-As ArangoDB is schema-free, attributes of the documents may vary:
-
-```js
-INSERT {
- _key: "PhilCarpenter",
- firstName: "Phil",
- name: "Carpenter",
- middleName: "G.",
- status: "inactive"
-} IN users
-```
-
-```js
-INSERT {
- _key: "NatachaDeclerck",
- firstName: "Natacha",
- name: "Declerck",
- location: "Antwerp"
-} IN users
-```
-
-Update is quite simple. The following AQL statement will add or change the attributes *status* and *location*:
-
-```js
-UPDATE "PhilCarpenter" WITH {
- status: "active",
- location: "Beijing"
-} IN users
-```
-
-Replace is an alternative to update where all attributes of the document are replaced.
-
-```js
-REPLACE {
- _key: "NatachaDeclerck",
- firstName: "Natacha",
- name: "Leclerc",
- status: "active",
- level: "premium"
-} IN users
-```
-
-Removing a document if you know its key is simple as well:
-
-```js
-REMOVE "GilbertoGil" IN users
-```
-
-or
-
-```js
-REMOVE { _key: "GilbertoGil" } IN users
-```
-
-### Modifying multiple documents
-
-Data-modification operations are normally combined with *FOR* loops to
-iterate over a given list of documents. They can optionally be combined with
-*FILTER* statements and the like.
-
-Let's start with an example that modifies existing documents in a collection
-*users* that match some condition:
-
-```js
-FOR u IN users
- FILTER u.status == "not active"
- UPDATE u WITH { status: "inactive" } IN users
-```
-
-
-Now, let's copy the contents of the collection *users* into the collection
-*backup*:
-
-```js
-FOR u IN users
- INSERT u IN backup
-```
-
-Subsequently, let's find some documents in collection *users* and remove them
-from collection *backup*. The link between the documents in both collections is
-established via the documents' keys:
-
-```js
-FOR u IN users
- FILTER u.status == "deleted"
- REMOVE u IN backup
-```
-
-The following example will remove all documents from both *users* and *backup*:
-
-```js
-LET r1 = (FOR u IN users REMOVE u IN users)
-LET r2 = (FOR u IN backup REMOVE u IN backup)
-RETURN true
-```
-
-### Returning documents
-
-Data-modification queries can optionally return documents. In order to reference
-the inserted, removed or modified documents in a `RETURN` statement, data-modification
-statements introduce the `OLD` and/or `NEW` pseudo-values:
-
-```js
-FOR i IN 1..100
- INSERT { value: i } IN test
- RETURN NEW
-```
-
-```js
-FOR u IN users
- FILTER u.status == "deleted"
- REMOVE u IN users
- RETURN OLD
-```
-
-```js
-FOR u IN users
- FILTER u.status == "not active"
- UPDATE u WITH { status: "inactive" } IN users
- RETURN NEW
-```
-
-`NEW` refers to the inserted or modified document revision, and `OLD` refers
-to the document revision before update or removal. `INSERT` statements can
-only refer to the `NEW` pseudo-value, and `REMOVE` operations only to `OLD`.
-`UPDATE`, `REPLACE` and `UPSERT` can refer to either.
-
-In all cases the full documents will be returned with all their attributes,
-including the potentially auto-generated attributes such as `_id`, `_key`, or `_rev`
-and the attributes not specified in the update expression of a partial update.
-
-#### Projections
-
-It is possible to return a projection of the documents in `OLD` or `NEW` instead of
-returning the entire documents. This can be used to reduce the amount of data returned
-by queries.
-
-For example, the following query will return only the keys of the inserted documents:
-
-```js
-FOR i IN 1..100
- INSERT { value: i } IN test
- RETURN NEW._key
-```
-
-#### Using OLD and NEW in the same query
-
-For `UPDATE`, `REPLACE` and `UPSERT` statements, both `OLD` and `NEW` can be used
-to return the previous revision of a document together with the updated revision:
-
-```js
-FOR u IN users
- FILTER u.status == "not active"
- UPDATE u WITH { status: "inactive" } IN users
- RETURN { old: OLD, new: NEW }
-```
-
-#### Calculations with OLD or NEW
-
-It is also possible to run additional calculations with `LET` statements between the
-data-modification part and the final `RETURN` of an AQL query. For example, the following
-query performs an upsert operation and returns whether an existing document was
-updated, or a new document was inserted. It does so by checking the `OLD` variable
-after the `UPSERT` and using a `LET` statement to store a temporary string for
-the operation type:
-
-```js
-UPSERT { name: "test" }
- INSERT { name: "test" }
- UPDATE { } IN users
-LET opType = IS_NULL(OLD) ? "insert" : "update"
-RETURN { _key: NEW._key, type: opType }
-```
-
-### Restrictions
-
-The name of the modified collection (*users* and *backup* in the above cases)
-must be known to the AQL executor at query-compile time and cannot change at
-runtime. Using a bind parameter to specify the
-[collection name](../Manual/Appendix/Glossary.html#collection-name) is allowed.
-
-It is not possible to use multiple data-modification operations for the same
-collection in the same query, or follow up a data-modification operation for a
-specific collection with a read operation for the same collection. Neither is
-it possible to follow up any data-modification operation with a traversal query
-(which may read from arbitrary collections not necessarily known at the start of
-the traversal).
-
-That means you may not place several `REMOVE` or `UPDATE` statements for the same
-collection into the same query. It is however possible to modify different collections
-by using multiple data-modification operations for different collections in the
-same query.
-In case you have a query with several places that need to remove documents from the
-same collection, it is recommended to collect these documents or their keys in an array
-and have the documents from that array removed using a single `REMOVE` operation.
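-
-A sketch of that pattern, with made-up filter conditions for illustration:
-
-```js
-LET keysToRemove = UNION_DISTINCT(
- (FOR u IN users FILTER u.status == "deleted" RETURN u._key),
- (FOR u IN users FILTER u.obsolete == true RETURN u._key)
-)
-FOR key IN keysToRemove
- REMOVE key IN users
-```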
-
-Data-modification operations can optionally be followed by `LET` operations to
-perform further calculations and a `RETURN` operation to return data.
-
-
-### Transactional Execution
-
-On a single server, data-modification operations are executed transactionally.
-If a data-modification operation fails, any changes made by it will be rolled
-back automatically as if they never happened.
-
-If the RocksDB engine is used and intermediate commits are enabled, a query may
-execute intermediate transaction commits in case the running transaction (AQL
-query) hits the specified size thresholds. In this case, the query's operations
-carried out so far will be committed and not rolled back in case of a later abort/rollback.
-That behavior can be controlled by adjusting the intermediate commit settings for
-the RocksDB engine.
-
-In a cluster, AQL data-modification queries are currently not executed transactionally.
-Additionally, *update*, *replace*, *upsert* and *remove* AQL queries currently
-require the *_key* attribute to be specified for all documents that should be
-modified or removed, even if a shard key attribute other than *_key* was chosen
-for the collection. This restriction may be overcome in a future release of ArangoDB.
diff --git a/Documentation/Books/AQL/Examples/CombiningGraphTraversals.md b/Documentation/Books/AQL/Examples/CombiningGraphTraversals.md
deleted file mode 100644
index 68d0690441ab..000000000000
--- a/Documentation/Books/AQL/Examples/CombiningGraphTraversals.md
+++ /dev/null
@@ -1,99 +0,0 @@
-Combining Graph Traversals
-==========================
-
-Finding the start vertex via a geo query
-----------------------------------------
-
-Our first example will locate the start vertex for a graph traversal via [a geo index](../../Manual/Indexing/Geo.html).
-We use [the city graph](../../Manual/Graphs/index.html#the-city-graph) and its geo indices:
-
-![Cities Example Graph](../../Manual/Graphs/cities_graph.png)
-
- @startDocuBlockInline COMBINING_GRAPH_01_create_graph
- @EXAMPLE_ARANGOSH_OUTPUT{COMBINING_GRAPH_01_create_graph}
- var examples = require("@arangodb/graph-examples/example-graph.js");
- var g = examples.loadGraph("routeplanner");
- ~examples.dropGraph("routeplanner");
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock COMBINING_GRAPH_01_create_graph
-
-We search for all German cities in a range of 400 km around the ex-capital **Bonn**: **Hamburg** and **Cologne**.
-We won't find **Paris** since it's in the `frenchCity` collection.
-
- @startDocuBlockInline COMBINING_GRAPH_02_show_geo
- @EXAMPLE_AQL{COMBINING_GRAPH_02_show_geo}
- @DATASET{routeplanner}
- FOR startCity IN germanCity
- FILTER GEO_DISTANCE(@bonn, startCity.geometry) < @radius
- RETURN startCity._key
- @BV {
- bonn: [7.0998, 50.7340],
- radius: 400000
- }
- @END_EXAMPLE_AQL
- @endDocuBlock COMBINING_GRAPH_02_show_geo
-
-Let's verify that the geo indices are actually used:
-
- @startDocuBlockInline COMBINING_GRAPH_03_explain_geo
- @EXAMPLE_AQL{COMBINING_GRAPH_03_explain_geo}
- @DATASET{routeplanner}
- @EXPLAIN{TRUE}
- FOR startCity IN germanCity
- FILTER GEO_DISTANCE(@bonn, startCity.geometry) < @radius
- RETURN startCity._key
- @BV {
- bonn: [7.0998, 50.7340],
- radius: 400000
- }
- @END_EXAMPLE_AQL
- @endDocuBlock COMBINING_GRAPH_03_explain_geo
-
-And now combine this with a graph traversal:
-
- @startDocuBlockInline COMBINING_GRAPH_04_combine
- @EXAMPLE_AQL{COMBINING_GRAPH_04_combine}
- @DATASET{routeplanner}
- FOR startCity IN germanCity
- FILTER GEO_DISTANCE(@bonn, startCity.geometry) < @radius
- FOR v, e, p IN 1..1 OUTBOUND startCity
- GRAPH 'routeplanner'
- RETURN {startcity: startCity._key, traversedCity: v._key}
- @BV {
- bonn: [7.0998, 50.7340],
- radius: 400000
- }
- @END_EXAMPLE_AQL
- @endDocuBlock COMBINING_GRAPH_04_combine
-
-The geo index query returns the `startCity` candidates (**Cologne** and **Hamburg**), which we then use as starting points for our graph traversal.
-For simplicity we only return their direct neighbours. We format the return result so we can see from which `startCity` the traversal came.
-
-Alternatively we could use a `LET` statement with a subquery to group the traversals by their `startCity` efficiently:
-
- @startDocuBlockInline COMBINING_GRAPH_05_combine_let
- @EXAMPLE_AQL{COMBINING_GRAPH_05_combine_let}
- @DATASET{routeplanner}
- FOR startCity IN germanCity
- FILTER GEO_DISTANCE(@bonn, startCity.geometry) < @radius
- LET oneCity = (
- FOR v, e, p IN 1..1 OUTBOUND startCity
- GRAPH 'routeplanner' RETURN v._key
- )
- RETURN {startCity: startCity._key, connectedCities: oneCity}
- @BV {
- bonn: [7.0998, 50.7340],
- radius: 400000
- }
- @END_EXAMPLE_AQL
- @endDocuBlock COMBINING_GRAPH_05_combine_let
-
-Finally, we clean up again:
-
- @startDocuBlockInline COMBINING_GRAPH_06_cleanup
- @EXAMPLE_ARANGOSH_OUTPUT{COMBINING_GRAPH_06_cleanup}
- ~var examples = require("@arangodb/graph-examples/example-graph.js");
- ~var g = examples.loadGraph("routeplanner");
- examples.dropGraph("routeplanner");
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock COMBINING_GRAPH_06_cleanup
diff --git a/Documentation/Books/AQL/Examples/CombiningQueries.md b/Documentation/Books/AQL/Examples/CombiningQueries.md
deleted file mode 100644
index ff0dec8beda0..000000000000
--- a/Documentation/Books/AQL/Examples/CombiningQueries.md
+++ /dev/null
@@ -1,60 +0,0 @@
-Combining queries
-=================
-
-Subqueries
-----------
-
-Wherever an expression is allowed in AQL, a subquery can be placed. A subquery
-is a query part that can introduce its own local variables without affecting
-variables and values in its outer scope(s).
-
-It is required that subqueries be put inside parentheses *(* and *)* to
-explicitly mark their start and end points:
-
-```js
-FOR p IN persons
- LET recommendations = (
- FOR r IN recommendations
- FILTER p.id == r.personId
- SORT p.rank DESC
- LIMIT 10
- RETURN r
- )
- RETURN { person : p, recommendations : recommendations }
-```
-
-```js
-FOR p IN persons
- COLLECT city = p.city INTO g
- RETURN {
- city : city,
- numPersons : LENGTH(g),
- maxRating: MAX(
- FOR r IN g
- RETURN r.p.rating
- )}
-```
-
-Subqueries may also include other subqueries.
-
-Note that subqueries always return a result **array**, even if there is only
-a single return value:
-
-```js
-RETURN ( RETURN 1 )
-```
-
-```json
-[ [ 1 ] ]
-```
-
-To avoid such a nested data structure, [FIRST()](../Functions/Array.md#first)
-can be used for example:
-
-```js
-RETURN FIRST( RETURN 1 )
-```
-
-```json
-[ 1 ]
-```
diff --git a/Documentation/Books/AQL/Examples/Counting.md b/Documentation/Books/AQL/Examples/Counting.md
deleted file mode 100644
index 76aa9d7f6130..000000000000
--- a/Documentation/Books/AQL/Examples/Counting.md
+++ /dev/null
@@ -1,25 +0,0 @@
-Counting
-========
-
-Amount of documents in a collection
------------------------------------
-
-To return the count of documents that currently exist in a collection,
-you can call the [LENGTH() function](../Functions/Array.md#length):
-
-```
-RETURN LENGTH(collection)
-```
-
-This type of call is optimized since 2.8 (no unnecessary intermediate result
-is built up in memory) and it is therefore the preferred way to determine the count.
-Internally, [COLLECTION_COUNT()](../Functions/Miscellaneous.md#collectioncount) is called.
-
-In earlier versions (2.4 to 2.7), where `COLLECT ... WITH COUNT INTO` is already available
-but the *LENGTH()* call is not yet optimized, you may use the following code instead for better performance:
-
-```
-FOR doc IN collection
- COLLECT WITH COUNT INTO length
- RETURN length
-```
diff --git a/Documentation/Books/AQL/Examples/DataModificationQueries.md b/Documentation/Books/AQL/Examples/DataModificationQueries.md
deleted file mode 100644
index 2c03440484cb..000000000000
--- a/Documentation/Books/AQL/Examples/DataModificationQueries.md
+++ /dev/null
@@ -1,267 +0,0 @@
-Data-modification queries
-=========================
-
-The following operations can be used to modify data of multiple documents
-with one query. This is superior to fetching and updating the documents individually
-with multiple queries. However, if only a single document needs to be modified,
-ArangoDB's specialized data-modification operations for single documents
-might execute faster.
-
-Updating documents
-------------------
-
-To update existing documents, we can either use the *UPDATE* or the *REPLACE*
-operation. *UPDATE* updates only the specified attributes in the found documents,
-and *REPLACE* completely replaces the found documents with the specified values.
-
-We'll start with an *UPDATE* query that rewrites the gender attribute in all
-documents:
-
-```js
-FOR u IN users
- UPDATE u WITH { gender: TRANSLATE(u.gender, { m: 'male', f: 'female' }) } IN users
-```
-
-To add new attributes to existing documents, we can also use an *UPDATE* query.
-The following query adds an attribute *numberOfLogins* for all users with status
-active:
-
-```js
-FOR u IN users
- FILTER u.active == true
- UPDATE u WITH { numberOfLogins: 0 } IN users
-```
-
-Existing attributes can also be updated based on their previous value:
-
-```js
-FOR u IN users
- FILTER u.active == true
- UPDATE u WITH { numberOfLogins: u.numberOfLogins + 1 } IN users
-```
-
-The above query will only work if there was already a *numberOfLogins* attribute
-present in the document. If it is unsure whether there is a *numberOfLogins*
-attribute in the document, the increase must be made conditional:
-
-```js
-FOR u IN users
- FILTER u.active == true
- UPDATE u WITH {
- numberOfLogins: HAS(u, 'numberOfLogins') ? u.numberOfLogins + 1 : 1
- } IN users
-```
-
-Updates of multiple attributes can be combined in a single query:
-
-```js
-FOR u IN users
- FILTER u.active == true
- UPDATE u WITH {
- lastLogin: DATE_NOW(),
- numberOfLogins: HAS(u, 'numberOfLogins') ? u.numberOfLogins + 1 : 1
- } IN users
-```
-
-Note that an update query might fail during execution, for example because a
-document to be updated does not exist. In this case, the query will abort at
-the first error. In single-server mode, all modifications done by the query will
-be rolled back as if they never happened.
-
-
-Replacing documents
--------------------
-
-To not just partially update, but completely replace existing documents, use
-the *REPLACE* operation.
-The following query replaces all documents in the collection backup with
-the documents found in collection users. Documents common to both
-collections will be replaced. All other documents will remain unchanged.
-Documents are compared using their *_key* attributes:
-
-```js
-FOR u IN users
- REPLACE u IN backup
-```
-
-The above query will fail if there are documents in collection users that are
-not in collection backup yet. In this case, the query would attempt to replace
-documents that do not exist. If such a case is detected while executing the query,
-the query will abort. In single-server mode, all changes made by the query will
-also be rolled back.
-
-To make the query succeed in such a case, use the *ignoreErrors* query option:
-
-```js
-FOR u IN users
- REPLACE u IN backup OPTIONS { ignoreErrors: true }
-```
-
-
-Removing documents
-------------------
-
-Deleting documents can be achieved with the *REMOVE* operation.
-To remove all users within a certain age range, we can use the following query:
-
-```js
-FOR u IN users
- FILTER u.active == true && u.age >= 35 && u.age <= 37
- REMOVE u IN users
-```
-
-
-Creating documents
-------------------
-
-To create new documents, there is the *INSERT* operation.
-It can also be used to generate copies of existing documents from other collections,
-or to create synthetic documents (e.g. for testing purposes). The following
-query creates 1000 test users in collection users with some attributes set:
-
-```js
-FOR i IN 1..1000
- INSERT {
- id: 100000 + i,
- age: 18 + FLOOR(RAND() * 25),
- name: CONCAT('test', TO_STRING(i)),
- active: false,
- gender: i % 2 == 0 ? 'male' : 'female'
- } IN users
-```
-
-
-Copying data from one collection into another
----------------------------------------------
-
-To copy data from one collection into another, an *INSERT* operation can be
-used:
-
-```js
-FOR u IN users
- INSERT u IN backup
-```
-
-This will copy over all documents from collection users into collection
-backup. Note that both collections must already exist when the query is
-executed. The query might fail if backup already contains documents, as
-executing the insert might attempt to insert the same document (identified
-by *_key* attribute) again. This will trigger a unique key constraint violation
-and abort the query. In single-server mode, all changes made by the query
-will also be rolled back.
-To make such a copy operation work in all cases, the target collection can
-be emptied beforehand, using a *REMOVE* query.
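-
-A sketch of emptying the target collection beforehand, as a separate query run before the copy:
-
-```js
-FOR doc IN backup
- REMOVE doc IN backup
-```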
-
-
-Handling errors
----------------
-
-In some cases it might be desirable to continue execution of a query even in
-the face of errors (e.g. "document not found"). To continue execution of a
-query in case of errors, there is the *ignoreErrors* option.
-
-To use it, place an *OPTIONS* keyword directly after the data modification
-part of the query, e.g.
-
-```js
-FOR u IN users
- REPLACE u IN backup OPTIONS { ignoreErrors: true }
-```
-
-This will continue execution of the query even if errors occur during the
-*REPLACE* operation. It works similar for *UPDATE*, *INSERT*, and *REMOVE*.
-
-
-Altering substructures
-----------------------
-
-To modify lists in documents we have to work with temporary variables.
-We will collect the sublist in there and alter it. We choose a simple
-boolean filter condition to keep the query comprehensible.
-
-First, let's create a collection with a sample document:
-
-```js
-database = db._create('complexCollection')
-database.save({
- "topLevelAttribute" : "a",
- "subList" : [
- {
- "attributeToAlter" : "oldValue",
- "filterByMe" : true
- },
- {
- "attributeToAlter" : "moreOldValues",
- "filterByMe" : true
- },
- {
- "attributeToAlter" : "unchangedValue",
- "filterByMe" : false
- }
- ]
-})
-```
-
-Here's the query, which keeps the *subList* in *alteredList* to update it later:
-
-```js
-FOR document in complexCollection
- LET alteredList = (
- FOR element IN document.subList
- LET newItem = (! element.filterByMe ?
- element :
- MERGE(element, { attributeToAlter: "shiny New Value" }))
- RETURN newItem)
- UPDATE document WITH { subList: alteredList } IN complexCollection
-```
-
-The query in its current form is functional:
-
-```js
-db.complexCollection.toArray()
-[
- {
- "_id" : "complexCollection/392671569467",
- "_key" : "392671569467",
- "_rev" : "392799430203",
- "topLevelAttribute" : "a",
- "subList" : [
- {
- "filterByMe" : true,
- "attributeToAlter" : "shiny New Value"
- },
- {
- "filterByMe" : true,
- "attributeToAlter" : "shiny New Value"
- },
- {
- "filterByMe" : false,
- "attributeToAlter" : "unchangedValue"
- }
- ]
- }
-]
-```
-
-However, it will probably become a performance bottleneck sooner or later, since it **modifies**
-all documents in the collection **regardless of whether the values change or not**.
-Therefore we want to only *UPDATE* the documents if we really change their value.
-Hence we employ a second *FOR* to test whether *subList* will be altered or not:
-
-```js
-FOR document in complexCollection
- LET willUpdateDocument = (
- FOR element IN document.subList
- FILTER element.filterByMe LIMIT 1 RETURN 1)
-
- FILTER LENGTH(willUpdateDocument) > 0
-
- LET alteredList = (
- FOR element IN document.subList
- LET newItem = (! element.filterByMe ?
- element :
- MERGE(element, { attributeToAlter: "shiny New Value" }))
- RETURN newItem)
-
- UPDATE document WITH { subList: alteredList } IN complexCollection
-```
diff --git a/Documentation/Books/AQL/Examples/Grouping.md b/Documentation/Books/AQL/Examples/Grouping.md
deleted file mode 100644
index a42213ad5084..000000000000
--- a/Documentation/Books/AQL/Examples/Grouping.md
+++ /dev/null
@@ -1,324 +0,0 @@
-Grouping
-========
-
-To group results by arbitrary criteria, AQL provides the *COLLECT* keyword.
-*COLLECT* will perform a grouping, but no aggregation. Aggregation can still be
-added in the query if required.
-
-Ensuring uniqueness
--------------------
-
-*COLLECT* can be used to make a result set unique. The following query will return each distinct
-`age` attribute value only once:
-
-```js
-FOR u IN users
- COLLECT age = u.age
- RETURN age
-```
-
-This is grouping without tracking the group values, but just the group criterion (*age*) value.
-
-Grouping can also be done on multiple levels using *COLLECT*:
-
-```js
-FOR u IN users
- COLLECT status = u.status, age = u.age
- RETURN { status, age }
-```
-
-
-Alternatively *RETURN DISTINCT* can be used to make a result set unique. *RETURN DISTINCT* supports a
-single criterion only:
-
-```js
-FOR u IN users
- RETURN DISTINCT u.age
-```
-
-Note: the order of results is undefined for *RETURN DISTINCT*.
-
-Fetching group values
----------------------
-
-To group users by age, and return the names of the users with the highest ages,
-we'll issue a query like this:
-
-```js
-FOR u IN users
- FILTER u.active == true
- COLLECT age = u.age INTO usersByAge
- SORT age DESC LIMIT 0, 5
- RETURN {
- age,
- users: usersByAge[*].u.name
- }
-```
-
-```json
-[
- { "age": 37, "users": [ "John", "Sophia" ] },
- { "age": 36, "users": [ "Fred", "Emma" ] },
- { "age": 34, "users": [ "Madison" ] },
- { "age": 33, "users": [ "Chloe", "Michael" ] },
- { "age": 32, "users": [ "Alexander" ] }
-]
-```
-
-The query will put all users together by their *age* attribute. There will be one
-result document per distinct *age* value (leaving aside the *LIMIT*). For each group,
-we have access to the matching documents via the *usersByAge* variable introduced in
-the *COLLECT* statement.
-
-Variable Expansion
-------------------
-
-The *usersByAge* variable contains the full documents found, and as we're only
-interested in user names, we'll use the expansion operator [\*] to extract just the
-*name* attribute of all user documents in each group:
-
-```js
-usersByAge[*].u.name
-```
-
-The [\*] expansion operator is just a handy short-cut. We could also write
-a subquery:
-
-```js
-( FOR temp IN usersByAge RETURN temp.u.name )
-```
-
-Grouping by multiple criteria
------------------------------
-
-To group by multiple criteria, we'll use multiple arguments in the *COLLECT* clause.
-For example, to group users by *ageGroup* (a derived value we need to calculate first)
-and then by *gender*, we'll do:
-
-```js
-FOR u IN users
- FILTER u.active == true
- COLLECT ageGroup = FLOOR(u.age / 5) * 5,
- gender = u.gender INTO group
- SORT ageGroup DESC
- RETURN {
- ageGroup,
- gender
- }
-```
-
-```json
-[
- { "ageGroup": 35, "gender": "f" },
- { "ageGroup": 35, "gender": "m" },
- { "ageGroup": 30, "gender": "f" },
- { "ageGroup": 30, "gender": "m" },
- { "ageGroup": 25, "gender": "f" },
- { "ageGroup": 25, "gender": "m" }
-]
-```
-
-Counting group values
----------------------
-
-If the goal is to count the number of values in each group, AQL provides the special
-*COLLECT WITH COUNT INTO* syntax. This is a simple variant for grouping with an additional
-group length calculation:
-
-```js
-FOR u IN users
- FILTER u.active == true
- COLLECT ageGroup = FLOOR(u.age / 5) * 5,
- gender = u.gender WITH COUNT INTO numUsers
- SORT ageGroup DESC
- RETURN {
- ageGroup,
- gender,
- numUsers
- }
-```
-
-```json
-[
- { "ageGroup": 35, "gender": "f", "numUsers": 2 },
- { "ageGroup": 35, "gender": "m", "numUsers": 2 },
- { "ageGroup": 30, "gender": "f", "numUsers": 4 },
- { "ageGroup": 30, "gender": "m", "numUsers": 4 },
- { "ageGroup": 25, "gender": "f", "numUsers": 2 },
- { "ageGroup": 25, "gender": "m", "numUsers": 2 }
-]
-```
-
-Aggregation
------------
-
-Adding further aggregation is also simple in AQL by using an *AGGREGATE* clause
-in the *COLLECT*:
-
-```js
-FOR u IN users
- FILTER u.active == true
- COLLECT ageGroup = FLOOR(u.age / 5) * 5,
- gender = u.gender
- AGGREGATE numUsers = LENGTH(1),
- minAge = MIN(u.age),
- maxAge = MAX(u.age)
- SORT ageGroup DESC
- RETURN {
- ageGroup,
- gender,
- numUsers,
- minAge,
- maxAge
- }
-```
-
-```json
-[
- {
- "ageGroup": 35,
- "gender": "f",
- "numUsers": 2,
- "minAge": 36,
- "maxAge": 39,
- },
- {
- "ageGroup": 35,
- "gender": "m",
- "numUsers": 2,
- "minAge": 35,
- "maxAge": 39,
- },
- ...
-]
-```
-
-We have used the aggregate function *LENGTH* here (it returns the length of an array).
-This is the equivalent of SQL's `SELECT g, COUNT(*) FROM ... GROUP BY g`. In addition to
-*LENGTH*, AQL also provides *MAX*, *MIN*, *SUM* and *AVERAGE*, *VARIANCE_POPULATION*,
-*VARIANCE_SAMPLE*, *STDDEV_POPULATION*, *STDDEV_SAMPLE*, *UNIQUE*, *SORTED_UNIQUE* and
-*COUNT_UNIQUE* as basic aggregation functions.
-
-In AQL all aggregation functions can be run on arrays only. If an aggregation function
-is run on anything that is not an array, a warning will be produced and the result will
-be *null*.
-
-Using an *AGGREGATE* clause will ensure the aggregation is run while the groups are built
-in the collect operation. This is normally more efficient than collecting all group values
-for all groups and then doing a post-aggregation.
-
-
-Post-aggregation
-----------------
-
-Aggregation can also be performed after a *COLLECT* operation using other AQL constructs,
-though performance-wise this is often inferior to using *COLLECT* with *AGGREGATE*.
-
-The same query as before can be turned into a post-aggregation query as shown below. Note
-that this query will build and pass on all group values for all groups inside the variable
-*g*, and perform the aggregation at the latest possible stage:
-
-```js
-FOR u IN users
- FILTER u.active == true
- COLLECT ageGroup = FLOOR(u.age / 5) * 5,
- gender = u.gender INTO g
- SORT ageGroup DESC
- RETURN {
- ageGroup,
- gender,
- numUsers: LENGTH(g[*]),
- minAge: MIN(g[*].u.age),
- maxAge: MAX(g[*].u.age)
- }
-```
-
-```json
-[
- {
- "ageGroup": 35,
- "gender": "f",
- "numUsers": 2,
- "minAge": 36,
- "maxAge": 39,
- },
- {
- "ageGroup": 35,
- "gender": "m",
- "numUsers": 2,
- "minAge": 35,
- "maxAge": 39,
- },
- ...
-]
-```
-
-This is in contrast to the previous query that used an *AGGREGATE* clause to perform
-the aggregation during the collect operation, at the earliest possible stage.
-
-
-Post-filtering aggregated data
-------------------------------
-
-To filter the results of a grouping or aggregation operation (i.e. something
-similar to *HAVING* in SQL), simply add another *FILTER* clause after the *COLLECT*
-statement.
-
-For example, to get the 3 *ageGroup*s with the most users in them:
-
-```js
-FOR u IN users
- FILTER u.active == true
- COLLECT ageGroup = FLOOR(u.age / 5) * 5 INTO group
- LET numUsers = LENGTH(group)
- FILTER numUsers > 2 /* group must contain at least 3 users in order to qualify */
- SORT numUsers DESC
- LIMIT 0, 3
- RETURN {
- "ageGroup": ageGroup,
- "numUsers": numUsers,
- "users": group[*].u.name
- }
-```
-
-```json
-[
- {
- "ageGroup": 30,
- "numUsers": 8,
- "users": [
- "Abigail",
- "Madison",
- "Anthony",
- "Alexander",
- "Isabella",
- "Chloe",
- "Daniel",
- "Michael"
- ]
- },
- {
- "ageGroup": 25,
- "numUsers": 4,
- "users": [
- "Mary",
- "Mariah",
- "Jim",
- "Diego"
- ]
- },
- {
- "ageGroup": 35,
- "numUsers": 4,
- "users": [
- "Fred",
- "John",
- "Emma",
- "Sophia"
- ]
- }
-]
-```
-
-To increase readability, the repeated expression *LENGTH(group)* was put into a variable
-*numUsers*. The *FILTER* on *numUsers* is the equivalent an SQL *HAVING* clause.
diff --git a/Documentation/Books/AQL/Examples/Join.md b/Documentation/Books/AQL/Examples/Join.md
deleted file mode 100644
index 20dc8b9c3713..000000000000
--- a/Documentation/Books/AQL/Examples/Join.md
+++ /dev/null
@@ -1,246 +0,0 @@
-Joins
-=====
-
-So far we have only dealt with one collection (*users*) at a time. We also have a
-collection *relations* that stores relationships between users. We will now use
-this extra collection to create a result from two collections.
-
-First of all, we'll query a few users together with their friends' ids. For that,
-we'll use all *relations* that have a value of *friend* in their *type* attribute.
-Relationships are established by using the *friendOf* and *thisUser* attributes in the
-*relations* collection, which point to the *userId* values in the *users* collection.
-
-Join tuples
------------
-
-We'll start with a SQL-ish result set and return each tuple (user name, friends userId)
-separately. The AQL query to generate such result is:
-
-
- @startDocuBlockInline joinTuples
- @EXAMPLE_AQL{joinTuples}
- @DATASET{joinSampleDataset}
- FOR u IN users
- FILTER u.active == true
- LIMIT 0, 4
- FOR f IN relations
- FILTER f.type == @friend && f.friendOf == u.userId
- RETURN {
- "user" : u.name,
- "friendId" : f.thisUser
- }
- @BV {
- friend: "friend"
- }
- @END_EXAMPLE_AQL
- @endDocuBlock joinTuples
-
-We iterate over the collection users. Only the 'active' users will be examined,
-and at most four of them. For each of these users we will search for their friends. We locate friends
-by comparing the *userId* of our current user with the *friendOf* attribute of the
-*relations* document. For each of those relations found we return the users name
-and the userId of the friend.
-
-
-Horizontal lists
-----------------
-
-
-Note that in the above result, a user can be returned multiple times. This is the
-SQL way of returning data. If this is not desired, the friends' ids of each user
-can be returned in a horizontal list. This will return each user at most once.
-
-The AQL query for doing so is:
-
-```js
-FOR u IN users
- FILTER u.active == true LIMIT 0, 4
- RETURN {
- "user" : u.name,
- "friendIds" : (
- FOR f IN relations
- FILTER f.friendOf == u.userId && f.type == "friend"
- RETURN f.thisUser
- )
- }
-```
-
-```json
-[
- {
- "user" : "Abigail",
- "friendIds" : [
- 108,
- 102,
- 106
- ]
- },
- {
- "user" : "Fred",
- "friendIds" : [
- 209
- ]
- },
- {
- "user" : "Mary",
- "friendIds" : [
- 207,
- 104
- ]
- },
- {
- "user" : "Mariah",
- "friendIds" : [
- 203,
- 205
- ]
- }
-]
-```
-
-In this query we are still iterating over the users in the *users* collection
-and for each matching user we are executing a subquery to create the matching
-list of related users.
-
-Self joins
-----------
-
-To not only return friend ids but also the names of friends, we could "join" the
-*users* collection once more (something like a "self join"):
-
-```js
-FOR u IN users
- FILTER u.active == true
- LIMIT 0, 4
- RETURN {
- "user" : u.name,
- "friendIds" : (
- FOR f IN relations
- FILTER f.friendOf == u.userId && f.type == "friend"
- FOR u2 IN users
- FILTER f.thisUser == u2.userId
- RETURN u2.name
- )
- }
-```
-
-```json
-[
- {
- "user" : "Abigail",
- "friendIds" : [
- "Jim",
- "Jacob",
- "Daniel"
- ]
- },
- {
- "user" : "Fred",
- "friendIds" : [
- "Mariah"
- ]
- },
- {
- "user" : "Mary",
- "friendIds" : [
- "Isabella",
- "Michael"
- ]
- },
- {
- "user" : "Mariah",
- "friendIds" : [
- "Madison",
- "Eva"
- ]
- }
-]
-```
-
-This query will then in turn fetch the clear-text name of the
-friend from the users collection. So here we iterate over the users collection,
-and for each hit over the relations collection, and for each of those hits once more over the
-users collection.
-
-Outer joins
------------
-
-Let's find the lonely people in our database - those without friends.
-
-```js
-
-FOR user IN users
- LET friendList = (
- FOR f IN relations
- FILTER f.friendOf == user.userId
- RETURN 1
- )
- FILTER LENGTH(friendList) == 0
- RETURN { "user" : user.name }
-```
-
-```json
-[
- {
- "user" : "Abigail"
- },
- {
- "user" : "Fred"
- }
-]
-```
-
-So, for each user we pick the list of their friends and count them. The ones where
-count equals zero are the lonely people. Using *RETURN 1* in the subquery
-saves even more precious CPU cycles and gives the optimizer more alternatives.
-
-Index usage
------------
-
-Especially for joins you should [make sure indices can be used to speed up your query](../ExecutionAndPerformance/ExplainingQueries.md).
-Please note that sparse indices don't qualify for joins:
-
-In joins you typically would also want to join documents that do not contain the property
-you join on. However, sparse indices don't contain references to documents that
-lack the indexed attributes - thus such documents would be missing from the join operation.
-For that reason you should provide non-sparse indices.
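-
-For example, a sketch of creating such a non-sparse index in arangosh; the attribute matches the
-join examples above, and the index type is just one possible choice:
-
-```js
-db.relations.ensureIndex({ type: "hash", fields: [ "friendOf" ], sparse: false });
-```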
-
-Pitfalls
---------
-
-Since we're free of schemata, there is by default no way to tell the format of the
-documents. So, if your documents don't contain an attribute, it defaults to
-null. We can however check our data for accuracy like this:
-
-```js
-RETURN LENGTH(FOR u IN users FILTER u.userId == null RETURN 1)
-```
-
-```json
-[
- 10000
-]
-```
-
-```js
-RETURN LENGTH(FOR f IN relations FILTER f.friendOf == null RETURN 1)
-```
-
-```json
-[
- 10000
-]
-```
-
-So if the above queries return 10k matches each, the result of the join tuples
-query will contain 100,000,000 items, which takes a lot of memory and computation
-time. So it is generally a good idea to verify that the attributes used in your
-join conditions actually exist in the documents.
-
-Using indices on the properties can speed up the operation significantly.
-You can use the explain helper to verify that your query actually uses them.
-
-If you work with joins on edge collections you would typically aggregate over
-the internal fields *_id*, *_from* and *_to* (where *_id* corresponds to *userId*,
-*_from* to *friendOf* and *_to* to *thisUser* in our examples). ArangoDB
-implicitly creates indices on them.
diff --git a/Documentation/Books/AQL/Examples/MultiplePaths.md b/Documentation/Books/AQL/Examples/MultiplePaths.md
deleted file mode 100644
index 18cee4dfcf55..000000000000
--- a/Documentation/Books/AQL/Examples/MultiplePaths.md
+++ /dev/null
@@ -1,43 +0,0 @@
-Multiple Path Search
-====================
-
-The shortest path algorithm can only determine one shortest path.
-For example, if this is the full graph (based on the [mps_graph](../../Manual/Graphs/index.html#the-mps-graph)):
-
-![Example Graph](../../Manual/Graphs/mps_graph.png)
-
-then a shortest path query from **A** to **C** may return the path `A -> B -> C` or `A -> D -> C`, but it's undefined which one (not taking edge weights into account here).
-
-You can, however, use the efficient shortest path algorithm to determine the shortest path length:
-
-
- @startDocuBlockInline GRAPHTRAV_multiplePathSearch
- @EXAMPLE_AQL{GRAPHTRAV_multiplePathSearch}
- @DATASET{mps_graph}
- RETURN LENGTH(
- FOR v IN OUTBOUND
- SHORTEST_PATH "mps_verts/A" TO "mps_verts/C" mps_edges
- RETURN v
- )
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_multiplePathSearch
-
-
-The result is 3 for the example graph (including the start vertex). Now, subtract 1 to get the edge count / traversal depth. You can run a pattern matching traversal to find all paths with this length (or longer ones by increasing the min and max depth). The starting point is **A** again, and a filter on the document ID of v (or p.vertices[-1]) ensures that we only retrieve paths that end at point **C**.
-
-The following query returns all paths with length 2, start vertex **A** and target vertex **C**:
-
-
- @startDocuBlockInline GRAPHTRAV_multiplePathSearch2
- @EXAMPLE_AQL{GRAPHTRAV_multiplePathSearch2}
- @DATASET{mps_graph}
- FOR v, e, p IN 2..2 OUTBOUND "mps_verts/A" mps_edges
- FILTER v._id == "mps_verts/C"
- RETURN CONCAT_SEPARATOR(" -> ", p.vertices[*]._key)
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_multiplePathSearch2
-
-
-A traversal depth of `3..3` would return `A -> E -> F -> C` and `2..3` all three paths.
-
-Note that two separate queries are required to compute the shortest path length and to do the pattern matching based on the shortest path length (minus 1), because min and max depth can't be expressions (they have to be known in advance, so either be number literals or bind parameters).
diff --git a/Documentation/Books/AQL/Examples/ProjectionsAndFilters.md b/Documentation/Books/AQL/Examples/ProjectionsAndFilters.md
deleted file mode 100644
index b6bf2445505e..000000000000
--- a/Documentation/Books/AQL/Examples/ProjectionsAndFilters.md
+++ /dev/null
@@ -1,134 +0,0 @@
-Projections and Filters
-=======================
-
-Returning unaltered documents
------------------------------
-
-To return three complete documents from collection *users*, the following query can be used:
-
-```js
-FOR u IN users
- LIMIT 0, 3
- RETURN u
-```
-
-```json
-[
- {
- "_id" : "users/229886047207520",
- "_rev" : "229886047207520",
- "_key" : "229886047207520",
- "active" : true,
- "id" : 206,
- "age" : 31,
- "gender" : "f",
- "name" : "Abigail"
- },
- {
- "_id" : "users/229886045175904",
- "_rev" : "229886045175904",
- "_key" : "229886045175904",
- "active" : true,
- "id" : 101,
- "age" : 36,
- "name" : "Fred",
- "gender" : "m"
- },
- {
- "_id" : "users/229886047469664",
- "_rev" : "229886047469664",
- "_key" : "229886047469664",
- "active" : true,
- "id" : 208,
- "age" : 29,
- "name" : "Mary",
- "gender" : "f"
- }
-]
-```
-
-Note that there is a *LIMIT* clause but no *SORT* clause. In this case it is not guaranteed
-which of the user documents are returned. Effectively the document return order is unspecified
-if no *SORT* clause is used, and you should not rely on the order in such queries.
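-
-If a deterministic order is required, a *SORT* clause can be added. As a minimal
-variation of the query above (sorting by *name* here is just an example):
-
-```js
-FOR u IN users
-  SORT u.name
-  LIMIT 0, 3
-  RETURN u
-```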
-
-Projections
------------
-
-To return a projection from the collection *users* use a modified *RETURN* instruction:
-
-```js
-FOR u IN users
- LIMIT 0, 3
- RETURN {
- "user" : {
- "isActive" : u.active ? "yes" : "no",
- "name" : u.name
- }
- }
-```
-
-```json
-[
- {
- "user" : {
- "isActive" : "yes",
- "name" : "John"
- }
- },
- {
- "user" : {
- "isActive" : "yes",
- "name" : "Anthony"
- }
- },
- {
- "user" : {
- "isActive" : "yes",
- "name" : "Fred"
- }
- }
-]
-```
-
-Filters
--------
-
-To return a filtered projection from collection *users*, you can use the
-*FILTER* keyword. Additionally, a *SORT* clause is used to have the result
-returned in a specific order:
-
-```js
-FOR u IN users
- FILTER u.active == true && u.age >= 30
- SORT u.age DESC
- LIMIT 0, 5
- RETURN {
- "age" : u.age,
- "name" : u.name
- }
-```
-
-```json
-[
- {
- "age" : 37,
- "name" : "Sophia"
- },
- {
- "age" : 37,
- "name" : "John"
- },
- {
- "age" : 36,
- "name" : "Emma"
- },
- {
- "age" : 36,
- "name" : "Fred"
- },
- {
- "age" : 34,
- "name" : "Madison"
- }
-]
-```
diff --git a/Documentation/Books/AQL/Examples/QueriesNoCollections.md b/Documentation/Books/AQL/Examples/QueriesNoCollections.md
deleted file mode 100644
index 02e72fcea2eb..000000000000
--- a/Documentation/Books/AQL/Examples/QueriesNoCollections.md
+++ /dev/null
@@ -1,40 +0,0 @@
-Queries without collections
-===========================
-
-
-Following is a query that returns a string value. The result string is contained in an array
-because the result of every valid query is an array:
-
-```js
-RETURN "this will be returned"
-[
- "this will be returned"
-]
-```
-
-Here is a query that creates the cross products of two arrays and runs a projection
-on it, using a few of AQL's built-in functions:
-
-```js
-FOR year IN [ 2011, 2012, 2013 ]
- FOR quarter IN [ 1, 2, 3, 4 ]
- RETURN {
- "y" : "year",
- "q" : quarter,
- "nice" : CONCAT(quarter, "/", year)
- }
-[
- { "y" : "year", "q" : 1, "nice" : "1/2011" },
- { "y" : "year", "q" : 2, "nice" : "2/2011" },
- { "y" : "year", "q" : 3, "nice" : "3/2011" },
- { "y" : "year", "q" : 4, "nice" : "4/2011" },
- { "y" : "year", "q" : 1, "nice" : "1/2012" },
- { "y" : "year", "q" : 2, "nice" : "2/2012" },
- { "y" : "year", "q" : 3, "nice" : "3/2012" },
- { "y" : "year", "q" : 4, "nice" : "4/2012" },
- { "y" : "year", "q" : 1, "nice" : "1/2013" },
- { "y" : "year", "q" : 2, "nice" : "2/2013" },
- { "y" : "year", "q" : 3, "nice" : "3/2013" },
- { "y" : "year", "q" : 4, "nice" : "4/2013" }
-]
-```
diff --git a/Documentation/Books/AQL/Examples/README.md b/Documentation/Books/AQL/Examples/README.md
deleted file mode 100644
index 771070ebfe62..000000000000
--- a/Documentation/Books/AQL/Examples/README.md
+++ /dev/null
@@ -1,113 +0,0 @@
-Usual Query Patterns Examples
-=============================
-
-These pages contain some common query patterns with examples. For better
-comprehension, the query results are also included directly below each query.
-
-Normally, you would want to run queries on data stored in collections. This section
-will provide several examples for that.
-
-Some of the following example queries are executed on a collection *users* with the data provided below.
-
-
-Things to consider when running queries on collections
-------------------------------------------------------
-
-Note that all documents created in any collections will automatically get the
-following server-generated attributes:
-
-- *_id*: A unique id, consisting of [collection name](../../Manual/Appendix/Glossary.html#collection-name)
- and a server-side sequence value
-- *_key*: The server sequence value
-- *_rev*: The document's revision id
-
-Whenever you run queries on the documents in collections, don't be surprised if
-these additional attributes are returned as well.
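-
-For example, a document stored as `{ "name": "John" }` might be returned like this
-(the values are server-generated and purely illustrative):
-
-```json
-{
-  "_id" : "users/12345",
-  "_key" : "12345",
-  "_rev" : "12345",
-  "name" : "John"
-}
-```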
-
-Please also note that with real-world data, you might want to create additional
-indexes on the data (left out here for brevity). Adding indexes on attributes that are
-used in *FILTER* statements may considerably speed up queries. Furthermore, instead of
-using attributes such as *id*, *from* and *to*, you might want to use the built-in
-*_id*, *_from* and *_to* attributes. Finally, [edge collection](../../Manual/Appendix/Glossary.html#edge-collection)s provide a nice way of
-establishing references / links between documents. These features have been left out here
-for brevity as well.
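-
-As a sketch, such an index could be created from arangosh like this (a hash index
-on the *age* attribute is just an example and not part of the data set below):
-
-```js
-db.users.ensureIndex({ type: "hash", fields: [ "age" ] });
-```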
-
-
-Example data
-------------
-
-Some of the following example queries are executed on a collection *users*
-with the following initial data:
-
-```json
-[
- { "id": 100, "name": "John", "age": 37, "active": true, "gender": "m" },
- { "id": 101, "name": "Fred", "age": 36, "active": true, "gender": "m" },
- { "id": 102, "name": "Jacob", "age": 35, "active": false, "gender": "m" },
- { "id": 103, "name": "Ethan", "age": 34, "active": false, "gender": "m" },
- { "id": 104, "name": "Michael", "age": 33, "active": true, "gender": "m" },
- { "id": 105, "name": "Alexander", "age": 32, "active": true, "gender": "m" },
- { "id": 106, "name": "Daniel", "age": 31, "active": true, "gender": "m" },
- { "id": 107, "name": "Anthony", "age": 30, "active": true, "gender": "m" },
- { "id": 108, "name": "Jim", "age": 29, "active": true, "gender": "m" },
- { "id": 109, "name": "Diego", "age": 28, "active": true, "gender": "m" },
- { "id": 200, "name": "Sophia", "age": 37, "active": true, "gender": "f" },
- { "id": 201, "name": "Emma", "age": 36, "active": true, "gender": "f" },
- { "id": 202, "name": "Olivia", "age": 35, "active": false, "gender": "f" },
- { "id": 203, "name": "Madison", "age": 34, "active": true, "gender": "f" },
- { "id": 204, "name": "Chloe", "age": 33, "active": true, "gender": "f" },
- { "id": 205, "name": "Eva", "age": 32, "active": false, "gender": "f" },
- { "id": 206, "name": "Abigail", "age": 31, "active": true, "gender": "f" },
- { "id": 207, "name": "Isabella", "age": 30, "active": true, "gender": "f" },
- { "id": 208, "name": "Mary", "age": 29, "active": true, "gender": "f" },
- { "id": 209, "name": "Mariah", "age": 28, "active": true, "gender": "f" }
-]
-```
-
-For some of the examples, we'll also use a collection *relations* to store
-relationships between users. The example data for *relations* are as follows:
-
-```json
-[
- { "from": 209, "to": 205, "type": "friend" },
- { "from": 206, "to": 108, "type": "friend" },
- { "from": 202, "to": 204, "type": "friend" },
- { "from": 200, "to": 100, "type": "friend" },
- { "from": 205, "to": 101, "type": "friend" },
- { "from": 209, "to": 203, "type": "friend" },
- { "from": 200, "to": 203, "type": "friend" },
- { "from": 100, "to": 208, "type": "friend" },
- { "from": 101, "to": 209, "type": "friend" },
- { "from": 206, "to": 102, "type": "friend" },
- { "from": 104, "to": 100, "type": "friend" },
- { "from": 104, "to": 108, "type": "friend" },
- { "from": 108, "to": 209, "type": "friend" },
- { "from": 206, "to": 106, "type": "friend" },
- { "from": 204, "to": 105, "type": "friend" },
- { "from": 208, "to": 207, "type": "friend" },
- { "from": 102, "to": 108, "type": "friend" },
- { "from": 207, "to": 203, "type": "friend" },
- { "from": 203, "to": 106, "type": "friend" },
- { "from": 202, "to": 108, "type": "friend" },
- { "from": 201, "to": 203, "type": "friend" },
- { "from": 105, "to": 100, "type": "friend" },
- { "from": 100, "to": 109, "type": "friend" },
- { "from": 207, "to": 109, "type": "friend" },
- { "from": 103, "to": 203, "type": "friend" },
- { "from": 208, "to": 104, "type": "friend" },
- { "from": 105, "to": 104, "type": "friend" },
- { "from": 103, "to": 208, "type": "friend" },
- { "from": 203, "to": 107, "type": "boyfriend" },
- { "from": 107, "to": 203, "type": "girlfriend" },
- { "from": 208, "to": 109, "type": "boyfriend" },
- { "from": 109, "to": 208, "type": "girlfriend" },
- { "from": 106, "to": 205, "type": "girlfriend" },
- { "from": 205, "to": 106, "type": "boyfriend" },
- { "from": 103, "to": 209, "type": "girlfriend" },
- { "from": 209, "to": 103, "type": "boyfriend" },
- { "from": 201, "to": 102, "type": "boyfriend" },
- { "from": 102, "to": 201, "type": "girlfriend" },
- { "from": 206, "to": 100, "type": "boyfriend" },
- { "from": 100, "to": 206, "type": "girlfriend" }
-]
-```
diff --git a/Documentation/Books/AQL/Examples/RemoveVertex.md b/Documentation/Books/AQL/Examples/RemoveVertex.md
deleted file mode 100644
index ea9cc8a197f7..000000000000
--- a/Documentation/Books/AQL/Examples/RemoveVertex.md
+++ /dev/null
@@ -1,70 +0,0 @@
-Remove Vertex
-=============
-
-Deleting vertices together with their associated edges is currently not handled via AQL, whereas
-the [graph management interface](../../Manual/Graphs/GeneralGraphs/Management.html#remove-a-vertex)
-and the
-[REST API for the graph module](../../HTTP/Gharial/Vertices.html#remove-a-vertex)
-offer vertex deletion functionality.
-However, as shown in this example based on the
-[knows_graph](../../Manual/Graphs/index.html#the-knowsgraph), a query for this
-use case can be created.
-
-![Example Graph](../../Manual/Graphs/knows_graph.png)
-
-When deleting vertex **eve** from the graph, we also want the edges
-`eve -> alice` and `eve -> bob` to be removed.
-The involved graph and its only edge collection have to be known. In this case these
-are the graph **knows_graph** and the edge collection **knows**.
-
-This query will delete **eve** with its adjacent edges:
-
- @startDocuBlockInline GRAPHTRAV_removeVertex1
- @EXAMPLE_AQL{GRAPHTRAV_removeVertex1}
- @DATASET{knows_graph}
-LET edgeKeys = (FOR v, e IN 1..1 ANY 'persons/eve' GRAPH 'knows_graph' RETURN e._key)
-LET r = (FOR key IN edgeKeys REMOVE key IN knows)
-REMOVE 'eve' IN persons
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_removeVertex1
-
-This query performs several actions:
-* use a graph traversal of depth 1 to get the `_key` of **eve's** adjacent edges
-* remove all of these edges from the `knows` collection
-* remove vertex **eve** from the `persons` collection
-
-The following query shows a different design to achieve the same result:
-
- @startDocuBlockInline GRAPHTRAV_removeVertex2
- @EXAMPLE_AQL{GRAPHTRAV_removeVertex2}
- @DATASET{knows_graph}
-LET edgeKeys = (FOR v, e IN 1..1 ANY 'persons/eve' GRAPH 'knows_graph'
- REMOVE e._key IN knows)
-REMOVE 'eve' IN persons
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_removeVertex2
-
-**Note**: The query has to be adjusted to match a graph with multiple vertex/edge collections.
-
-For example, the [city graph](../../Manual/Graphs/index.html#the-city-graph)
-contains several vertex collections - `germanCity` and `frenchCity` and several
-edge collections - `french / german / international Highway`.
-
-![Example Graph2](../../Manual/Graphs/cities_graph.png)
-
-To delete the city **Berlin**, all edge collections `french / german / international Highway`
-have to be considered. The **REMOVE** operation has to be applied to all edge
-collections with `OPTIONS { ignoreErrors: true }`. Without this option, the query stops
-as soon as it tries to remove a non-existing key from a collection.
-
- @startDocuBlockInline GRAPHTRAV_removeVertex3
- @EXAMPLE_AQL{GRAPHTRAV_removeVertex3}
- @DATASET{routeplanner}
-LET edgeKeys = (FOR v, e IN 1..1 ANY 'germanCity/Berlin' GRAPH 'routeplanner' RETURN e._key)
-LET r = (FOR key IN edgeKeys REMOVE key IN internationalHighway
- OPTIONS { ignoreErrors: true } REMOVE key IN germanHighway
- OPTIONS { ignoreErrors: true } REMOVE key IN frenchHighway
- OPTIONS { ignoreErrors: true })
-REMOVE 'Berlin' IN germanCity
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_removeVertex3
diff --git a/Documentation/Books/AQL/ExecutionAndPerformance/ExplainingQueries.md b/Documentation/Books/AQL/ExecutionAndPerformance/ExplainingQueries.md
deleted file mode 100644
index 296a0a9292a6..000000000000
--- a/Documentation/Books/AQL/ExecutionAndPerformance/ExplainingQueries.md
+++ /dev/null
@@ -1,221 +0,0 @@
-Explaining queries
-==================
-
-If it is unclear how a given query will perform, clients can retrieve a query's execution plan
-from the AQL query optimizer without actually executing the query. Getting the query execution
-plan from the optimizer is called *explaining*.
-
-An explain will throw an error if the given query is syntactically invalid. Otherwise, it will
-return the execution plan and some information about what optimizations could be applied to
-the query. The query will not be executed.
-
-Explaining a query can be achieved by calling the [HTTP REST API](../../HTTP/AqlQuery/index.html)
-or via _arangosh_.
-A query can also be explained from the ArangoShell using the `ArangoDatabase`'s `explain` method
-or in detail via `ArangoStatement`'s `explain` method.
-
-
-Inspecting query plans
-----------------------
-
-The `explain` method of `ArangoStatement` as shown in the next chapters creates very verbose output.
-To get a human-readable output of the query plan, you can use the `explain` method of the database
-object in arangosh. You may use it like this (we disable syntax highlighting here):
-
- @startDocuBlockInline 01_workWithAQL_databaseExplain
- @EXAMPLE_ARANGOSH_OUTPUT{01_workWithAQL_databaseExplain}
- db._explain("LET s = SLEEP(0.25) LET t = SLEEP(0.5) RETURN 1", {}, {colors: false});
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 01_workWithAQL_databaseExplain
-
-The plan contains all execution nodes that are used during a query. These nodes represent different
-stages in a query. Each stage gets the input from the stage directly above (its dependencies).
-The plan will show you the estimated number of items (results) for each query stage (under _Est._). Each
-query stage roughly equates to a line in your original query, which you can see under _Comment_.
-
-
-Profiling queries
------------------
-
-Sometimes, even for intermediate ArangoDB users, it can be unclear where the
-execution time of a complex query is spent.
-
-Profiling a query executes it with special instrumentation code enabled.
-This gives you all the usual information as when explaining a query, but
-additionally you get the query profile, [runtime statistics](QueryStatistics.md)
-and per-node statistics.
-
-To use this in an interactive fashion on the shell you can use the
-`_profileQuery()` method on the `ArangoDatabase` object or use the web interface.
-
-For more information see [Profiling Queries](QueryProfiler.md).
-
- @startDocuBlockInline 01_workWithAQL_databaseProfileQuery
- @EXAMPLE_ARANGOSH_OUTPUT{01_workWithAQL_databaseProfileQuery}
- db._profileQuery("LET s = SLEEP(0.25) LET t = SLEEP(0.5) RETURN 1", {}, {colors: false});
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 01_workWithAQL_databaseProfileQuery
-
-
-Execution plans in detail
--------------------------
-
-By default, the query optimizer will return what it considers to be the *optimal plan*. The
-optimal plan will be returned in the `plan` attribute of the result. If `explain` is
-called with option `allPlans` set to `true`, all plans will be returned in the `plans`
-attribute instead. The result object will also contain an attribute *warnings*, which
-is an array of warnings that occurred during optimization or execution plan creation.
-
-Each plan in the result is an object with the following attributes:
-- *nodes*: the array of execution nodes of the plan. [The list of available node types
- can be found here](Optimizer.md)
-- *estimatedCost*: the total estimated cost for the plan. If there are multiple
- plans, the optimizer will choose the plan with the lowest total cost.
-- *collections*: an array of collections used in the query
-- *rules*: an array of rules the optimizer applied. [The list of rules can be
- found here](Optimizer.md)
-- *variables*: array of variables used in the query (note: this may contain
- internal variables created by the optimizer)
-
-Here is an example for retrieving the execution plan of a simple query:
-
- @startDocuBlockInline 07_workWithAQL_statementsExplain
- @EXAMPLE_ARANGOSH_OUTPUT{07_workWithAQL_statementsExplain}
- |var stmt = db._createStatement(
- "FOR user IN _users RETURN user");
- stmt.explain();
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 07_workWithAQL_statementsExplain
-
-As the output of `explain` is very detailed, it is recommended to use some
-scripting to make the output less verbose:
-
- @startDocuBlockInline 08_workWithAQL_statementsPlans
- @EXAMPLE_ARANGOSH_OUTPUT{08_workWithAQL_statementsPlans}
- |var formatPlan = function (plan) {
- | return { estimatedCost: plan.estimatedCost,
- | nodes: plan.nodes.map(function(node) {
- return node.type; }) }; };
- formatPlan(stmt.explain().plan);
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 08_workWithAQL_statementsPlans
-
-If a query contains bind parameters, they must be added to the statement **before**
-`explain` is called:
-
- @startDocuBlockInline 09_workWithAQL_statementsPlansBind
- @EXAMPLE_ARANGOSH_OUTPUT{09_workWithAQL_statementsPlansBind}
- |var stmt = db._createStatement(
- | `FOR doc IN @@collection FILTER doc.user == @user RETURN doc`
- );
- stmt.bind({ "@collection" : "_users", "user" : "root" });
- stmt.explain();
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 09_workWithAQL_statementsPlansBind
-
-In some cases the AQL optimizer creates multiple plans for a single query. By default
-only the plan with the lowest total estimated cost is kept, and the other plans are
-discarded. To retrieve all plans the optimizer has generated, `explain` can be called
-with the option `allPlans` set to `true`.
-
-In the following example, the optimizer has created two plans:
-
- @startDocuBlockInline 10_workWithAQL_statementsPlansOptimizer0
- @EXAMPLE_ARANGOSH_OUTPUT{10_workWithAQL_statementsPlansOptimizer0}
- |var stmt = db._createStatement(
- "FOR user IN _users FILTER user.user == 'root' RETURN user");
- stmt.explain({ allPlans: true }).plans.length;
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 10_workWithAQL_statementsPlansOptimizer0
-
-To see a slightly more compact version of the plan, the following transformation can be applied:
-
- @startDocuBlockInline 10_workWithAQL_statementsPlansOptimizer1
- @EXAMPLE_ARANGOSH_OUTPUT{10_workWithAQL_statementsPlansOptimizer1}
- ~var stmt = db._createStatement("FOR user IN _users FILTER user.user == 'root' RETURN user");
- |stmt.explain({ allPlans: true }).plans.map(
- function(plan) { return formatPlan(plan); });
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 10_workWithAQL_statementsPlansOptimizer1
-
-`explain` will also accept the following additional options:
-- *maxPlans*: limits the maximum number of plans that are created by the AQL query optimizer
-- *optimizer.rules*: an array of to-be-included or to-be-excluded optimizer rules
- can be put into this attribute, telling the optimizer to include or exclude
- specific rules. To disable a rule, prefix its name with a `-`, to enable a rule, prefix it
- with a `+`. There is also a pseudo-rule `all`, which will match all optimizer rules.
-
-The following example disables all optimizer rules but `remove-redundant-calculations`:
-
- @startDocuBlockInline 10_workWithAQL_statementsPlansOptimizer2
- @EXAMPLE_ARANGOSH_OUTPUT{10_workWithAQL_statementsPlansOptimizer2}
- ~var stmt = db._createStatement("FOR user IN _users FILTER user.user == 'root' RETURN user");
- |stmt.explain({ optimizer: {
- rules: [ "-all", "+remove-redundant-calculations" ] } });
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 10_workWithAQL_statementsPlansOptimizer2
-
-
-The contents of an execution plan are meant to be machine-readable. To get a human-readable
-version of a query's execution plan, the following commands can be used:
-
- @startDocuBlockInline 10_workWithAQL_statementsPlansOptimizer3
- @EXAMPLE_ARANGOSH_OUTPUT{10_workWithAQL_statementsPlansOptimizer3}
- var query = "FOR doc IN mycollection FILTER doc.value > 42 RETURN doc";
- require("@arangodb/aql/explainer").explain(query, {colors:false});
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 10_workWithAQL_statementsPlansOptimizer3
-
-The above command prints the query's execution plan in the ArangoShell directly, focusing
-on the most important information.
-
-
-Gathering debug information about a query
------------------------------------------
-
-If an explain provides no suitable insight into why a query does not perform as
-expected, it may be reported to the ArangoDB support. In order to make this as easy
-as possible, there is a built-in command in ArangoShell for packaging the query, its
-bind parameters and all data required to execute the query elsewhere.
-
-The command will store all data in a file with a configurable filename:
-
- @startDocuBlockInline 10_workWithAQL_debugging1
- @EXAMPLE_ARANGOSH_OUTPUT{10_workWithAQL_debugging1}
- var query = "FOR doc IN mycollection FILTER doc.value > 42 RETURN doc";
- require("@arangodb/aql/explainer").debugDump("/tmp/query-debug-info", query);
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 10_workWithAQL_debugging1
-
-Entitled users can send the generated file to the ArangoDB support to facilitate
-reproduction and debugging.
-
-If a query contains bind parameters, they need to be specified along with the query
-string:
-
- @startDocuBlockInline 10_workWithAQL_debugging2
- @EXAMPLE_ARANGOSH_OUTPUT{10_workWithAQL_debugging2}
- var query = "FOR doc IN @@collection FILTER doc.value > @value RETURN doc";
- var bind = { value: 42, "@collection": "mycollection" };
- require("@arangodb/aql/explainer").debugDump("/tmp/query-debug-info", query, bind);
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 10_workWithAQL_debugging2
-
-It is also possible to include example documents from the underlying collection in
-order to make reproduction even easier. Example documents can be sent as they are, or
-in an anonymized form. The number of example documents can be specified in the *examples*
-options attribute, and should generally be kept low. The *anonymize* option will replace
-the contents of string attributes in the examples with "XXX". It will however not
-replace any other types of data (e.g. numeric values) or attribute names. Attribute
-names in the examples will always be preserved because they may be indexed and used in
-queries:
-
- @startDocuBlockInline 10_workWithAQL_debugging3
- @EXAMPLE_ARANGOSH_OUTPUT{10_workWithAQL_debugging3}
- var query = "FOR doc IN @@collection FILTER doc.value > @value RETURN doc";
- var bind = { value: 42, "@collection": "mycollection" };
- var options = { examples: 10, anonymize: true };
- require("@arangodb/aql/explainer").debugDump("/tmp/query-debug-info", query, bind, options);
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 10_workWithAQL_debugging3
-
diff --git a/Documentation/Books/AQL/ExecutionAndPerformance/Optimizer.md b/Documentation/Books/AQL/ExecutionAndPerformance/Optimizer.md
deleted file mode 100644
index 79394164a548..000000000000
--- a/Documentation/Books/AQL/ExecutionAndPerformance/Optimizer.md
+++ /dev/null
@@ -1,533 +0,0 @@
-The AQL query optimizer
-=======================
-
-AQL queries are sent through an optimizer before execution. The task of the optimizer is
-to create an initial execution plan for the query, look for optimization opportunities and
-apply them. As a result, the optimizer might produce multiple execution plans for a
-single query. It will then calculate the costs for all plans and pick the plan with the
-lowest total cost. This resulting plan is considered to be the *optimal plan*, which is
-then executed.
-
-The optimizer is designed to only perform optimizations if they are *safe*, in the
-meaning that an optimization should not modify the result of a query. A notable exception
-to this is that the optimizer is allowed to change the order of results for queries that
-do not explicitly specify how results should be sorted.
-
-Execution plans
----------------
-
-The `explain` command can be used to retrieve the optimal execution plan or even all plans
-the optimizer has generated. Additionally, `explain` can reveal some more information
-about the optimizer's view of the query.
-
-### Inspecting plans using the explain helper
-
-The `explain` method of `ArangoStatement` as shown in the next chapters creates very verbose output.
-You can work on the output programmatically, or use this handy tool that we created
-to generate a more human-readable representation.
-
-You may use it like this (we disable syntax highlighting here):
-
- @startDocuBlockInline AQLEXP_01_axplainer
- @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_01_axplainer}
- ~addIgnoreCollection("test")
- ~db._drop("test");
- db._create("test");
- for (i = 0; i < 100; ++i) { db.test.save({ value: i }); }
- db.test.ensureIndex({ type: "skiplist", fields: [ "value" ] });
- var explain = require("@arangodb/aql/explainer").explain;
- explain("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value", {colors:false});
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock AQLEXP_01_axplainer
-
-
-### Execution plans in detail
-
-Let's have a look at the raw JSON output of the same execution plan
-using the `explain` method of `ArangoStatement`:
-
- @startDocuBlockInline AQLEXP_01_explainCreate
- @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_01_explainCreate}
- stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value");
- stmt.explain();
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock AQLEXP_01_explainCreate
-
-As you can see, the result details are very verbose so we will not show them in full in the next
-sections. Instead, let's take a closer look at the results step by step.
-
-#### Execution nodes
-
-In general, an execution plan can be considered to be a pipeline of processing steps.
-Each processing step is carried out by a so-called *execution node*.
-
-The `nodes` attribute of the `explain` result contains these *execution nodes* in
-the *execution plan*. The output is still very verbose, so here's a shortened form of it:
-
- @startDocuBlockInline AQLEXP_02_explainOverview
- @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_02_explainOverview}
- ~var stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value");
- stmt.explain().plan.nodes.map(function (node) { return node.type; });
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock AQLEXP_02_explainOverview
-
-*Note that the list of nodes might change slightly in future versions of ArangoDB if
-new execution node types get added or the optimizer creates somewhat more
-optimized plans.*
-
-When a plan is executed, the query execution engine will start with the node at
-the bottom of the list (i.e. the *ReturnNode*).
-
-The *ReturnNode*'s purpose is to return data to the caller. It does not produce
-data itself, so it will ask the node above it, which in our example is the
-*CalculationNode*.
-*CalculationNode*s are responsible for evaluating arbitrary expressions. In our
-example query, the *CalculationNode* will evaluate the value of `i.value`, which
-is needed by the *ReturnNode*. The calculation will be applied to all data the
-*CalculationNode* gets from the node above it, in our example the *IndexNode*.
-
-Finally, all of this needs to be done for documents of collection `test`. This is
-where the *IndexNode* enters the game. It will use an index (thus its name)
-to find certain documents in the collection and ship them down the pipeline in the
-order required by `SORT i.value`. The *IndexNode* itself has a *SingletonNode*
-as its input. The sole purpose of a *SingletonNode* is to provide a single empty
-document as input for other processing steps. It is always the end of the pipeline.
-
-Here's a summary:
-* SingletonNode: produces an empty document as input for other processing steps.
-* IndexNode: iterates over the index on attribute `value` in collection `test`
- in the order required by `SORT i.value`.
-* CalculationNode: evaluates the result of the calculation `i.value > 97` to `true` or `false`
-* CalculationNode: calculates return value `i.value`
-* ReturnNode: returns data to the caller
-
-
-#### Optimizer rules
-
-Note that in the example, the optimizer has optimized the `SORT` statement away.
-It can do it safely because there is a sorted skiplist index on `i.value`, which it has
-picked in the *IndexNode*. As the index values are iterated over in sorted order
-anyway, the extra *SortNode* would have been redundant and was removed.
-
-Additionally, the optimizer has done more work to generate an execution plan that
-avoids as many expensive operations as possible. Here is the list of optimizer rules
-that were applied to the plan:
-
- @startDocuBlockInline AQLEXP_03_explainRules
- @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_03_explainRules}
- ~var stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value");
- stmt.explain().plan.rules;
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock AQLEXP_03_explainRules
-
-Here is the meaning of these rules in context of this query:
-* `move-calculations-up`: moves a *CalculationNode* as far up in the processing pipeline
- as possible
-* `move-filters-up`: moves a *FilterNode* as far up in the processing pipeline as
- possible
-* `remove-redundant-calculations`: replaces references to variables with references to
- other variables that contain the exact same result. In the example query, `i.value`
- is calculated multiple times, but each calculation inside a loop iteration would
- produce the same value. Therefore, the expression result is shared by several nodes.
-* `remove-unnecessary-calculations`: removes *CalculationNode*s whose result values are
- not used in the query. In the example this happens due to the `remove-redundant-calculations`
- rule having made some calculations unnecessary.
-* `use-indexes`: use an index to iterate over a collection instead of performing a
- full collection scan. In the example case this makes sense, as the index can be
- used for filtering and sorting.
-* `remove-filter-covered-by-index`: remove an unnecessary filter whose functionality
- is already covered by an index. In this case the index only returns documents
- matching the filter.
-* `use-index-for-sort`: removes a `SORT` operation if it is already satisfied by
- traversing over a sorted index
-
-Note that some rules may appear multiple times in the list, with number suffixes.
-This is due to the same rule being applied multiple times, at different positions
-in the optimizer pipeline.
-
-
-#### Collections used in a query
-
-The list of collections used in a plan (and query) is contained in the `collections`
-attribute of a plan:
-
- @startDocuBlockInline AQLEXP_04_explainCollections
- @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_04_explainCollections}
- ~var stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value");
- stmt.explain().plan.collections
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock AQLEXP_04_explainCollections
-
-The `name` attribute contains the name of the `collection`, and `type` is the
-access type, which can be either `read` or `write`.
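-
-For the example statement above, this attribute might look roughly like the following
-(illustrative output only):
-
-```json
-[
-  { "name" : "test", "type" : "read" }
-]
-```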
-
-
-#### Variables used in a query
-
-The optimizer will also return a list of variables used in a plan (and query). This
-list will contain auxiliary variables created by the optimizer itself. This list
-can be ignored by end users in most cases.
-
-
-#### Cost of a query
-
-For each plan the optimizer generates, it will calculate the total cost. The plan
-with the lowest total cost is considered to be the optimal plan. Costs are
-estimates only, as the actual execution costs are unknown to the optimizer.
-Costs are calculated based on heuristics that are hard-coded into execution nodes.
-Cost values do not have any unit.
-
-
-### Retrieving all execution plans
-
-To retrieve not just the optimal plan but a list of all plans the optimizer has
-generated, set the option `allPlans` to `true`.
-
-This will return a list of all plans in the `plans` attribute instead of in the
-`plan` attribute:
-
- @startDocuBlockInline AQLEXP_05_explainAllPlans
- @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_05_explainAllPlans}
- ~var stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value");
- stmt.explain({ allPlans: true });
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock AQLEXP_05_explainAllPlans
-
-### Retrieving the plan as it was generated by the parser / lexer
-
-To retrieve the plan that most closely matches your query, you may turn off most
-optimizer rules by setting the option `rules` to `-all` (note that cluster rules
-cannot be disabled if you're running the explain on a cluster coordinator).
-
-This will return an unoptimized plan in the `plan` attribute:
-
- @startDocuBlockInline AQLEXP_06_explainUnoptimizedPlans
- @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_06_explainUnoptimizedPlans}
- ~var stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value");
- stmt.explain({ optimizer: { rules: [ "-all" ] } });
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock AQLEXP_06_explainUnoptimizedPlans
-
-Note that some optimizations are already done at parse time (e.g. evaluating simple
-constant calculations such as `1 + 1`).
-
-
-Turning specific optimizer rules off
-------------------------------------
-
-Optimizer rules can also be turned on or off individually, using the `rules` attribute.
-This can be used to enable or disable one or multiple rules. Rules that shall be enabled
-need to be prefixed with a `+`, rules to be disabled should be prefixed with a `-`. The
-pseudo-rule `all` matches all rules.
-
-Rules specified in `rules` are evaluated from left to right, so the following works to
-turn on just the one specific rule:
-
- @startDocuBlockInline AQLEXP_07_explainSingleRulePlans
- @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_07_explainSingleRulePlans}
- ~var stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value");
- stmt.explain({ optimizer: { rules: [ "-all", "+use-index-range" ] } });
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock AQLEXP_07_explainSingleRulePlans
-
-By default, all rules are turned on. To turn off just a few specific rules, use something
-like this:
-
- @startDocuBlockInline AQLEXP_08_explainDisableSingleRulePlans
- @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_08_explainDisableSingleRulePlans}
- ~var stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value");
- stmt.explain({ optimizer: { rules: [ "-use-index-range", "-use-index-for-sort" ] } });
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock AQLEXP_08_explainDisableSingleRulePlans
-
-The maximum number of plans created by the optimizer can also be limited using the
-`maxNumberOfPlans` attribute:
-
- @startDocuBlockInline AQLEXP_09_explainMaxNumberOfPlans
- @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_09_explainMaxNumberOfPlans}
- ~var stmt = db._createStatement("FOR i IN test FILTER i.value > 97 SORT i.value RETURN i.value");
- stmt.explain({ maxNumberOfPlans: 1 });
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock AQLEXP_09_explainMaxNumberOfPlans
-
-Optimizer statistics
---------------------
-
-The optimizer will return statistics as a part of an `explain` result.
-
-The following attributes will be returned in the `stats` attribute of an `explain` result:
-
-- `plansCreated`: total number of plans created by the optimizer
-- `rulesExecuted`: number of rules executed (note: an executed rule does not
- indicate a plan was actually modified by a rule)
-- `rulesSkipped`: number of rules skipped by the optimizer
-
-Warnings
---------
-
-For some queries, the optimizer may produce warnings. These will be returned in
-the `warnings` attribute of the `explain` result:
-
- @startDocuBlockInline AQLEXP_10_explainWarn
- @EXAMPLE_ARANGOSH_OUTPUT{AQLEXP_10_explainWarn}
- var stmt = db._createStatement("FOR i IN 1..10 RETURN 1 / 0")
- stmt.explain().warnings;
- ~db._drop("test")
- ~removeIgnoreCollection("test")
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock AQLEXP_10_explainWarn
-
-There is an upper bound on the number of warnings a query may produce. If that
-bound is reached, no further warnings will be returned.
-
-
-Optimization in a cluster
--------------------------
-
-When you're running AQL in the cluster, the parsing of the query is done on the
-coordinator. The coordinator then chops the query into snippets, some of which
-remain on the coordinator while others are distributed over the network
-to the shards. The cutting sites are interconnected via *Scatter-*, *Gather-* and *RemoteNodes*.
-
-These nodes mark the network borders of the snippets. The optimizer strives to reduce the amount
-of data transferred via these network interfaces by pushing `FILTER`s out to the shards,
-as it is vital for query performance to reduce the amount of data sent over the
-network links.
-
-Snippets marked with **DBS** are executed on the shards, **COOR** ones are executed on the coordinator.
-
-**As usual, the optimizer can only take certain assumptions for granted when doing so,
-e.g. [user-defined functions have to be executed on the coordinator](../Extending/README.md).
-If in doubt, you should modify your query to reduce the number of interconnections between your snippets.**
-
-When optimizing your query you may want to look at simpler parts of it first.
-
-List of execution nodes
------------------------
-
-The following execution node types will appear in the output of `explain`:
-
-* *SingletonNode*: the purpose of a *SingletonNode* is to produce an empty document
- that is used as input for other processing steps. Each execution plan will contain
- exactly one *SingletonNode* as its top node.
-* *EnumerateCollectionNode*: enumeration over documents of a collection (given in
- its *collection* attribute) without using an index.
-* *IndexNode*: enumeration over one or many indexes (given in its *indexes* attribute)
- of a collection. The index ranges are specified in the *condition* attribute of the node.
-* *EnumerateListNode*: enumeration over a list of (non-collection) values.
-* *FilterNode*: only lets values pass that satisfy a filter condition. Will appear once
- per *FILTER* statement.
-* *LimitNode*: limits the number of results passed to other processing steps. Will
- appear once per *LIMIT* statement.
-* *CalculationNode*: evaluates an expression. The expression result may be used by
- other nodes, e.g. *FilterNode*, *EnumerateListNode*, *SortNode* etc.
-* *SubqueryNode*: executes a subquery.
-* *SortNode*: performs a sort of its input values.
-* *AggregateNode*: aggregates its input and produces new output variables. This will
- appear once per *COLLECT* statement.
-* *ReturnNode*: returns data to the caller. Will appear in each read-only query at
- least once. Subqueries will also contain *ReturnNode*s.
-* *InsertNode*: inserts documents into a collection (given in its *collection*
- attribute). Will appear exactly once in a query that contains an *INSERT* statement.
-* *RemoveNode*: removes documents from a collection (given in its *collection*
- attribute). Will appear exactly once in a query that contains a *REMOVE* statement.
-* *ReplaceNode*: replaces documents in a collection (given in its *collection*
- attribute). Will appear exactly once in a query that contains a *REPLACE* statement.
-* *UpdateNode*: updates documents in a collection (given in its *collection*
- attribute). Will appear exactly once in a query that contains an *UPDATE* statement.
-* *UpsertNode*: upserts documents in a collection (given in its *collection*
- attribute). Will appear exactly once in a query that contains an *UPSERT* statement.
-* *NoResultsNode*: will be inserted if *FILTER* statements turn out to be never
- satisfiable. The *NoResultsNode* will pass an empty result set into the processing
- pipeline.
-
-For queries in the cluster, the following nodes may appear in execution plans:
-
-* *SingleRemoteOperationNode*: used on a coordinator to directly work with a single
- document on a DB-Server that was referenced by its `_key`.
-* *ScatterNode*: used on a coordinator to fan-out data to one or multiple shards.
-* *GatherNode*: used on a coordinator to aggregate results from one or many shards
- into a combined stream of results.
-* *DistributeNode*: used on a coordinator to fan-out data to one or multiple shards,
- taking into account a collection's shard key.
-* *RemoteNode*: a *RemoteNode* will perform communication with other ArangoDB
- instances in the cluster. For example, the cluster coordinator will need to
- communicate with other servers to fetch the actual data from the shards. It
- will do so via *RemoteNode*s. The data servers themselves might again pull
- further data from the coordinator, and thus might also employ *RemoteNode*s.
- So, all of the above cluster relevant nodes will be accompanied by a *RemoteNode*.
-
-
-List of optimizer rules
------------------------
-
-The following optimizer rules may appear in the `rules` attribute of a plan:
-
-* `move-calculations-up`: will appear if a *CalculationNode* was moved up in a plan.
- The intention of this rule is to move calculations up in the processing pipeline
- as far as possible (ideally out of enumerations) so they are not executed in loops
- if not required. It is also quite common that this rule enables further optimizations
- to kick in.
-* `move-filters-up`: will appear if a *FilterNode* was moved up in a plan. The
- intention of this rule is to move filters up in the processing pipeline as far
- as possible (ideally out of inner loops) so they filter results as early as possible.
-* `sort-in-values`: will appear when the values used as right-hand side of an `IN`
- operator will be pre-sorted using an extra function call. Pre-sorting the comparison
- array allows using a binary search in-list lookup with a logarithmic complexity instead
- of the default linear complexity in-list lookup.
-* `remove-unnecessary-filters`: will appear if a *FilterNode* was removed or replaced.
- *FilterNode*s whose filter condition will always evaluate to *true* will be
- removed from the plan, whereas *FilterNode* that will never let any results pass
- will be replaced with a *NoResultsNode*.
-* `remove-redundant-calculations`: will appear if redundant calculations (expressions
- with the exact same result) were found in the query. The optimizer rule will then
- replace references to the redundant expressions with a single reference, allowing
- other optimizer rules to remove the then-unneeded *CalculationNode*s.
-* `remove-unnecessary-calculations`: will appear if *CalculationNode*s were removed
-  from the query. The rule will remove all calculations whose result is not
- referenced in the query (note that this may be a consequence of applying other
- optimizations).
-* `remove-redundant-sorts`: will appear if multiple *SORT* statements can be merged
- into fewer sorts.
-* `interchange-adjacent-enumerations`: will appear if a query contains multiple
-  *FOR* statements whose order was permuted. Permutation of *FOR* statements is
- performed because it may enable further optimizations by other rules.
-* `remove-collect-variables`: will appear if an *INTO* clause was removed from a *COLLECT*
- statement because the result of *INTO* is not used. May also appear if a result
- of a *COLLECT* statement's *AGGREGATE* variables is not used.
-* `propagate-constant-attributes`: will appear when a constant value was inserted
- into a filter condition, replacing a dynamic attribute value.
-* `replace-or-with-in`: will appear if multiple *OR*-combined equality conditions
- on the same variable or attribute were replaced with an *IN* condition.
-* `remove-redundant-or`: will appear if multiple *OR* conditions for the same variable
- or attribute were combined into a single condition.
-* `use-indexes`: will appear when an index is used to iterate over a collection.
- As a consequence, an *EnumerateCollectionNode* was replaced with an
- *IndexNode* in the plan.
-* `remove-filter-covered-by-index`: will appear if a *FilterNode* was removed or replaced
- because the filter condition is already covered by an *IndexNode*.
-* `remove-filter-covered-by-traversal`: will appear if a *FilterNode* was removed or replaced
-  because the filter condition is already covered by a *TraversalNode*.
-* `use-index-for-sort`: will appear if an index can be used to avoid a *SORT*
- operation. If the rule was applied, a *SortNode* was removed from the plan.
-* `move-calculations-down`: will appear if a *CalculationNode* was moved down in a plan.
- The intention of this rule is to move calculations down in the processing pipeline
- as far as possible (below *FILTER*, *LIMIT* and *SUBQUERY* nodes) so they are executed
- as late as possible and not before their results are required.
-* `patch-update-statements`: will appear if an *UpdateNode* or *ReplaceNode* was patched
- to not buffer its input completely, but to process it in smaller batches. The rule will
- fire for an *UPDATE* or *REPLACE* query that is fed by a full collection scan or an index
- scan only, and that does not use any other collections, indexes, subqueries or traversals.
-* `optimize-traversals`: will appear if either the edge or path output variable in an
- AQL traversal was optimized away, or if a *FILTER* condition from the query was moved
- in the *TraversalNode* for early pruning of results.
-* `inline-subqueries`: will appear when a subquery was pulled out into its surrounding scope,
- e.g. `FOR x IN (FOR y IN collection FILTER y.value >= 5 RETURN y.test) RETURN x.a`
- would become `FOR tmp IN collection FILTER tmp.value >= 5 LET x = tmp.test RETURN x.a`
-* `geo-index-optimizer`: will appear when a geo index is utilized.
-* `replace-function-with-index`: will appear when a deprecated index function such as
- `FULLTEXT`, `NEAR`, `WITHIN` or `WITHIN_RECTANGLE` is replaced with a regular
- subquery.
-* `fuse-filters`: will appear if the optimizer merges adjacent FILTER nodes together into
- a single FILTER node
-* `simplify-conditions`: will appear if the optimizer replaces parts in a CalculationNode's
- expression with simpler expressions
-* `remove-sort-rand`: will appear when a *SORT RAND()* expression is removed by
- moving the random iteration into an *EnumerateCollectionNode*. This optimizer rule
- is specific for the MMFiles storage engine.
-* `reduce-extraction-to-projection`: will appear when an *EnumerateCollectionNode* or
- an *IndexNode* that would have extracted an entire document was modified to return
- only a projection of each document. Projections are limited to at most 5 different
- document attributes. This optimizer rule is specific for the RocksDB storage engine.
-* `optimize-subqueries`: will appear when optimizations are applied to a subquery. The
- optimizer rule will add a *LIMIT* statement to qualifying subqueries to make them
- return less data. Another optimization performed by this rule is to modify the result
- value of subqueries in case only the number of subquery results is checked later.
- This saves copying the document data from the subquery to the outer scope and may
- enable follow-up optimizations.
-* `sort-limit`: will appear when a *SortNode* is followed by a *LimitNode* with no
- intervening nodes that may change the element count (e.g. a *FilterNode* which
- could not be moved before the sort, or a source node like *EnumerateCollectionNode*).
- This is used to make the *SortNode* aware of the limit and offset from the *LimitNode*
- to enable some optimizations internal to the *SortNode* which allow for reduced
-  memory usage and, in many cases, improved sorting speed. The optimizer may
- choose not to apply the rule if it decides that it will offer little or no benefit.
- In particular it will not apply the rule if the input size is very small or if
- the output from the `LimitNode` is similar in size to the input. In exceptionally rare
- cases, this rule could result in some small slowdown. If observed, one can
- disable the rule for the affected query at the cost of increased memory usage.
-
-The following optimizer rules may appear in the `rules` attribute of cluster plans:
-
-* `optimize-cluster-single-document-operations`: it may appear if you directly reference
- a document by its `_key`; in this case no AQL will be executed on the DB-Servers, instead
- the coordinator will directly work with the documents on the DB-Servers.
-* `distribute-in-cluster`: will appear when query parts get distributed in a cluster.
- This is not an optimization rule, and it cannot be turned off.
-* `scatter-in-cluster`: will appear when scatter, gather, and remote nodes are inserted
- into a distributed query. This is not an optimization rule, and it cannot be turned off.
-* `distribute-filtercalc-to-cluster`: will appear when filters are moved up in a
- distributed execution plan. Filters are moved as far up in the plan as possible to
- make result sets as small as possible as early as possible.
-* `distribute-sort-to-cluster`: will appear if sorts are moved up in a distributed query.
- Sorts are moved as far up in the plan as possible to make result sets as small as possible
- as early as possible.
-* `remove-unnecessary-remote-scatter`: will appear if a RemoteNode is followed by a
- ScatterNode, and the ScatterNode is only followed by calculations or the SingletonNode.
- In this case, there is no need to distribute the calculation, and it will be handled
- centrally.
-* `undistribute-remove-after-enum-coll`: will appear if a RemoveNode can be pushed into
- the same query part that enumerates over the documents of a collection. This saves
- inter-cluster roundtrips between the EnumerateCollectionNode and the RemoveNode.
-* `collect-in-cluster`: will appear when a *CollectNode* on a coordinator is accompanied
- by extra *CollectNode*s on the database servers, which will do the heavy processing and
-  allow the *CollectNode* on the coordinator to do a light-weight aggregation only.
-* `restrict-to-single-shard`: will appear if a collection operation (IndexNode or a
- data-modification node) will only affect a single shard, and the operation can be
- restricted to the single shard and is not applied for all shards. This optimization
- can be applied for queries that access a collection only once in the query, and that
- do not use traversals, shortest path queries and that do not access collection data
- dynamically using the `DOCUMENT`, `FULLTEXT`, `NEAR` or `WITHIN` AQL functions.
-  Additionally, the optimizer will only pull off this optimization if it can safely
- determine the values of all the collection's shard keys from the query, and when the
- shard keys are covered by a single index (this is always true if the shard key is
- the default `_key`).
-* `smart-joins`: will appear when the query optimizer can reduce an inter-node join
- to a server-local join. This rule is only active in the *Enterprise Edition* of
- ArangoDB, and will only be employed when joining two collections with identical
- sharding setup via their shard keys.
-
-Note that some rules may appear multiple times in the list, with number suffixes.
-This is due to the same rule being applied multiple times, at different positions
-in the optimizer pipeline.
-
-### Additional optimizations applied
-
-If a query iterates over a collection (for filtering or counting) but does not need
-the actual document values later, the optimizer can apply a "scan-only" optimization
-for *EnumerateCollectionNode*s and *IndexNode*s. In this case, it will not build up
-a result with the document data at all, which may reduce work significantly especially
-with the RocksDB storage engine. If the document data is actually not needed
-later on, it may be sensible to remove it from the query so that the optimizer can
-apply the optimization.
-
-If the optimization is applied, it will show up as "scan only" in an AQL
-query's execution plan for an *EnumerateCollectionNode* or an *IndexNode*.
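-
-As a sketch, a counting query like the following does not use the document data at
-all and may therefore qualify for the scan-only optimization (the collection name is
-just an example):
-
-```js
-FOR doc IN mycollection
-  COLLECT WITH COUNT INTO total
-  RETURN total
-```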
-
-
-Additionally, the optimizer can apply an "index-only" optimization for AQL queries that
-can satisfy the retrieval of all required document attributes directly from an index.
-
-This optimization will be triggered for the RocksDB engine if an index is used
-that covers all required attributes of the document used later on in the query.
-If applied, it will save retrieving the actual document data (which would require
-an extra lookup in RocksDB), but will instead build the document data solely
-from the index values found. It will only be applied when using up to 5 attributes
-from the document, and only if the rest of the document data is not used later
-on in the query.
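-
-For example, a query of the following form only uses the indexed attribute *value*
-and may be answered from a covering index alone (assuming, for illustration, a
-persistent index on *value*):
-
-```js
-FOR doc IN mycollection
-  FILTER doc.value > 42
-  RETURN doc.value
-```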
-
-The optimization is currently available for the RocksDB engine for the index types
-primary, edge, hash, skiplist and persistent.
-
-If the optimization is applied, it will show up as "index only" in an AQL
-query's execution plan for an *IndexNode*.
diff --git a/Documentation/Books/AQL/ExecutionAndPerformance/ParsingQueries.md b/Documentation/Books/AQL/ExecutionAndPerformance/ParsingQueries.md
deleted file mode 100644
index 36d88173f435..000000000000
--- a/Documentation/Books/AQL/ExecutionAndPerformance/ParsingQueries.md
+++ /dev/null
@@ -1,25 +0,0 @@
-Parsing queries
-===============
-
-Clients can use ArangoDB to check if a given AQL query is syntactically valid. ArangoDB provides
-an [HTTP REST API](../../HTTP/AqlQuery/index.html) for this.
-
-A query can also be parsed from the ArangoShell using `ArangoStatement`'s `parse` method. The
-`parse` method will throw an exception if the query is syntactically invalid. Otherwise, it will
-return some information about the query.
-
-The return value is an object with the collection names used in the query listed in the
-`collections` attribute, and all bind parameters listed in the `bindVars` attribute.
-Additionally, the internal representation of the query, the query's abstract syntax tree, will
-be returned in the `AST` attribute of the result. Please note that the abstract syntax tree
-will be returned without any optimizations applied to it.
-
- @startDocuBlockInline 11_workWithAQL_parseQueries
- @EXAMPLE_ARANGOSH_OUTPUT{11_workWithAQL_parseQueries}
- |var stmt = db._createStatement(
- "FOR doc IN @@collection FILTER doc.foo == @bar RETURN doc");
- stmt.parse();
- ~removeIgnoreCollection("mycollection")
- ~db._drop("mycollection")
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 11_workWithAQL_parseQueries
diff --git a/Documentation/Books/AQL/ExecutionAndPerformance/QueryCache.md b/Documentation/Books/AQL/ExecutionAndPerformance/QueryCache.md
deleted file mode 100644
index a35b45bef247..000000000000
--- a/Documentation/Books/AQL/ExecutionAndPerformance/QueryCache.md
+++ /dev/null
@@ -1,235 +0,0 @@
-The AQL query results cache
-===========================
-
-AQL provides an optional query results cache.
-
-The purpose of the query results cache is to avoid repeated calculation of the same
-query results. It is useful if data-reading queries repeat a lot and there are
-not many write queries.
-
-The query results cache is transparent so users do not need to manually invalidate
-results in it if underlying collection data are modified.
-
-
-Modes
------
-
-The cache can be operated in the following modes:
-
-* `off`: the cache is disabled. No query results will be stored
-* `on`: the cache will store the results of all AQL queries unless their `cache`
- attribute flag is set to `false`
-* `demand`: the cache will store the results of AQL queries that have their
- `cache` attribute set to `true`, but will ignore all others
-
-The mode can be set at server startup and later changed at runtime.
-
-
-Query eligibility
------------------
-
-The query results cache will consider two queries identical if they have exactly the
-same query string and the same bind variables. Any deviation in terms of whitespace,
-capitalization etc. will be considered a difference. The query string will be hashed
-and used as the cache lookup key. If a query uses bind parameters, these will also be hashed
-and used as part of the cache lookup key.
-
-That means even if the query strings of two queries are identical, the query results
-cache will treat them as different queries if they have different bind parameter
-values. Other components that will become part of a query's cache key are the
-`count`, `fullCount` and `optimizer` attributes.
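-
-For example (assuming a collection named `users`), the following two calls will
-create two separate cache entries, although the queries are semantically identical:
-
-```js
-db._query("FOR u IN users RETURN u");
-db._query("FOR u  IN users RETURN u"); // extra whitespace => different hash => different cache entry
-```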
-
-If the cache is turned on, the cache will check at the very start of execution
-whether it has a result ready for this particular query. If that is the case,
-the query result will be served directly from the cache, which is normally
-very efficient. If the query cannot be found in the cache, it will be executed
-as usual.
-
-If the query is eligible for caching and the cache is turned on, the query
-result will be stored in the query results cache so it can be used for subsequent
-executions of the same query.
-
-A query is eligible for caching only if all of the following conditions are met:
-
-* the server the query executes on is not a coordinator
-* the query string is at least 8 characters long
-* the query is a read-only query and does not modify data in any collection
-* no warnings were produced while executing the query
-* the query is deterministic and only uses deterministic functions whose results
- are marked as cacheable
-* the size of the query result does not exceed the cache's configured maximal
- size for individual cache results or cumulated results
-* the query is not executed using a streaming cursor
-
-The usage of non-deterministic functions leads to a query not being cacheable.
-This is intentional to avoid caching of function results which should rather
-be calculated on each invocation of the query (e.g. `RAND()` or `DATE_NOW()`).
-
-The query results cache considers all user-defined AQL functions to be non-deterministic
-as it has no insight into these functions.
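-
-For example, a query like the following is not eligible for caching because it
-calls `RAND()` (the collection name is illustrative only):
-
-```js
-// not cacheable: RAND() is non-deterministic and must be re-evaluated
-// on every invocation of the query
-db._query("FOR u IN users RETURN { user: u, lucky: RAND() }");
-```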
-
-
-Cache invalidation
-------------------
-
-The cached results are fully or partially invalidated automatically if
-queries modify the data of collections that were used during the computation of
-the cached query results. This is to protect users from getting stale results
-from the query results cache.
-
-This also means that if the cache is turned on, then there is an additional
-cache invalidation check for each data-modification operation (e.g. insert, update,
-remove, truncate operations as well as AQL data-modification queries).
-
-**Example**
-
-If the result of the following query is present in the query results cache,
-then either modifying data in collection `users` or in collection `organizations`
-will remove the already computed result from the cache:
-
-```
-FOR user IN users
- FOR organization IN organizations
- FILTER user.organization == organization._key
- RETURN { user: user, organization: organization }
-```
-
-Modifying data in other collections than the named two will not lead to this
-query result being removed from the cache.
-
-
-Performance considerations
---------------------------
-
-The query results cache is organized as a hash table, so looking up whether a query result
-is present in the cache is relatively fast. Still, the query string and the bind
-parameters used in the query will need to be hashed. This is a slight overhead that
-will not be present if the cache is turned off or a query is marked as not cacheable.
-
-Additionally, storing query results in the cache and fetching results from the
-cache requires locking via an R/W lock. While many threads can read in parallel from
-the cache, there can only be a single modifying thread at any given time. Modifications
-of the query cache contents are required when a query result is stored in the cache
-or during cache invalidation after data-modification operations. Cache invalidation
-will require time proportional to the number of cached items that need to be invalidated.
-
-There may be workloads in which enabling the query results cache will lead to a performance
-degradation. It is not recommended to turn the query results cache on in workloads that only
-modify data, or that modify data more often than reading it. Turning on the cache
-will also provide no benefit if queries are very diverse and do not repeat often.
-In read-only or read-mostly workloads, the cache will be beneficial if the same
-queries are repeated lots of times.
-
-In general, the query results cache will provide the biggest improvements for queries with
-small result sets that take long to calculate. If query results are very big and
-most of the query time is spent on copying the result from the cache to the client,
-then the cache will not provide much benefit.
-
-
-Global configuration
---------------------
-
-The query results cache can be configured at server start using the configuration parameter
-`--query.cache-mode`. This will set the cache mode according to the descriptions
-above.
-
-After the server is started, the cache mode can be changed at runtime as follows:
-
-```
-require("@arangodb/aql/cache").properties({ mode: "on" });
-```
-
-The maximum number of cached results in the cache for each database can be configured
-at server start using the following configuration parameters:
-
-* `--query.cache-entries`: maximum number of results in query result cache per database
-* `--query.cache-entries-max-size`: maximum cumulated size of results in query result cache per database
-* `--query.cache-entry-max-size`: maximum size of an individual result entry in query result cache
-* `--query.cache-include-system-collections`: whether or not to include system collection queries in the query result cache
-
-These parameters can be used to put an upper bound on the number and size of query
-results in each database's query cache and thus restrict the cache's memory consumption.
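-
-A minimal sketch of setting these limits at server start (the values are just
-examples):
-
-```
-arangod --query.cache-mode on \
-        --query.cache-entries 200 \
-        --query.cache-entries-max-size 8388608 \
-        --query.cache-entry-max-size 1048576 \
-        --query.cache-include-system-collections false
-```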
-
-These values can also be adjusted at runtime as follows:
-
-```
-require("@arangodb/aql/cache").properties({
- maxResults: 200,
- maxResultsSize: 8 * 1024 * 1024,
- maxEntrySize: 1024 * 1024,
- includeSystem: false
-});
-```
-
-The above will limit the number of cached results in the query results cache to 200
-results per database, and to 8 MB cumulated query result size per database. The maximum
-size of each individual query cache entry is restricted to 1 MB. Queries that involve system
-collections are excluded from caching.
-
-
-Per-query configuration
------------------------
-
-When a query is sent to the server for execution and the cache is set to `on` or `demand`,
-the query executor will look into the query's `cache` attribute. If the query cache mode is
-`on`, then not setting this attribute or setting it to anything but `false` will make the
-query executor consult the query cache. If the query cache mode is `demand`, then setting
-the `cache` attribute to `true` will make the executor look for the query in the query cache.
-When the query cache mode is `off`, the executor will not look for the query in the cache.
-
-The `cache` attribute can be set as follows via the `db._createStatement()` function:
-
-```
-var stmt = db._createStatement({
- query: "FOR doc IN users LIMIT 5 RETURN doc",
- cache: true /* cache attribute set here */
-});
-
-stmt.execute();
-```
-
-When using the `db._query()` function, the `cache` attribute can be set as follows:
-
-```
-db._query({
- query: "FOR doc IN users LIMIT 5 RETURN doc",
- cache: true /* cache attribute set here */
-});
-```
-
-The `cache` attribute can be set via the HTTP REST API `POST /_api/cursor`, too.
-
-Each query result returned will contain a `cached` attribute. This will be set to `true`
-if the result was retrieved from the query cache, and `false` otherwise. Clients can use
-this attribute to check if a specific query was served from the cache or not.
-
-
-Query results cache inspection
-------------------------------
-
-The contents of the query results cache can be checked at runtime using the cache's
-`toArray()` function:
-
-```
-require("@arangodb/aql/cache").toArray();
-```
-
-This will return a list of all query results stored in the current database's query
-results cache.
-
-The query results cache for the current database can be cleared at runtime using the
-cache's `clear` function:
-
-```
-require("@arangodb/aql/cache").clear();
-```
-
-
-Restrictions
-------------
-
-Query results that are returned from the query results cache may contain execution statistics
-stemming from the initial, uncached query execution. This means that for a cached query result,
-the *extra.stats* attribute may contain stale data, especially in terms of the *executionTime*
-and *profile* attribute values.
-
diff --git a/Documentation/Books/AQL/ExecutionAndPerformance/QueryProfiler.md b/Documentation/Books/AQL/ExecutionAndPerformance/QueryProfiler.md
deleted file mode 100644
index 84c1702abb9b..000000000000
--- a/Documentation/Books/AQL/ExecutionAndPerformance/QueryProfiler.md
+++ /dev/null
@@ -1,221 +0,0 @@
-Profiling and Hand-Optimizing AQL queries
-=========================================
-
-To give you more insight into your query, ArangoDB allows you to execute it
-with special instrumentation code enabled. This will then print a query plan
-with detailed execution statistics.
-
-To use this in an interactive fashion on the shell you can use
-`db._profileQuery(..)` in _arangosh_. Alternatively, there is a button
-_Profile_ in the Query tab of the web interface.
-
-The printed execution plan then contains three additional columns:
-
-- **Call**: The number of times this query stage was executed
-- **Items**: The number of temporary result rows at this stage
-- **Runtime**: The total time spent in this stage
-
-Below the execution plan there are additional sections for the overall runtime
-statistics and the query profile.
-
-Example: Simple AQL query
--------------------------
-
-Assume we have a collection named `acollection` and insert 10000 documents
-via `for (let i=0; i < 10000;i++) db.acollection.insert({value:i})`.
-Then a simple query filtering for `value < 10` will return 10 results:
-
-@startDocuBlockInline 01_workWithAQL_profileQuerySimple
-@EXAMPLE_ARANGOSH_OUTPUT{01_workWithAQL_profileQuerySimple}
-~db._drop("acollection");
-~db._create('acollection');
-~for (let i=0; i < 10000; i++) { db.acollection.insert({value:i}); }
-|db._profileQuery(`
-|FOR doc IN acollection
-| FILTER doc.value < 10
-| RETURN doc`, {}, {colors: false}
-);
-~db._drop("acollection");
-@END_EXAMPLE_ARANGOSH_OUTPUT
-@endDocuBlock 01_workWithAQL_profileQuerySimple
-
-An AQL query is essentially executed in a pipeline that chains together different
-functional execution blocks. Each block gets the input rows from the parent above
-it, does some processing and then outputs a certain number of output rows.
-
-Without any detailed insight into the query execution it is impossible to tell
-how many results each pipeline-block had to work on and how long this took.
-By executing the query with the query profiler (`db._profileQuery()` or via
-the _Profile_ button in the web interface) you can check exactly how much work
-each stage had to do.
-
-Without any indexes this query should have to perform the following operations:
-
-1. Perform a full collection scan via an _EnumerateCollectionNode_, outputting
- a row containing the document in `doc`.
-2. Calculate the boolean expression `LET #1 = doc.value < 10` from all inputs
- via a _CalculationNode_
-3. Filter out all input rows where `#1` is false via the _FilterNode_
-4. Put the `doc` variable of the remaining rows into the result set via
- the _ResultNode_
-
-The _EnumerateCollectionNode_ processed and returned all 10k rows (documents),
-as did the _CalculationNode_. Because the AQL execution engine also uses an
-internal batch size of 1000, these blocks were each called about 10 times.
-The _FilterNode_ as well as the _ReturnNode_ however only ever returned 10 rows
-and only had to be called once, because the result size fits within a single batch.
-
-Let us add a skiplist index on `value` to speed up the query:
-
-```js
-db.acollection.ensureIndex({type:"skiplist", fields:["value"]});
-```
-
-@startDocuBlockInline 02_workWithAQL_profileQuerySimpleIndex
-@EXAMPLE_ARANGOSH_OUTPUT{02_workWithAQL_profileQuerySimpleIndex}
-~db._create('acollection');
-~db.acollection.ensureIndex({type:"skiplist", fields:["value"]});
-~for (let i=0; i < 10000; i++) { db.acollection.insert({value:i}); }
-|db._profileQuery(`
-|FOR doc IN acollection
-| FILTER doc.value < 10
-| RETURN doc`, {}, {colors: false}
-);
-~db._drop("acollection");
-@END_EXAMPLE_ARANGOSH_OUTPUT
-@endDocuBlock 02_workWithAQL_profileQuerySimpleIndex
-
-This results in replacing the collection scan and filter block with an
-`IndexNode`. The execution pipeline of the AQL query has become much shorter.
-Also the number of rows processed by each pipeline block is only 10, because
-we no longer need to look at all documents.
-
-Example: AQL with Subquery
---------------------------
-
-Let us consider a query containing a subquery:
-
-@startDocuBlockInline 03_workWithAQL_profileQuerySubquery
-@EXAMPLE_ARANGOSH_OUTPUT{03_workWithAQL_profileQuerySubquery}
-~db._create('acollection');
-~db.acollection.ensureIndex({type:"skiplist", fields:["value"]});
-~for (let i=0; i < 10000;i++) { db.acollection.insert({value:i}); }
-|db._profileQuery(`
-|LET list = (FOR doc in acollection FILTER doc.value > 90 RETURN doc)
-|FOR a IN list
-| FILTER a.value < 91
-| RETURN a`, {}, {colors: false, optimizer:{rules:["-all"]}}
-);
-~db._drop("acollection");
-@END_EXAMPLE_ARANGOSH_OUTPUT
-@endDocuBlock 03_workWithAQL_profileQuerySubquery
-
-The resulting query profile contains a _SubqueryNode_ which has the runtime of
-all its children combined.
-
-Actually, we cheated a little. The optimizer would have completely removed the
-subquery if it had not been deactivated (`rules:["-all"]`). The optimized
-version would take longer in the "optimizing plan" stage, but should perform
-better with a lot of results.
-
-Example: AQL with Aggregation
------------------------------
-
-Let us try a more advanced query, using a [COLLECT](../Operations/Collect.md)
-statement. Assume we have a user collection with each document having a city,
-a username and an age attribute.
-
-The following query gets us all age groups in buckets (0-9, 10-19, 20-29, ...):
-
-@startDocuBlockInline 04_workWithAQL_profileQueryAggregation
-@EXAMPLE_ARANGOSH_OUTPUT{04_workWithAQL_profileQueryAggregation}
-~db._create('myusers');
-~["berlin", "paris", "cologne", "munich", "london"].forEach((c) => { ["peter", "david", "simon", "lars"].forEach( n => db.myusers.insert({ city : c, name : n, age: Math.floor(Math.random() * 75) }) ) });
-|db._profileQuery(`
-|FOR u IN myusers
-| COLLECT ageGroup = FLOOR(u.age / 10) * 10
-| AGGREGATE minAge = MIN(u.age), maxAge = MAX(u.age), len = LENGTH(u)
-| RETURN {
-| ageGroup,
-| minAge,
-| maxAge,
-| len
-| }`, {}, {colors: false}
-);
-~db._drop("myusers")
-@END_EXAMPLE_ARANGOSH_OUTPUT
-@endDocuBlock 04_workWithAQL_profileQueryAggregation
-
-Without any indexes this query should have to perform the following operations:
-
-1. Perform a full collection scan via an _EnumerateCollectionNode_, outputting
-   a row containing the document in `u`.
-2. Compute the expression `LET #1 = FLOOR(u.age / 10) * 10` for all inputs via
- a _CalculationNode_
-3. Perform the aggregations via the _CollectNode_
-4. Sort the resulting aggregated rows via a _SortNode_
-5. Build a result value via another _CalculationNode_
-6. Put the result variable into the result set via the _ResultNode_
-
-As in the example above, you can see that after the _CalculationNode_
-stage, only a handful of the original 20 rows remained.
-
-Typical AQL Performance Mistakes
---------------------------------
-
-With the new query profiler you should be able to spot typical performance
-mistakes that we see quite often:
-
-- Not employing indexes to speed up queries with common filter expressions
-- Not using shard keys in filter statements, when they are known
- (only a cluster problem)
-- Using subqueries to calculate an intermediary result, but only using a
- few results
-
-Bad example:
-
-```js
-LET vertices = (
- FOR v IN 1..2 ANY @startVertex GRAPH 'my_graph'
- // <-- add a LIMIT 1 here
- RETURN v
-)
-FOR doc IN collection
- FILTER doc.value == vertices[0].value
- RETURN doc
-```
-
-Adding a `LIMIT 1` into the subquery should result in better performance,
-because the traversal can be stopped after the first result instead of
-computing all paths.
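-
-Applying that suggestion, the subquery could be rewritten like this:
-
-```js
-LET vertices = (
-  FOR v IN 1..2 ANY @startVertex GRAPH 'my_graph'
-    LIMIT 1 // stop the traversal after the first matching vertex
-    RETURN v
-)
-FOR doc IN collection
-  FILTER doc.value == vertices[0].value
-  RETURN doc
-```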
-
-Another mistake is to start a graph traversal from the wrong side
-(if both ends are known).
-
-Assume we have two vertex collections _users_ and _products_ as well as an
-edge collection _purchased_. The graph model looks like this:
-`(users) <--[purchased]--> (products)`, i.e. every user is connected with an
-edge in _purchased_ to zero or more _products_.
-
-If we want to know all users that have purchased the product _playstation_
-as well as products of `type` _legwarmer_, we could use this query:
-
-```js
-FOR prod IN products
- FILTER prod.type == 'legwarmer'
- FOR v,e,p IN 2..2 OUTBOUND prod purchased
- FILTER v._key == 'playstation' // <-- last vertex of the path
- RETURN p.vertices[1] // <-- the user
-```
-
-This query first finds all legwarmer products and then performs a traversal
-for each of them. But we could also invert the traversal by starting with
-the known _playstation_ product. This way we only need a single traversal
-to achieve the same result:
-
-```js
-FOR v,e,p IN 2..2 OUTBOUND 'product/playstation' purchased
- FILTER v.type == 'legwarmer' // <-- last vertex of the path
- RETURN p.vertices[1] // <-- the user
-```
-
diff --git a/Documentation/Books/AQL/ExecutionAndPerformance/QueryStatistics.md b/Documentation/Books/AQL/ExecutionAndPerformance/QueryStatistics.md
deleted file mode 100644
index 7cf74d2d2ad1..000000000000
--- a/Documentation/Books/AQL/ExecutionAndPerformance/QueryStatistics.md
+++ /dev/null
@@ -1,62 +0,0 @@
-Query statistics
-================
-
-A query that has been executed will always return execution statistics. Execution statistics
-can be retrieved by calling `getExtra()` on the cursor. The statistics are returned in the
-return value's `stats` attribute:
-
- @startDocuBlockInline 06_workWithAQL_statementsExtra
- @EXAMPLE_ARANGOSH_OUTPUT{06_workWithAQL_statementsExtra}
- |db._query(`
- | FOR i IN 1..@count INSERT
- | { _key: CONCAT('anothertest', TO_STRING(i)) }
- | INTO mycollection`,
- | {count: 100},
- | {},
- | {fullCount: true}
- ).getExtra();
- |db._query({
- | "query": `FOR i IN 200..@count INSERT
- | { _key: CONCAT('anothertest', TO_STRING(i)) }
- | INTO mycollection`,
- | "bindVars": {count: 300},
- | "options": { fullCount: true}
- }).getExtra();
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 06_workWithAQL_statementsExtra
-
-The meaning of the statistics attributes is as follows:
-
-* *writesExecuted*: the total number of data-modification operations successfully executed.
- This is equivalent to the number of documents created, updated or removed by `INSERT`,
- `UPDATE`, `REPLACE` or `REMOVE` operations.
-* *writesIgnored*: the total number of data-modification operations that were unsuccessful,
- but have been ignored because of query option `ignoreErrors`.
-* *scannedFull*: the total number of documents iterated over when scanning a collection
-  without an index. Documents scanned by subqueries will be included in the result, but
-  not operations triggered by built-in or user-defined AQL functions.
-* *scannedIndex*: the total number of documents iterated over when scanning a collection using
-  an index. Documents scanned by subqueries will be included in the result, but
-  not operations triggered by built-in or user-defined AQL functions.
-* *filtered*: the total number of documents that were removed after executing a filter condition
- in a `FilterNode`. Note that `IndexRangeNode`s can also filter documents by selecting only
- the required index range from a collection, and the `filtered` value only indicates how much
- filtering was done by `FilterNode`s.
-* *fullCount*: the total number of documents that matched the search condition if the query's
- final top-level `LIMIT` statement were not present.
- This attribute may only be returned if the `fullCount` option was set when starting the
- query and will only contain a sensible value if the query contained a `LIMIT` operation on
- the top level.
-* *peakMemoryUsage*: the maximum memory usage of the query while it was running. In a cluster,
- the memory accounting is done per shard, and the memory usage reported is the peak
- memory usage value from the individual shards.
- Note that to keep things light-weight, the per-query memory usage is tracked on a relatively
- high level, not including any memory allocator overhead nor any memory used for temporary
- results calculations (e.g. memory allocated/deallocated inside AQL expressions and function
- calls). The attribute *peakMemoryUsage* is available from v3.4.3.
-* *nodes*: _(optional)_ when the query was executed with the option `profile` set to at least *2*,
- then this value contains runtime statistics per query execution node. This field contains the
- node id (in `id`), the number of calls to this node `calls` and the number of items returned
- by this node `items` (Items are the temporary results returned at this stage). You can correlate
-  these statistics with the `plan` returned in `extra`. For a human-readable output you can execute
-  `db._profileQuery(, )` in arangosh.
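-
-A minimal sketch of reading these statistics from arangosh (assuming a
-collection named `mycollection` exists):
-
-```js
-var cursor = db._query("FOR doc IN mycollection LIMIT 10 RETURN doc",
-                       {}, {}, { fullCount: true });
-cursor.getExtra().stats; // e.g. writesExecuted, scannedFull, filtered, fullCount, ...
-```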
diff --git a/Documentation/Books/AQL/ExecutionAndPerformance/README.md b/Documentation/Books/AQL/ExecutionAndPerformance/README.md
deleted file mode 100644
index d053099fac31..000000000000
--- a/Documentation/Books/AQL/ExecutionAndPerformance/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
-AQL Execution and Performance
-=============================
-
-This chapter describes AQL features related to query executions and query performance.
-
-* [Execution statistics](QueryStatistics.md): A query that has been executed also returns statistics about its execution.
-
-* [Query parsing](ParsingQueries.md): Clients can use ArangoDB to check if a given AQL query is syntactically valid.
-
-* [Query execution plan](ExplainingQueries.md): If it is unclear how a given query will perform, clients can retrieve a query's execution plan from the AQL query optimizer without actually executing the query; this is called explaining.
-
-* [The AQL query optimizer](Optimizer.md): AQL queries are sent through an optimizer before execution. The task of the optimizer is to create an initial execution plan for the query, look for optimization opportunities and apply them.
-
-* [Query Profiling](QueryProfiler.md): Sometimes a query does not perform, but it is unclear which
-parts of the plan are responsible. The query-profiler can show you execution statistics for every
-stage of the query execution.
-
-* [The AQL query result cache](QueryCache.md): an optional query results cache can be used to avoid repeated calculation of the same query results.
-
-Be sure to check out the
-[ArangoDB Performance Course](https://www.arangodb.com/arangodb-performance-course/)
-for freshers as well.
diff --git a/Documentation/Books/AQL/Extending/Conventions.md b/Documentation/Books/AQL/Extending/Conventions.md
deleted file mode 100644
index 31e2fe83a18b..000000000000
--- a/Documentation/Books/AQL/Extending/Conventions.md
+++ /dev/null
@@ -1,119 +0,0 @@
-Conventions
-===========
-
-Naming
-------
-
-Built-in AQL functions that are shipped with ArangoDB reside in the namespace
-*_aql*, which is also the default namespace to look in if an unqualified
-function name is found.
-
-To refer to a user-defined AQL function, the function name must be fully
-qualified to also include the user-defined namespace. The *::* symbol is used
-as the namespace separator. Users can create a multi-level hierarchy of function
-groups if required:
-
-```js
-MYGROUP::MYFUNC()
-MYFUNCTIONS::MATH::RANDOM()
-```
-
-**Note**: Adding user functions to the *_aql* namespace is disallowed and will
-fail.
-
-User function names are case-insensitive like all function names in AQL.
-
-Variables and side effects
---------------------------
-
-User functions can take any number of input arguments and should
-provide one result via a `return` statement. User functions should be kept
-purely functional and thus free of side effects and state modification.
-
-Modification of global variables is unsupported, as is reading or changing
-the data of any collection from inside an AQL user function.
-
-User function code is late-bound, and may thus not rely on any variables
-that existed at the time of declaration. If user function code requires
-access to any external data, it must take care to set up the data by
-itself.
-
-All AQL user function-specific variables should be introduced with the `var`
-keyword in order to not accidentally access already defined variables from
-outer scopes. Not using the `var` keyword for own variables may cause side
-effects when executing the function.
-
-Here is an example that may modify outer scope variables `i` and `name`,
-making the function **not** side-effect free:
-
-```js
-function (values) {
- for (i = 0; i < values.length; ++i) {
- name = values[i];
- if (name === "foo") {
- return i;
- }
- }
- return null;
-}
-```
-
-The above function can be made free of side effects by using the `var` or
-`let` keywords, so the variables become function-local variables:
-
-```js
-function (values) {
- for (var i = 0; i < values.length; ++i) {
- var name = values[i];
- if (name === "foo") {
- return i;
- }
- }
- return null;
-}
-```
-
-Input parameters
-----------------
-
-In order to return a result, a user function should use a `return` instruction
-rather than modifying its input parameters.
-
-AQL user functions are allowed to modify their input parameters for input
-parameters that are null, boolean, numeric or string values. Modifying these
-input parameter types inside a user function should be free of side effects.
-However, user functions should not modify input parameters if the parameters are
-arrays or objects and as such passed by reference, as that may modify variables
-and state outside of the user function itself.
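-
-A sketch of how a function body can avoid mutating an array argument
-(illustrative only):
-
-```js
-function (values) {
-  // work on a copy so the caller's array stays untouched
-  var copy = values.slice();
-  copy.push("extra");
-  return copy;
-}
-```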
-
-Return values
--------------
-
-User functions must only return primitive types (i.e. *null*, boolean
-values, numeric values, string values) or aggregate types (arrays or
-objects) composed of these types.
-Returning any other JavaScript object type (Function, Date, RegExp etc.) from
-a user function may lead to undefined behavior and should be avoided.
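-
-For example, instead of returning a `Date` object directly, a user function
-could return its string representation:
-
-```js
-function () {
-  // return an ISO 8601 string (a primitive value) instead of a Date object
-  return new Date().toISOString();
-}
-```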
-
-Enforcing strict mode
----------------------
-
-By default, any user function code will be executed in *sloppy mode*, not
-*strict* or *strong mode*. In order to make a user function run in strict
-mode, use `"use strict"` explicitly inside the user function, e.g.:
-
-```js
-function (values) {
- "use strict"
-
- for (var i = 0; i < values.length; ++i) {
- var name = values[i];
- if (name === "foo") {
- return i;
- }
- }
- return null;
-}
-```
-
-Any violation of the strict mode will trigger a runtime error.
diff --git a/Documentation/Books/AQL/Extending/Functions.md b/Documentation/Books/AQL/Extending/Functions.md
deleted file mode 100644
index 58c593739a66..000000000000
--- a/Documentation/Books/AQL/Extending/Functions.md
+++ /dev/null
@@ -1,221 +0,0 @@
-Registering and Unregistering User Functions
-============================================
-
-User-defined functions (UDFs) can be registered in the selected database
-using the *aqlfunctions* object as follows:
-
-```js
-var aqlfunctions = require("@arangodb/aql/functions");
-```
-
-To register a function, the fully qualified function name plus the
-function code must be specified. This can easily be done in
-[arangosh](../../Manual/Programs/Arangosh/index.html). The
-[HTTP Interface](../../HTTP/AqlUserFunctions/index.html) also offers
-User Functions management.
-
-In a cluster setup, make sure to connect to a coordinator to manage the UDFs.
-
-Documents in the *_aqlfunctions* collection (or any other system collection)
-should not be accessed directly, but only via the dedicated interfaces.
-Otherwise you might see caching issues or accidentally break something.
-The interfaces will ensure the correct format of the documents and invalidate
-the UDF cache.
-
-Registering an AQL user function
---------------------------------
-
-For testing, it may be sufficient to directly type the function code in the shell.
-To manage more complex code, you may write it in the code editor of your choice
-and save it as a file. For example:
-
-```js
-/* path/to/file.js */
-'use strict';
-
-function greeting(name) {
- if (name === undefined) {
- name = "World";
- }
- return `Hello ${name}!`;
-}
-
-module.exports = greeting;
-```
-
-Then require it in the shell in order to register a user-defined function:
-
-```
-arangosh> var func = require("path/to/file.js");
-arangosh> aqlfunctions.register("HUMAN::GREETING", func, true);
-```
-
-Note that a return value of *false* means that the function `HUMAN::GREETING`
-was newly created, and not that it failed to register. *true* is returned
-if a function of that name existed before and was just updated.
-
-`aqlfunctions.register(name, code, isDeterministic)`
-
-Registers an AQL user function, identified by a fully qualified function
-name. The function code in *code* must be specified as a JavaScript
-function or a string representation of a JavaScript function.
-If the function code in *code* is passed as a string, it is required that
-the string evaluates to a JavaScript function definition.
-
-If a function identified by *name* already exists, the previous function
-definition will be updated. Please also make sure that the function code
-does not violate the [Conventions](Conventions.md) for AQL
-functions.
-
-The *isDeterministic* attribute can be used to specify whether the
-function results are fully deterministic (i.e. depend solely on the input
-and are the same for repeated calls with the same input values). It is not
-used at the moment but may be used for optimizations later.
-
-The registered function is stored in the selected database's system
-collection *_aqlfunctions*.
-
-The function returns *true* when it updates/replaces an existing AQL
-function of the same name, and *false* otherwise. It will throw an exception
-when it detects syntactically invalid function code.
-
-
-**Examples**
-
-
-```js
-require("@arangodb/aql/functions").register("MYFUNCTIONS::TEMPERATURE::CELSIUSTOFAHRENHEIT",
-function (celsius) {
- return celsius * 1.8 + 32;
-});
-```
-
-The function code will not be executed in *strict mode* or *strong mode* by
-default. In order to make a user function run in strict mode, use
-`use strict` explicitly, e.g.:
-
-```js
-require("@arangodb/aql/functions").register("MYFUNCTIONS::TEMPERATURE::CELSIUSTOFAHRENHEIT",
-function (celsius) {
- "use strict";
- return celsius * 1.8 + 32;
-});
-```
-
-You can access the name under which the AQL function is registered by accessing
-the `name` property of `this` inside the JavaScript code:
-
-```js
-require("@arangodb/aql/functions").register("MYFUNCTIONS::TEMPERATURE::CELSIUSTOFAHRENHEIT",
-function (celsius) {
- "use strict";
- if (typeof celsius === "undefined") {
- const error = require("@arangodb").errors.ERROR_QUERY_FUNCTION_ARGUMENT_NUMBER_MISMATCH;
- AQL_WARNING(error.code, require("util").format(error.message, this.name, 1, 1));
- }
- return celsius * 1.8 + 32;
-});
-```
-
-`AQL_WARNING()` is automatically available to the code of user-defined
-functions. The error code and message is retrieved via `@arangodb` module.
-The *argument number mismatch* message has placeholders, which we can substitute
-using [format()](http://nodejs.org/api/util.html):
-
-```
-invalid number of arguments for function '%s()', expected number of arguments: minimum: %d, maximum: %d
-```
-
-In the example above, `%s` is replaced by `this.name` (the AQL function name),
-and both `%d` placeholders by `1` (number of expected arguments). If you call
-the function without an argument, you will see this:
-
-```
-arangosh> db._query("RETURN MYFUNCTIONS::TEMPERATURE::CELSIUSTOFAHRENHEIT()")
-[object ArangoQueryCursor, count: 1, hasMore: false, warning: 1541 - invalid
-number of arguments for function 'MYFUNCTIONS::TEMPERATURE::CELSIUSTOFAHRENHEIT()',
-expected number of arguments: minimum: 1, maximum: 1]
-
-[
- null
-]
-```
-
-Deleting an existing AQL user function
---------------------------------------
-
-`aqlfunctions.unregister(name)`
-
-Unregisters an existing AQL user function, identified by the fully qualified
-function name.
-
-Trying to unregister a function that does not exist will result in an
-exception.
-
-
-**Examples**
-
-
-```js
-require("@arangodb/aql/functions").unregister("MYFUNCTIONS::TEMPERATURE::CELSIUSTOFAHRENHEIT");
-```
-
-
-Unregister Group
-----------------
-
-Delete a group of AQL user functions:
-
-`aqlfunctions.unregisterGroup(prefix)`
-
-Unregisters a group of AQL user functions, identified by a common function
-group prefix.
-
-This will return the number of functions unregistered.
-
-
-**Examples**
-
-
-```js
-require("@arangodb/aql/functions").unregisterGroup("MYFUNCTIONS::TEMPERATURE");
-
-require("@arangodb/aql/functions").unregisterGroup("MYFUNCTIONS");
-```
-
-
-Listing all AQL user functions
-------------------------------
-
-`aqlfunctions.toArray()`
-
-Returns all previously registered AQL user functions, with their fully
-qualified names and function code.
-
-The result may optionally be restricted to a specified group of functions
-by specifying a group prefix:
-
-`aqlfunctions.toArray(prefix)`
-
-
-**Examples**
-
-To list all available user functions:
-
-```js
-require("@arangodb/aql/functions").toArray();
-```
-
-To list all available user functions in the *MYFUNCTIONS* namespace:
-
-```js
-require("@arangodb/aql/functions").toArray("MYFUNCTIONS");
-```
-
-To list all available user functions in the *MYFUNCTIONS::TEMPERATURE* namespace:
-
-```js
-require("@arangodb/aql/functions").toArray("MYFUNCTIONS::TEMPERATURE");
-```
diff --git a/Documentation/Books/AQL/Extending/README.md b/Documentation/Books/AQL/Extending/README.md
deleted file mode 100644
index 8558a59d9b48..000000000000
--- a/Documentation/Books/AQL/Extending/README.md
+++ /dev/null
@@ -1,73 +0,0 @@
-Extending AQL with User Functions
-=================================
-
-AQL comes with a [built-in set of functions](../Functions/README.md), but it is
-not a fully-featured programming language.
-
-To add missing functionality or to simplify queries, users may add their own
-functions to AQL in the selected database. These functions are written in
-JavaScript, and are deployed via an API; see [Registering Functions](Functions.md).
-
-In order to avoid conflicts with existing or future built-in function names,
-all user defined functions (**UDF**) have to be put into separate namespaces.
-Invoking a UDF is then possible by referring to the fully-qualified function name,
-which includes the namespace, too; see [Conventions](Conventions.md).
-
-Technical Details
------------------
-
-### Known Limitations
-
-{% hint 'warning' %}
-UDFs can have serious effects on the performance of your queries and the resource
-usage in ArangoDB. Especially in cluster setups they should not be used against
-much data, because this data will need to be sent over the network back and forth
-between _DBservers_ and _Coordinators_, potentially adding a lot of latency.
-This can be mitigated by very selective `FILTER`s before calls to UDFs.
-{% endhint %}
-
-Since the optimizer doesn't know anything about the nature of your function,
-**the optimizer can't use indices for UDFs**. So you should never lean on a UDF
-as the primary criterion for a `FILTER` statement to reduce your query result set.
-Instead, put another `FILTER` statement in front of it. You should make sure
-that this [**`FILTER` statement** is effective](../ExecutionAndPerformance/Optimizer.md)
-to reduce the query result before passing it to your UDF.
-
-As a rule of thumb, the closer the UDF is to your final `RETURN` statement
-(or maybe even inside it), the better.
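-
-A sketch of that pattern (the UDF name `MYFUNCTIONS::IS_RELEVANT` and the
-attribute used in the first `FILTER` are made up for this example):
-
-```js
-FOR doc IN collection
-  FILTER doc.type == "user"            // selective filter first, can use an index
-  FILTER MYFUNCTIONS::IS_RELEVANT(doc) // the UDF only sees the reduced result set
-  RETURN doc
-```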
-
-When used in clusters, UDFs are always executed on the
-[coordinator](../../Manual/Architecture/DeploymentModes/Cluster/Architecture.html).
-
-As UDFs are written in JavaScript, each query that executes a UDF will acquire
-one V8 context to execute the UDFs in it. V8 contexts can be re-used across subsequent
-queries, but when UDF-invoking queries run in parallel, they will each require a
-dedicated V8 context.
-
-Using UDFs in clusters may thus result in a higher resource allocation
-in terms of used V8 contexts and server threads. If you run out
-of these resources, your query may abort with a
-[**cluster backend unavailable**](../../Manual/Appendix/ErrorCodes.html) error.
-
-To overcome these mentioned limitations, you may want to increase the
-[number of available V8 contexts](../../Manual/Programs/Arangod/Javascript.html#v8-contexts)
-(at the expense of increased memory usage), and the
-[number of available server threads](../../Manual/Programs/Arangod/Server.html#server-threads).
-
-### Deployment Details
-
-Internally, UDFs are stored in a system collection named `_aqlfunctions`
-of the selected database. When an AQL statement refers to such a UDF,
-it is loaded from that collection. The UDFs will be exclusively
-available for queries in that particular database.
-
-Since the coordinator doesn't have its own local collections, the `_aqlfunctions`
-collection is sharded across the cluster. Therefore (as usual), it has to be
-accessed through a coordinator - you mustn't talk to the shards directly.
-Once it is in the `_aqlfunctions` collection, it is available on all
-coordinators without additional effort.
-
-Keep in mind that system collections are excluded from dumps created with
-[arangodump](../../Manual/Programs/Arangodump/index.html) by default.
-To include AQL UDFs in a dump, the dump needs to be started with
-the option *--include-system-collections true*.
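-
-A minimal sketch of such a dump invocation (database name and output directory
-are examples):
-
-```
-arangodump --server.database mydb --include-system-collections true --output-directory "dump"
-```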
diff --git a/Documentation/Books/AQL/FOOTER.html b/Documentation/Books/AQL/FOOTER.html
deleted file mode 100644
index 239869bfaf6a..000000000000
--- a/Documentation/Books/AQL/FOOTER.html
+++ /dev/null
@@ -1 +0,0 @@
-© ArangoDB - the native multi-model NoSQL database
\ No newline at end of file
diff --git a/Documentation/Books/AQL/Functions/Array.md b/Documentation/Books/AQL/Functions/Array.md
deleted file mode 100644
index 07118118c66c..000000000000
--- a/Documentation/Books/AQL/Functions/Array.md
+++ /dev/null
@@ -1,743 +0,0 @@
-# Array functions
-
-AQL provides functions for higher-level array manipulation. Also see the
-[numeric functions](Numeric.md) for functions that work on number arrays.
-If you want to concatenate the elements of an array equivalent to `join()`
-in JavaScript, see [CONCAT()](String.md#concat) and
-[CONCAT_SEPARATOR()](String.md#concatseparator) in the string functions chapter.
-
-Apart from that, AQL also offers several language constructs:
-
-- simple [array access](../Fundamentals/DataTypes.md#arrays--lists) of individual elements,
-- [array operators](../Advanced/ArrayOperators.md) for array expansion and contraction,
- optionally with inline filter, limit and projection,
-- [array comparison operators](../Operators.md#array-comparison-operators) to compare
- each element in an array to a value or the elements of another array,
-- loop-based operations on arrays using [FOR](../Operations/For.md),
- [SORT](../Operations/Sort.md),
- [LIMIT](../Operations/Limit.md),
- as well as [COLLECT](../Operations/Collect.md) for grouping,
- which also offers efficient aggregation.
-
-## APPEND()
-
-`APPEND(anyArray, values, unique) → newArray`
-
-Add all elements of an array to another array. All values are added at the end of the
-array (right side).
-
-It can also be used to append a single element to an array. It is not necessary to wrap
-it in an array (unless it is an array itself). You may also use [PUSH()](#push) instead.
-
-- **anyArray** (array): array with elements of arbitrary type
-- **values** (array|any): array, whose elements shall be added to *anyArray*
-- **unique** (bool, *optional*): if set to *true*, only those *values* will be added
- that are not already contained in *anyArray*. The default is *false*.
-- returns **newArray** (array): the modified array
-
-**Examples**
-
-@startDocuBlockInline aqlArrayAppend_1
-@EXAMPLE_AQL{aqlArrayAppend_1}
-RETURN APPEND([ 1, 2, 3 ], [ 5, 6, 9 ])
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayAppend_1
-
-@startDocuBlockInline aqlArrayAppend_2
-@EXAMPLE_AQL{aqlArrayAppend_2}
-RETURN APPEND([ 1, 2, 3 ], [ 3, 4, 5, 2, 9 ], true)
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayAppend_2
-
-## CONTAINS_ARRAY()
-
-This is an alias for [POSITION()](#position).
-
-
-## COUNT()
-
-This is an alias for [LENGTH()](#length).
-
-## COUNT_DISTINCT()
-
-`COUNT_DISTINCT(anyArray) → number`
-
-Get the number of distinct elements in an array.
-
-- **anyArray** (array): array with elements of arbitrary type
-- returns **number**: the number of distinct elements in *anyArray*.
-
-**Examples**
-
-@startDocuBlockInline aqlArrayCountDistinct_1
-@EXAMPLE_AQL{aqlArrayCountDistinct_1}
-RETURN COUNT_DISTINCT([ 1, 2, 3 ])
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayCountDistinct_1
-
-@startDocuBlockInline aqlArrayCountDistinct_2
-@EXAMPLE_AQL{aqlArrayCountDistinct_2}
-RETURN COUNT_DISTINCT([ "yes", "no", "yes", "sauron", "no", "yes" ])
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayCountDistinct_2
-
-## COUNT_UNIQUE()
-
-This is an alias for [COUNT_DISTINCT()](#countdistinct).
-
-## FIRST()
-
-`FIRST(anyArray) → firstElement`
-
-Get the first element of an array. It is the same as `anyArray[0]`.
-
-- **anyArray** (array): array with elements of arbitrary type
-- returns **firstElement** (any|null): the first element of *anyArray*, or *null* if
- the array is empty.
-
-**Examples**
-
-@startDocuBlockInline aqlArrayFirst_1
-@EXAMPLE_AQL{aqlArrayFirst_1}
-RETURN FIRST([ 1, 2, 3 ])
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayFirst_1
-
-@startDocuBlockInline aqlArrayFirst_2
-@EXAMPLE_AQL{aqlArrayFirst_2}
-RETURN FIRST([])
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayFirst_2
-
-## FLATTEN()
-
-`FLATTEN(anyArray, depth) → flatArray`
-
-Turn an array of arrays into a flat array. All array elements in *array* will be
-expanded in the result array. Non-array elements are added as they are. The function
-will recurse into sub-arrays up to the specified depth. Duplicates will not be removed.
-
-Also see [array contraction](../Advanced/ArrayOperators.md#array-contraction).
-
-- **array** (array): array with elements of arbitrary type, including nested arrays
-- **depth** (number, *optional*): flatten up to this many levels, the default is 1
-- returns **flatArray** (array): a flattened array
-
-**Examples**
-
-@startDocuBlockInline aqlArrayFlatten_1
-@EXAMPLE_AQL{aqlArrayFlatten_1}
-RETURN FLATTEN( [ 1, 2, [ 3, 4 ], 5, [ 6, 7 ], [ 8, [ 9, 10 ] ] ] )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayFlatten_1
-
-To fully flatten the example array, use a *depth* of 2:
-
-@startDocuBlockInline aqlArrayFlatten_2
-@EXAMPLE_AQL{aqlArrayFlatten_2}
-RETURN FLATTEN( [ 1, 2, [ 3, 4 ], 5, [ 6, 7 ], [ 8, [ 9, 10 ] ] ], 2 )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayFlatten_2
-
-## INTERSECTION()
-
-`INTERSECTION(array1, array2, ... arrayN) → newArray`
-
-Return the intersection of all arrays specified. The result is an array of values that
-occur in all arguments.
-
-Other set operations are [UNION()](#union),
-[MINUS()](#minus) and
-[OUTERSECTION()](#outersection).
-
-- **arrays** (array, *repeatable*): an arbitrary number of arrays as multiple arguments
- (at least 2)
-- returns **newArray** (array): a single array with only the elements, which exist in all
- provided arrays. The element order is random. Duplicates are removed.
-
-**Examples**
-
-@startDocuBlockInline aqlArrayIntersection_1
-@EXAMPLE_AQL{aqlArrayIntersection_1}
-RETURN INTERSECTION( [1,2,3,4,5], [2,3,4,5,6], [3,4,5,6,7] )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayIntersection_1
-
-@startDocuBlockInline aqlArrayIntersection_2
-@EXAMPLE_AQL{aqlArrayIntersection_2}
-RETURN INTERSECTION( [2,4,6], [8,10,12], [14,16,18] )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayIntersection_2
-
-## LAST()
-
-`LAST(anyArray) → lastElement`
-
-Get the last element of an array. It is the same as `anyArray[-1]`.
-
-- **anyArray** (array): array with elements of arbitrary type
-- returns **lastElement** (any|null): the last element of *anyArray* or *null* if the
- array is empty.
-
-**Example**
-
-@startDocuBlockInline aqlArrayLast_1
-@EXAMPLE_AQL{aqlArrayLast_1}
-RETURN LAST( [1,2,3,4,5] )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayLast_1
-
-## LENGTH()
-
-`LENGTH(anyArray) → length`
-
-Determine the number of elements in an array.
-
-- **anyArray** (array): array with elements of arbitrary type
-- returns **length** (number): the number of array elements in *anyArray*.
-
-*LENGTH()* can also determine the [number of attribute keys](Document.md#length)
-of an object / document, the [amount of documents](Miscellaneous.md#length) in a
-collection and the [character length](String.md#length) of a string.
-
-|input|length|
-|---|---|
-|String|number of unicode characters|
-|Number|number of unicode characters that represent the number|
-|Array|number of elements|
-|Object|number of first level elements|
-|true|1|
-|false|0|
-|null|0|
-
-**Examples**
-
-@startDocuBlockInline aqlArrayLength_1
-@EXAMPLE_AQL{aqlArrayLength_1}
-RETURN LENGTH( "🥑" )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayLength_1
-
-@startDocuBlockInline aqlArrayLength_2
-@EXAMPLE_AQL{aqlArrayLength_2}
-RETURN LENGTH( 1234 )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayLength_2
-
-@startDocuBlockInline aqlArrayLength_3
-@EXAMPLE_AQL{aqlArrayLength_3}
-RETURN LENGTH( [1,2,3,4,5,6,7] )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayLength_3
-
-@startDocuBlockInline aqlArrayLength_4
-@EXAMPLE_AQL{aqlArrayLength_4}
-RETURN LENGTH( [1,2,3,4,5,6,7] )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayLength_4
-
-@startDocuBlockInline aqlArrayLength_5
-@EXAMPLE_AQL{aqlArrayLength_5}
-RETURN LENGTH( {a:1, b:2, c:3, d:4, e:{f:5,g:6}} )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayLength_5
-
-## MINUS()
-
-`MINUS(array1, array2, ... arrayN) → newArray`
-
-Return the difference of all arrays specified.
-
-Other set operations are [UNION()](#union),
-[INTERSECTION()](#intersection) and
-[OUTERSECTION()](#outersection).
-
-- **arrays** (array, *repeatable*): an arbitrary number of arrays as multiple
- arguments (at least 2)
-- returns **newArray** (array): an array of values that occur in the first array,
- but not in any of the subsequent arrays. The order of the result array is undefined
- and should not be relied on. Duplicates will be removed.
-
-**Example**
-
-@startDocuBlockInline aqlArrayMinus_1
-@EXAMPLE_AQL{aqlArrayMinus_1}
-RETURN MINUS( [1,2,3,4], [3,4,5,6], [5,6,7,8] )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayMinus_1
-
-## NTH()
-
-`NTH(anyArray, position) → nthElement`
-
-Get the element of an array at a given position. It is the same as `anyArray[position]`
-for positive positions, but does not support negative positions.
-
-- **anyArray** (array): array with elements of arbitrary type
-- **position** (number): position of desired element in array, positions start at 0
-- returns **nthElement** (any|null): the array element at the given *position*.
- If *position* is negative or beyond the upper bound of the array,
- then *null* will be returned.
-
-**Examples**
-
-@startDocuBlockInline aqlArrayNth_1
-@EXAMPLE_AQL{aqlArrayNth_1}
-RETURN NTH( [ "foo", "bar", "baz" ], 2 )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayNth_1
-
-@startDocuBlockInline aqlArrayNth_2
-@EXAMPLE_AQL{aqlArrayNth_2}
-RETURN NTH( [ "foo", "bar", "baz" ], 3 )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayNth_2
-
-@startDocuBlockInline aqlArrayNth_3
-@EXAMPLE_AQL{aqlArrayNth_3}
-RETURN NTH( [ "foo", "bar", "baz" ], -1 )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayNth_3
-
-## OUTERSECTION()
-
-`OUTERSECTION(array1, array2, ... arrayN) → newArray`
-
-Return the values that occur only once across all arrays specified.
-
-Other set operations are [UNION()](#union),
-[MINUS()](#minus) and
-[INTERSECTION()](#intersection).
-
-- **arrays** (array, *repeatable*): an arbitrary number of arrays as multiple arguments
- (at least 2)
-- returns **newArray** (array): a single array with only the elements that exist only once
- across all provided arrays. The element order is random.
-
-**Example**
-
-@startDocuBlockInline aqlArrayOutersection_1
-@EXAMPLE_AQL{aqlArrayOutersection_1}
-RETURN OUTERSECTION( [ 1, 2, 3 ], [ 2, 3, 4 ], [ 3, 4, 5 ] )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayOutersection_1
-
-## POP()
-
-`POP(anyArray) → newArray`
-
-Remove the last element of *anyArray*.
-
-To append an element (right side), see [PUSH()](#push).
-To remove the first element, see [SHIFT()](#shift).
-To remove an element at an arbitrary position, see [REMOVE_NTH()](#removenth).
-
-- **anyArray** (array): an array with elements of arbitrary type
-- returns **newArray** (array): *anyArray* without the last element. If it's already
- empty or has only a single element left, an empty array is returned.
-
-**Examples**
-
-@startDocuBlockInline aqlArrayPop_1
-@EXAMPLE_AQL{aqlArrayPop_1}
-RETURN POP( [ 1, 2, 3, 4 ] )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayPop_1
-
-@startDocuBlockInline aqlArrayPop_2
-@EXAMPLE_AQL{aqlArrayPop_2}
-RETURN POP( [ 1 ] )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayPop_2
-
-## POSITION()
-
-`POSITION(anyArray, search, returnIndex) → position`
-
-Return whether *search* is contained in *array*. Optionally return the position.
-
-- **anyArray** (array): the haystack, an array with elements of arbitrary type
-- **search** (any): the needle, an element of arbitrary type
-- **returnIndex** (bool, *optional*): if set to *true*, the position of the match
- is returned instead of a boolean. The default is *false*.
-- returns **position** (bool|number): *true* if *search* is contained in *anyArray*,
- *false* otherwise. If *returnIndex* is enabled, the position of the match is
- returned (positions start at 0), or *-1* if it's not found.
-
-To determine if or at which position a string occurs in another string, see the
-[CONTAINS() string function](String.md#contains).
-
-**Examples**
-
-@startDocuBlockInline aqlArrayPosition_1
-@EXAMPLE_AQL{aqlArrayPosition_1}
-RETURN POSITION( [2,4,6,8], 4 )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayPosition_1
-
-@startDocuBlockInline aqlArrayPosition_2
-@EXAMPLE_AQL{aqlArrayPosition_2}
-RETURN POSITION( [2,4,6,8], 4, true )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayPosition_2
-
-## PUSH()
-
-`PUSH(anyArray, value, unique) → newArray`
-
-Append *value* to *anyArray* (right side).
-
-To remove the last element, see [POP()](#pop).
-To prepend a value (left side), see [UNSHIFT()](#unshift).
-To append multiple elements, see [APPEND()](#append).
-
-- **anyArray** (array): array with elements of arbitrary type
-- **value** (any): an element of arbitrary type
-- **unique** (bool): if set to *true*, then *value* is not added if already
- present in the array. The default is *false*.
-- returns **newArray** (array): *anyArray* with *value* added at the end
- (right side)
-
-Note: The *unique* flag only controls if *value* is added if it's already present
-in *anyArray*. Duplicate elements that already exist in *anyArray* will not be
-removed. To make an array unique, use the [UNIQUE()](#unique) function.
-
-**Examples**
-
-@startDocuBlockInline aqlArrayPush_1
-@EXAMPLE_AQL{aqlArrayPush_1}
-RETURN PUSH([ 1, 2, 3 ], 4)
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayPush_1
-
-@startDocuBlockInline aqlArrayPush_2
-@EXAMPLE_AQL{aqlArrayPush_2}
-RETURN PUSH([ 1, 2, 2, 3 ], 2, true)
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayPush_2
-
-## REMOVE_NTH()
-
-`REMOVE_NTH(anyArray, position) → newArray`
-
-Remove the element at *position* from *anyArray*.
-
-To remove the first element, see [SHIFT()](#shift).
-To remove the last element, see [POP()](#pop).
-
-- **anyArray** (array): array with elements of arbitrary type
-- **position** (number): the position of the element to remove. Positions start
- at 0. Negative positions are supported, with -1 being the last array element.
- If *position* is out of bounds, the array is returned unmodified.
-- returns **newArray** (array): *anyArray* without the element at *position*
-
-**Examples**
-
-@startDocuBlockInline aqlArrayRemoveNth_1
-@EXAMPLE_AQL{aqlArrayRemoveNth_1}
-RETURN REMOVE_NTH( [ "a", "b", "c", "d", "e" ], 1 )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayRemoveNth_1
-
-@startDocuBlockInline aqlArrayRemoveNth_2
-@EXAMPLE_AQL{aqlArrayRemoveNth_2}
-RETURN REMOVE_NTH( [ "a", "b", "c", "d", "e" ], -2 )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayRemoveNth_2
-
-## REMOVE_VALUE()
-
-`REMOVE_VALUE(anyArray, value, limit) → newArray`
-
-Remove all occurrences of *value* in *anyArray*. Optionally with a *limit*
-to the number of removals.
-
-- **anyArray** (array): array with elements of arbitrary type
-- **value** (any): an element of arbitrary type
-- **limit** (number, *optional*): cap the number of removals to this value
-- returns **newArray** (array): *anyArray* with *value* removed
-
-**Examples**
-
-@startDocuBlockInline aqlArrayRemoveValue_1
-@EXAMPLE_AQL{aqlArrayRemoveValue_1}
-RETURN REMOVE_VALUE( [ "a", "b", "b", "a", "c" ], "a" )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayRemoveValue_1
-
-@startDocuBlockInline aqlArrayRemoveValue_2
-@EXAMPLE_AQL{aqlArrayRemoveValue_2}
-RETURN REMOVE_VALUE( [ "a", "b", "b", "a", "c" ], "a", 1 )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayRemoveValue_2
-
-## REMOVE_VALUES()
-
-`REMOVE_VALUES(anyArray, values) → newArray`
-
-Remove all occurrences of any of the *values* from *anyArray*.
-
-- **anyArray** (array): array with elements of arbitrary type
-- **values** (array): an array with elements of arbitrary type, that shall
- be removed from *anyArray*
-- returns **newArray** (array): *anyArray* with all individual *values* removed
-
-**Example**
-
-@startDocuBlockInline aqlArrayRemoveValues_1
-@EXAMPLE_AQL{aqlArrayRemoveValues_1}
-RETURN REMOVE_VALUES( [ "a", "a", "b", "c", "d", "e", "f" ], [ "a", "f", "d" ] )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayRemoveValues_1
-
-## REVERSE()
-
-`REVERSE(anyArray) → reversedArray`
-
-Return an array with its elements reversed.
-
-- **anyArray** (array): array with elements of arbitrary type
-- returns **reversedArray** (array): a new array with all elements of *anyArray* in
- reversed order
-
-**Example**
-
-@startDocuBlockInline aqlArrayReverse_1
-@EXAMPLE_AQL{aqlArrayReverse_1}
-RETURN REVERSE ( [2,4,6,8,10] )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayReverse_1
-
-## SHIFT()
-
-`SHIFT(anyArray) → newArray`
-
-Remove the first element of *anyArray*.
-
-To prepend an element (left side), see [UNSHIFT()](#unshift).
-To remove the last element, see [POP()](#pop).
-To remove an element at an arbitrary position, see [REMOVE_NTH()](#removenth).
-
-- **anyArray** (array): array with elements of arbitrary type
-- returns **newArray** (array): *anyArray* without the left-most element. If *anyArray*
- is already empty or has only one element left, an empty array is returned.
-
-**Examples**
-
-@startDocuBlockInline aqlArrayShift_1
-@EXAMPLE_AQL{aqlArrayShift_1}
-RETURN SHIFT( [ 1, 2, 3, 4 ] )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayShift_1
-
-@startDocuBlockInline aqlArrayShift_2
-@EXAMPLE_AQL{aqlArrayShift_2}
-RETURN SHIFT( [ 1 ] )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayShift_2
-
-## SLICE()
-
-`SLICE(anyArray, start, length) → newArray`
-
-Extract a slice of *anyArray*.
-
-- **anyArray** (array): array with elements of arbitrary type
-- **start** (number): start extraction at this element. Positions start at 0.
- Negative values indicate positions from the end of the array.
-- **length** (number, *optional*): extract up to *length* elements, or all
- elements from *start* up to *length* if negative (exclusive)
-- returns **newArray** (array): the specified slice of *anyArray*. If *length*
- is not specified, all array elements starting at *start* will be returned.
-
-**Examples**
-
-@startDocuBlockInline aqlArraySlice_1
-@EXAMPLE_AQL{aqlArraySlice_1}
-RETURN SLICE( [ 1, 2, 3, 4, 5 ], 0, 1 )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArraySlice_1
-
-@startDocuBlockInline aqlArraySlice_2
-@EXAMPLE_AQL{aqlArraySlice_2}
-RETURN SLICE( [ 1, 2, 3, 4, 5 ], 1, 2 )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArraySlice_2
-
-@startDocuBlockInline aqlArraySlice_3
-@EXAMPLE_AQL{aqlArraySlice_3}
-RETURN SLICE( [ 1, 2, 3, 4, 5 ], 3 )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArraySlice_3
-
-@startDocuBlockInline aqlArraySlice_4
-@EXAMPLE_AQL{aqlArraySlice_4}
-RETURN SLICE( [ 1, 2, 3, 4, 5 ], 1, -1 )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArraySlice_4
-
-@startDocuBlockInline aqlArraySlice_5
-@EXAMPLE_AQL{aqlArraySlice_5}
-RETURN SLICE( [ 1, 2, 3, 4, 5 ], 0, -2 )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArraySlice_5
-
-@startDocuBlockInline aqlArraySlice_6
-@EXAMPLE_AQL{aqlArraySlice_6}
-RETURN SLICE( [ 1, 2, 3, 4, 5 ], -3, 2 )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArraySlice_6
-
-## SORTED()
-
-`SORTED(anyArray) → newArray`
-
-Sort all elements in *anyArray*. The function will use the default comparison
-order for AQL value types.
-
-- **anyArray** (array): array with elements of arbitrary type
-- returns **newArray** (array): *anyArray*, with elements sorted
-
-**Example**
-
-@startDocuBlockInline aqlArraySorted_1
-@EXAMPLE_AQL{aqlArraySorted_1}
-RETURN SORTED( [ 8,4,2,10,6 ] )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArraySorted_1
-
-## SORTED_UNIQUE()
-
-`SORTED_UNIQUE(anyArray) → newArray`
-
-Sort all elements in *anyArray*. The function will use the default comparison
-order for AQL value types. Additionally, the values in the result array will
-be made unique.
-
-- **anyArray** (array): array with elements of arbitrary type
-- returns **newArray** (array): *anyArray*, with elements sorted and duplicates
- removed
-
-**Example**
-
-@startDocuBlockInline aqlArraySortedUnique_1
-@EXAMPLE_AQL{aqlArraySortedUnique_1}
-RETURN SORTED_UNIQUE( [ 8,4,2,10,6,2,8,6,4 ] )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArraySortedUnique_1
-
-## UNION()
-
-`UNION(array1, array2, ... arrayN) → newArray`
-
-Return the union of all arrays specified.
-
-Other set operations are [MINUS()](#minus),
-[INTERSECTION()](#intersection) and
-[OUTERSECTION()](#outersection).
-
-- **arrays** (array, *repeatable*): an arbitrary number of arrays as multiple
- arguments (at least 2)
-- returns **newArray** (array): all array elements combined in a single array,
- in any order
-
-**Examples**
-
-@startDocuBlockInline aqlArrayUnion_1
-@EXAMPLE_AQL{aqlArrayUnion_1}
-RETURN UNION(
- [ 1, 2, 3 ],
- [ 1, 2 ]
-)
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayUnion_1
-
-Note: No duplicates will be removed. In order to remove duplicates, either use
-[UNION_DISTINCT()](#uniondistinct)
-or apply [UNIQUE()](#unique) on the
-result of *UNION()*:
-
-@startDocuBlockInline aqlArrayUnion_2
-@EXAMPLE_AQL{aqlArrayUnion_2}
-RETURN UNIQUE(
- UNION(
- [ 1, 2, 3 ],
- [ 1, 2 ]
- )
-)
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayUnion_2
-
-## UNION_DISTINCT()
-
-`UNION_DISTINCT(array1, array2, ... arrayN) → newArray`
-
-Return the union of distinct values of all arrays specified.
-
-- **arrays** (array, *repeatable*): an arbitrary number of arrays as multiple
- arguments (at least 2)
-- returns **newArray** (array): the elements of all given arrays in a single
- array, without duplicates, in any order
-
-**Example**
-
-@startDocuBlockInline aqlArrayUnionDistinct_1
-@EXAMPLE_AQL{aqlArrayUnionDistinct_1}
-RETURN UNION_DISTINCT(
- [ 1, 2, 3 ],
- [ 1, 2 ]
-)
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayUnionDistinct_1
-
-## UNIQUE()
-
-`UNIQUE(anyArray) → newArray`
-
-Return all unique elements in *anyArray*. To determine uniqueness, the
-function will use the comparison order.
-
-- **anyArray** (array): array with elements of arbitrary type
-- returns **newArray** (array): *anyArray* without duplicates, in any order
-
-**Example**
-
-@startDocuBlockInline aqlArrayUnique_1
-@EXAMPLE_AQL{aqlArrayUnique_1}
-RETURN UNIQUE( [ 1,2,2,3,3,3,4,4,4,4,5,5,5,5,5 ] )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayUnique_1
-
-## UNSHIFT()
-
-`UNSHIFT(anyArray, value, unique) → newArray`
-
-Prepend *value* to *anyArray* (left side).
-
-To remove the first element, see [SHIFT()](#shift).
-To append a value (right side), see [PUSH()](#push).
-
-- **anyArray** (array): array with elements of arbitrary type
-- **value** (any): an element of arbitrary type
-- **unique** (bool): if set to *true*, then *value* is not added if already
- present in the array. The default is *false*.
-- returns **newArray** (array): *anyArray* with *value* added at the start
- (left side)
-
-Note: The *unique* flag only controls whether *value* is added if it is already
-present in *anyArray*. Duplicate elements that already exist in *anyArray* will not be
-removed. To make an array unique, use the [UNIQUE()](#unique) function.
-
-**Examples**
-
-@startDocuBlockInline aqlArrayUnshift_1
-@EXAMPLE_AQL{aqlArrayUnshift_1}
-RETURN UNSHIFT( [ 1, 2, 3 ], 4 )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayUnshift_1
-
-@startDocuBlockInline aqlArrayUnshift_2
-@EXAMPLE_AQL{aqlArrayUnshift_2}
-RETURN UNSHIFT( [ 1, 2, 3 ], 2, true )
-@END_EXAMPLE_AQL
-@endDocuBlock aqlArrayUnshift_2
diff --git a/Documentation/Books/AQL/Functions/Date.md b/Documentation/Books/AQL/Functions/Date.md
deleted file mode 100644
index a9be5f176ab8..000000000000
--- a/Documentation/Books/AQL/Functions/Date.md
+++ /dev/null
@@ -1,697 +0,0 @@
-Date functions
-==============
-
-AQL offers functionality to work with dates. Dates are not a data type of their own in
-AQL (nor are they in JSON, which is usually used as the format to ship data into and
-out of ArangoDB). Instead, dates in AQL are represented by either numbers or strings.
-
-All date function operations are done in the *unix time* system. Unix time counts
-all non-leap seconds beginning with January 1st 1970 00:00:00.000 UTC, also known as
-the Unix epoch. A point in time is called a timestamp. A timestamp has the same value
-at every point on earth. The date functions use millisecond precision for timestamps.
-
-Time unit definitions:
-
-* millisecond: 1/1000 of a second
-* second: one [SI second](https://www.bipm.org/en/publications/si-brochure/second.html)
-* minute: one minute is defined as 60 seconds
-* hour: one hour is defined as 60 minutes
-* day: one day is defined as 24 hours
-* week: one week is defined as 7 days
-* year: one year is defined as 365.2425 days
-* month: one month is defined as 1/12 of a year
-
-All functions that require dates as arguments accept the following input values:
-
-- numeric timestamps, millisecond precision;
- An example timestamp value is *1399472349522*, which translates to
- *2014-05-07T14:19:09.522Z*.
-
-- date time strings in formats *YYYY-MM-DDTHH:MM:SS.MMM*,
-  *YYYY-MM-DD HH:MM:SS.MMM* or *YYYY-MM-DD*; milliseconds are always optional.
-  A time offset may optionally be added at the end of the string, with the
-  hours and minutes that need to be added to or subtracted from the date time value.
-  For example, *2014-05-07T14:19:09+01:00* can be used to specify a one hour offset,
-  and *2014-05-07T14:19:09+07:30* for an offset of seven and a half hours.
-  Negative offsets are also possible. Instead of an offset, a *Z* can be used
-  to indicate UTC / Zulu time.
-
- An example value is *2014-05-07T14:19:09.522Z* meaning May 7th 2014, 14:19:09 and
- 522 milliseconds, UTC / Zulu time. Another example value without time component is
- *2014-05-07Z*.
-
-```js
-DATE_HOUR( 2 * 60 * 60 * 1000 ) // 2
-DATE_HOUR("1970-01-01T02:00:00") // 2
-```
-
-You are of course free to store age determinations of specimens, incomplete or fuzzy
-dates and the like in other, more appropriate ways. AQL's date functions will
-most certainly not be of any help for such dates, but you can still use language
-constructs like [SORT](../Operations/Sort.md) (which also supports sorting of arrays)
-and [indexes](../../Manual/Indexing/index.html) like skiplists.
-
-Current date and time
----------------------
-
-### DATE_NOW()
-
-`DATE_NOW() → timestamp`
-
-Get the current unix time as numeric timestamp.
-
-- returns **timestamp** (number): the current unix time as a timestamp.
- The return value has millisecond precision. To convert the return value to
- seconds, divide it by 1000.
-
-Note that this function is evaluated on every invocation and may return
-different values when invoked multiple times in the same query. Assign it
-to a variable to use the exact same timestamp multiple times.
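-
-For example, to use the exact same point in time more than once in a query,
-assign it to a variable first:
-
-```js
-LET now = DATE_NOW() // e.g. 1399472349522
-RETURN [ now, now ]  // both array elements use the very same timestamp
-```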
-
-Conversion
-----------
-
-*DATE_TIMESTAMP()* and *DATE_ISO8601()* can be used to convert ISO 8601 date time
-strings to numeric timestamps and numeric timestamps to ISO 8601 date time strings.
-
-Both also support individual date components as separate function arguments,
-in the following order:
-
-- year
-- month
-- day
-- hour
-- minute
-- second
-- millisecond
-
-All components following *day* are optional and can be omitted. Note that no
-time offset can be specified when using separate date components, and UTC /
-Zulu time will be used.
-
-The following calls to *DATE_TIMESTAMP()* are equivalent and will all return
-*1399472349522*:
-
-```js
-DATE_TIMESTAMP("2014-05-07T14:19:09.522")
-DATE_TIMESTAMP("2014-05-07T14:19:09.522Z")
-DATE_TIMESTAMP("2014-05-07 14:19:09.522")
-DATE_TIMESTAMP("2014-05-07 14:19:09.522Z")
-DATE_TIMESTAMP(2014, 5, 7, 14, 19, 9, 522)
-DATE_TIMESTAMP(1399472349522)
-```
-
-The same is true for calls to *DATE_ISO8601()* that also accepts variable input
-formats:
-
-```js
-DATE_ISO8601("2014-05-07T14:19:09.522Z")
-DATE_ISO8601("2014-05-07 14:19:09.522Z")
-DATE_ISO8601(2014, 5, 7, 14, 19, 9, 522)
-DATE_ISO8601(1399472349522)
-```
-
-The above functions are all equivalent and will return *"2014-05-07T14:19:09.522Z"*.
-
-### DATE_ISO8601()
-
-`DATE_ISO8601(date) → dateString`
-
-Return an ISO 8601 date time string from *date*.
-The date time string will always use UTC / Zulu time, indicated by the *Z* at its end.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- returns **dateString**: date and time expressed according to ISO 8601, in Zulu time
-
-`DATE_ISO8601(year, month, day, hour, minute, second, millisecond) → dateString`
-
-Return an ISO 8601 date time string, but allow the individual
-date components to be specified separately. All parameters after *day* are optional.
-
-- **year** (number): typically in the range 0..9999, e.g. *2017*
-- **month** (number): 1..12 for January through December
-- **day** (number): 1..31 (upper bound depends on number of days in month)
-- **hour** (number, *optional*): 0..23
-- **minute** (number, *optional*): 0..59
-- **second** (number, *optional*): 0..59
-- **millisecond** (number, *optional*): 0..999
-- returns **dateString**: date and time expressed according to ISO 8601, in Zulu time
-
-### DATE_TIMESTAMP()
-
-`DATE_TIMESTAMP(date) → timestamp`
-
-Create a timestamp value from *date*. The return value has millisecond precision.
-To convert the return value to seconds, divide it by 1000.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- returns **timestamp** (number): numeric timestamp
-
-`DATE_TIMESTAMP(year, month, day, hour, minute, second, millisecond) → timestamp`
-
-Create a timestamp value, but allow the individual date components to be specified
-separately. All parameters after *day* are optional.
-
-- **year** (number): typically in the range 0..9999, e.g. *2017*
-- **month** (number): 1..12 for January through December
-- **day** (number): 1..31 (upper bound depends on number of days in month)
-- **hour** (number, *optional*): 0..23
-- **minute** (number, *optional*): 0..59
-- **second** (number, *optional*): 0..59
-- **millisecond** (number, *optional*): 0..999
-- returns **timestamp** (number): numeric timestamp
-
-Negative values are not allowed; they result in *null* and cause a warning.
-Values greater than the upper range bound overflow to the larger components
-(e.g. an hour of 26 is automatically turned into an additional day and two hours):
-
-```js
-DATE_TIMESTAMP(2016, 12, -1) // returns null and issues a warning
-DATE_TIMESTAMP(2016, 2, 32) // returns 1456963200000, which is March 3rd, 2016
-DATE_TIMESTAMP(1970, 1, 1, 26) // returns 93600000, which is January 2nd, 1970, at 2 a.m.
-```
-
-### IS_DATESTRING()
-
-`IS_DATESTRING(value) → bool`
-
-Check if an arbitrary string is suitable for interpretation as a date time string.
-
-- **value** (string): an arbitrary string
-- returns **bool** (bool): *true* if *value* is a string that can be used
- in a date function. This includes partial dates such as *2015* or *2015-10* and
- strings containing invalid dates such as *2015-02-31*. The function will return
- *false* for all non-string values, even if some of them may be usable in date
- functions.
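-
-For illustration, the rules described above imply results like the following:
-
-```js
-IS_DATESTRING("2015-10")         // true (partial date)
-IS_DATESTRING("2015-02-31")      // true (syntactically a date string, although the date is invalid)
-IS_DATESTRING("this is no date") // false
-IS_DATESTRING(1399472349522)     // false (not a string, despite being a valid timestamp)
-```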
-
-Processing
-----------
-
-### DATE_DAYOFWEEK()
-
-`DATE_DAYOFWEEK(date) → weekdayNumber`
-
-Return the weekday number of *date*.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- returns **weekdayNumber** (number): 0..6 as follows:
- - 0 – Sunday
- - 1 – Monday
- - 2 – Tuesday
- - 3 – Wednesday
- - 4 – Thursday
- - 5 – Friday
- - 6 – Saturday
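-
-For example, using the example date from the introduction (which should fall on
-a Wednesday):
-
-```js
-DATE_DAYOFWEEK("2014-05-07")  // 3 (Wednesday)
-DATE_DAYOFWEEK(1399472349522) // 3 (same date as numeric timestamp)
-```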
-
-### DATE_YEAR()
-
-`DATE_YEAR(date) → year`
-
-Return the year of *date*.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- returns **year** (number): the year part of *date* as a number
-
-### DATE_MONTH()
-
-`DATE_MONTH(date) → month`
-
-Return the month of *date*.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- returns **month** (number): the month part of *date* as a number
-
-### DATE_DAY()
-
-`DATE_DAY(date) → day`
-
-Return the day of *date*.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- returns **day** (number): the day part of *date* as a number
-
-### DATE_HOUR()
-
-`DATE_HOUR(date) → hour`
-
-Return the hour of *date*.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- returns **hour** (number): the hour part of *date* as a number
-
-### DATE_MINUTE()
-
-`DATE_MINUTE(date) → minute`
-
-Return the minute of *date*.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- returns **minute** (number): the minute part of *date* as a number
-
-### DATE_SECOND()
-
-`DATE_SECOND(date) → second`
-
-Return the second of *date*.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- returns **second** (number): the seconds part of *date* as a number
-
-### DATE_MILLISECOND()
-
-`DATE_MILLISECOND(date) → millisecond`
-
-Return the millisecond of *date*.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- returns **millisecond** (number): the milliseconds part of *date* as a number
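-
-For illustration, applied to the example date from the introduction, the
-component functions above should return:
-
-```js
-DATE_YEAR("2014-05-07T14:19:09.522Z")        // 2014
-DATE_MONTH("2014-05-07T14:19:09.522Z")       // 5
-DATE_DAY("2014-05-07T14:19:09.522Z")         // 7
-DATE_HOUR("2014-05-07T14:19:09.522Z")        // 14
-DATE_MINUTE("2014-05-07T14:19:09.522Z")      // 19
-DATE_SECOND("2014-05-07T14:19:09.522Z")      // 9
-DATE_MILLISECOND("2014-05-07T14:19:09.522Z") // 522
-```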
-
-### DATE_DAYOFYEAR()
-
-`DATE_DAYOFYEAR(date) → dayOfYear`
-
-Return the day of year of *date*.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- returns **dayOfYear** (number): the day of year number of *date*.
-  The return values range from 1 to 365, or up to 366 in a leap year.
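-
-For example (the second date lies in a leap year):
-
-```js
-DATE_DAYOFYEAR("2014-05-07") // 127
-DATE_DAYOFYEAR("2016-12-31") // 366
-```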
-
-### DATE_ISOWEEK()
-
-`DATE_ISOWEEK(date) → weekDate`
-
-Return the week date of *date* according to ISO 8601.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- returns **weekDate** (number): the ISO week date of *date*. The return values
- range from 1 to 53. Monday is considered the first day of the week. There are no
- fractional weeks, thus the last days in December may belong to the first week of
- the next year, and the first days in January may be part of the previous year's
- last week.
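-
-For illustration, assuming the ISO week date rules described above:
-
-```js
-DATE_ISOWEEK("2014-05-07") // 19
-DATE_ISOWEEK("2016-01-01") // 53 (belongs to the last week of 2015)
-```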
-
-### DATE_LEAPYEAR()
-
-`DATE_LEAPYEAR(date) → leapYear`
-
-Return whether *date* is in a leap year.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- returns **leapYear** (bool): *true* if *date* is in a leap year, *false* otherwise
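-
-For example:
-
-```js
-DATE_LEAPYEAR("2016-01-01") // true (2016 is a leap year)
-DATE_LEAPYEAR("2017-01-01") // false
-```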
-
-### DATE_QUARTER()
-
-`DATE_QUARTER(date) → quarter`
-
-Return which quarter *date* belongs to.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- returns **quarter** (number): the quarter of the given date (1-based):
- - 1 – January, February, March
- - 2 – April, May, June
- - 3 – July, August, September
- - 4 – October, November, December
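-
-For example:
-
-```js
-DATE_QUARTER("2014-05-07") // 2 (May belongs to the second quarter)
-```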
-
-### DATE_DAYS_IN_MONTH()
-
-`DATE_DAYS_IN_MONTH(date) → daysInMonth`
-
-Return the number of days in the month of *date*.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- returns **daysInMonth** (number): the number of days in *date*'s month (28..31)
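-
-For example:
-
-```js
-DATE_DAYS_IN_MONTH("2016-02-01") // 29 (2016 is a leap year)
-DATE_DAYS_IN_MONTH("2017-02-01") // 28
-DATE_DAYS_IN_MONTH("2017-04-01") // 30
-```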
-
-### DATE_TRUNC()
-
-`DATE_TRUNC(date, unit) → isoDate`
-
-Truncates the given date after *unit* and returns the modified date.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- **unit** (string): either of the following to specify the time unit (case-insensitive):
- - y, year, years
- - m, month, months
- - d, day, days
- - h, hour, hours
- - i, minute, minutes
- - s, second, seconds
- - f, millisecond, milliseconds
-- returns **isoDate** (string): the truncated ISO 8601 date time string
-
-```js
-DATE_TRUNC('2017-02-03', 'month') // 2017-02-01T00:00:00.000Z
-DATE_TRUNC('2017-02-03 04:05:06', 'hours') // 2017-02-03T04:00:00.000Z
-```
-
-### DATE_FORMAT()
-
-`DATE_FORMAT(date, format) → str`
-
-Format a date according to the given format string.
-
-- **date** (string|number): a date string or timestamp
-- **format** (string): a format string, see below
-- returns **str** (string): a formatted date string
-
-*format* supports the following placeholders (case-insensitive):
-
-- %t – timestamp, in milliseconds since midnight 1970-01-01
-- %z – ISO date (0000-00-00T00:00:00.000Z)
-- %w – day of week (0..6)
-- %y – year (0..9999)
-- %yy – year (00..99), abbreviated (last two digits)
-- %yyyy – year (0000..9999), padded to length of 4
-- %yyyyyy – year (-009999 .. +009999), with sign prefix and padded to length of 6
-- %m – month (1..12)
-- %mm – month (01..12), padded to length of 2
-- %d – day (1..31)
-- %dd – day (01..31), padded to length of 2
-- %h – hour (0..23)
-- %hh – hour (00..23), padded to length of 2
-- %i – minute (0..59)
-- %ii – minute (00..59), padded to length of 2
-- %s – second (0..59)
-- %ss – second (00..59), padded to length of 2
-- %f – millisecond (0..999)
-- %fff – millisecond (000..999), padded to length of 3
-- %x – day of year (1..366)
-- %xxx – day of year (001..366), padded to length of 3
-- %k – ISO week date (1..53)
-- %kk – ISO week date (01..53), padded to length of 2
-- %l – leap year (0 or 1)
-- %q – quarter (1..4)
-- %a – days in month (28..31)
-- %mmm – abbreviated English name of month (Jan..Dec)
-- %mmmm – English name of month (January..December)
-- %www – abbreviated English name of weekday (Sun..Sat)
-- %wwww – English name of weekday (Sunday..Saturday)
-- %& – special escape sequence for rare occasions
-- %% – literal %
-- % – ignored
-
-`%yyyy` does not enforce a length of 4 for years before 0 and past 9999.
-The same format as for `%yyyyyy` will be used instead. `%yy` preserves the
-sign for negative years and may thus return 3 characters in total.
-
-Single `%` characters will be ignored. Use `%%` for a literal `%`. To resolve
-ambiguities like in `%mmonth` (unpadded month number + the string "month")
-between `%mm` + "onth" and `%m` + "month", use the escape sequence `%&`:
-`%m%&month`.
-
-Note that *DATE_FORMAT()* is a rather costly operation and may not be suitable for large
-datasets (like over 1 million dates). If possible, avoid formatting dates on
-the server side and leave it up to the client to do so. This function should only
-be used for special date comparisons or to store the formatted dates in the
-database. For better performance, use the primitive `DATE_*()` functions
-together with `CONCAT()` if possible.
-
-Examples:
-
-```js
-DATE_FORMAT(DATE_NOW(), "%q/%yyyy") // quarter and year (e.g. "3/2015")
-DATE_FORMAT(DATE_NOW(), "%dd.%mm.%yyyy %hh:%ii:%ss,%fff") // e.g. "18.09.2015 15:30:49,374"
-DATE_FORMAT("1969", "Summer of '%yy") // "Summer of '69"
-DATE_FORMAT("2016", "%%l = %l") // "%l = 1" (2016 is a leap year)
-DATE_FORMAT("2016-03-01", "%xxx%") // "063", trailing % ignored
-```
-
-Comparison and calculation
---------------------------
-
-### DATE_ADD()
-
-`DATE_ADD(date, amount, unit) → isoDate`
-
-Add *amount* given in *unit* to *date* and return the calculated date.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- **amount** (number|string): number of *unit*s to add (positive value) or
- subtract (negative value). It is recommended to use positive values only,
- and use [DATE_SUBTRACT()](#datesubtract) for subtractions instead.
-- **unit** (string): either of the following to specify the time unit to add or
- subtract (case-insensitive):
- - y, year, years
- - m, month, months
- - w, week, weeks
- - d, day, days
- - h, hour, hours
- - i, minute, minutes
- - s, second, seconds
- - f, millisecond, milliseconds
-- returns **isoDate** (string): the calculated ISO 8601 date time string
-
-```js
-DATE_ADD(DATE_NOW(), -1, "day") // yesterday; also see DATE_SUBTRACT()
-DATE_ADD(DATE_NOW(), 3, "months") // in three months
-DATE_ADD(DATE_ADD("2015-04-01", 5, "years"), 1, "month") // May 1st 2020
-DATE_ADD("2015-04-01", 12*5 + 1, "months") // also May 1st 2020
-DATE_ADD(DATE_TIMESTAMP(DATE_YEAR(DATE_NOW()), 12, 24), -4, "years") // Christmas four years ago
-DATE_ADD(DATE_ADD("2016-02", "month", 1), -1, "day") // last day of February (29th, because 2016 is a leap year!)
-```
-
-`DATE_ADD(date, isoDuration) → isoDate`
-
-You may also pass an ISO duration string as *amount* and leave out *unit*.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- **isoDuration** (string): an ISO 8601 duration string to add to *date*, see below
-- returns **isoDate** (string): the calculated ISO 8601 date time string
-
-The format is `P_Y_M_W_DT_H_M_._S`, where underscores stand for digits and
-letters for time intervals - except for the separators `P` (period) and `T` (time).
-The meanings of the other letters are:
-- Y – years
-- M – months (if before T)
-- W – weeks
-- D – days
-- H – hours
-- M – minutes (if after T)
-- S – seconds (optionally with 3 decimal places for milliseconds)
-
-The string must be prefixed by a `P`. A separating `T` is only required if
-`H`, `M` and/or `S` are specified. You only need to specify the needed pairs
-of letters and numbers.
-
-```js
-DATE_ADD(DATE_NOW(), "P1Y") // add 1 year
-DATE_ADD(DATE_NOW(), "P3M2W") // add 3 months and 2 weeks
-DATE_ADD(DATE_NOW(), "P5DT26H") // add 5 days and 26 hours (=6 days and 2 hours)
-DATE_ADD("2000-01-01", "PT4H") // add 4 hours
-DATE_ADD("2000-01-01", "PT30M44.4S" // add 30 minutes, 44 seconds and 400 ms
-DATE_ADD("2000-01-01", "P1Y2M3W4DT5H6M7.89S" // add a bit of everything
-```
-
-### DATE_SUBTRACT()
-
-`DATE_SUBTRACT(date, amount, unit) → isoDate`
-
-Subtract *amount* given in *unit* from *date* and return the calculated date.
-
-It works the same as [DATE_ADD()](#dateadd), except that it subtracts. It is
-equivalent to calling *DATE_ADD()* with a negative amount, except that
-*DATE_SUBTRACT()* can also subtract ISO durations. Note that negative ISO
-durations are not supported (i.e. starting with `-P`, like `-P1Y`).
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- **amount** (number|string): number of *unit*s to subtract (positive value) or
- add (negative value). It is recommended to use positive values only,
- and use [DATE_ADD()](#dateadd) for additions instead.
-- **unit** (string): either of the following to specify the time unit to add or
- subtract (case-insensitive):
- - y, year, years
- - m, month, months
- - w, week, weeks
- - d, day, days
- - h, hour, hours
- - i, minute, minutes
- - s, second, seconds
- - f, millisecond, milliseconds
-- returns **isoDate** (string): the calculated ISO 8601 date time string
-
-`DATE_SUBTRACT(date, isoDuration) → isoDate`
-
-You may also pass an ISO duration string as *amount* and leave out *unit*.
-
-- **date** (number|string): numeric timestamp or ISO 8601 date time string
-- **isoDuration** (string): an ISO 8601 duration string to subtract from *date*,
- see below
-- returns **isoDate** (string): the calculated ISO 8601 date time string
-
-The format is `P_Y_M_W_DT_H_M_._S`, where underscores stand for digits and
-letters for time intervals - except for the separators `P` (period) and `T` (time).
-The meanings of the other letters are:
-- Y – years
-- M – months (if before T)
-- W – weeks
-- D – days
-- H – hours
-- M – minutes (if after T)
-- S – seconds (optionally with 3 decimal places for milliseconds)
-
-The string must be prefixed by a `P`. A separating `T` is only required if
-`H`, `M` and/or `S` are specified. You only need to specify the needed pairs
-of letters and numbers.
-
-```js
-DATE_SUBTRACT(DATE_NOW(), 1, "day") // yesterday
-DATE_SUBTRACT(DATE_TIMESTAMP(DATE_YEAR(DATE_NOW()), 12, 24), 4, "years") // Christmas four years ago
-DATE_SUBTRACT(DATE_ADD("2016-02", "month", 1), 1, "day") // last day of February (29th, because 2016 is a leap year!)
-DATE_SUBTRACT(DATE_NOW(), "P4D") // four days ago
-DATE_SUBTRACT(DATE_NOW(), "PT1H3M") // 1 hour and 30 minutes ago
-```
-
-### DATE_DIFF()
-
-`DATE_DIFF(date1, date2, unit, asFloat) → diff`
-
-Calculate the difference between two dates in given time *unit*, optionally
-with decimal places.
-
-- **date1** (number|string): numeric timestamp or ISO 8601 date time string
-- **date2** (number|string): numeric timestamp or ISO 8601 date time string
-- **unit** (string): either of the following to specify the time unit to return the
- difference in (case-insensitive):
- - y, year, years
- - m, month, months
- - w, week, weeks
- - d, day, days
- - h, hour, hours
- - i, minute, minutes
- - s, second, seconds
- - f, millisecond, milliseconds
-- **asFloat** (boolean, *optional*): if set to *true*, decimal places will be
- preserved in the result. The default is *false* and an integer is returned.
-- returns **diff** (number): the calculated difference as number in *unit*.
- The value will be negative if *date2* is before *date1*.
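-
-For illustration, the following calls should return:
-
-```js
-DATE_DIFF("2014-05-07", "2014-05-08", "days")                // 1
-DATE_DIFF("2014-05-08", "2014-05-07", "days")                // -1 (date2 is before date1)
-DATE_DIFF("2014-05-07", "2014-05-07T12:00:00", "days", true) // 0.5
-```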
-
-### DATE_COMPARE()
-
-`DATE_COMPARE(date1, date2, unitRangeStart, unitRangeEnd) → bool`
-
-Check if two partial dates match.
-
-- **date1** (number|string): numeric timestamp or ISO 8601 date time string
-- **date2** (number|string): numeric timestamp or ISO 8601 date time string
-- **unitRangeStart** (string): unit to start from, see below
-- **unitRangeEnd** (string, *optional*): unit to end with, leave out to only
- compare the component as specified by *unitRangeStart*. An error is raised if
- *unitRangeEnd* is a unit before *unitRangeStart*.
-- returns **bool** (bool): *true* if the dates match, *false* otherwise
-
-The parts to compare are defined by a range of time units. The full range is:
-years, months, days, hours, minutes, seconds, milliseconds (in this order).
-
-All components of *date1* and *date2* as specified by the range will be compared.
-You can refer to the units as:
-
-- y, year, years
-- m, month, months
-- d, day, days
-- h, hour, hours
-- i, minute, minutes
-- s, second, seconds
-- f, millisecond, milliseconds
-
-```js
-// Compare months and days, true on birthdays if you're born on 4th of April
-DATE_COMPARE("1985-04-04", DATE_NOW(), "months", "days")
-
-// Will only match on one day if the current year is a leap year!
-// You may want to add or subtract one day from date1 to match every year.
-DATE_COMPARE("1984-02-29", DATE_NOW(), "months", "days")
-
-// compare years, months and days (true, because it's the same day)
-DATE_COMPARE("2001-01-01T15:30:45.678Z", "2001-01-01T08:08:08.008Z", "years", "days")
-```
-
-You can directly compare ISO date **strings** if you want to find dates before or
-after a certain date, or in between two dates (`>=`, `>`, `<`, `<=`).
-No special date function is required. Equality tests (`==` and `!=`) will only
-match the exact same date and time however. You may use `SUBSTRING()` to
-compare partial date strings; `DATE_COMPARE()` is basically a convenience
-function for that. However, neither is really required to limit a search to a
-certain day as demonstrated here:
-
-```js
-FOR doc IN coll
- FILTER doc.date >= "2015-05-15" AND doc.date < "2015-05-16"
- RETURN doc
-```
-
-Every ISO date on that day is greater than or equal to `2015-05-15` in a string
-comparison (e.g. `2015-05-15T11:30:00.000Z`). Dates before `2015-05-15` are smaller
-and therefore filtered out by the first condition. Every date past `2015-05-15` is
-greater than this date in a string comparison, and therefore filtered out by the
-second condition. The result is that the time components in the dates you compare
-with are "ignored". The query will return every document with *date* ranging from
-`2015-05-15T00:00:00.000Z` to `2015-05-15T23:59:59.999Z`. It would also include
-`2015-05-15T24:00:00.000Z`, but that date is actually `2015-05-16T00:00:00.000Z`
-and can only occur if inserted manually (you may want to pass dates through
-[DATE_ISO8601()](#dateiso8601) to ensure a correct date representation).
-
-Leap days in leap years (29th of February) must always be handled manually,
-if you require it (e.g. for birthday checks):
-
-```js
-LET today = DATE_NOW()
-LET noLeapYear = NOT DATE_LEAPYEAR(today)
-
-FOR user IN users
- LET birthday = noLeapYear AND
- DATE_MONTH(user.birthday) == 2 AND
- DATE_DAY(user.birthday) == 29
- ? DATE_SUBTRACT(user.birthday, 1, "day") /* treat like 28th in non-leap years */
- : user.birthday
- FILTER DATE_COMPARE(today, birthday, "month", "day")
- /* includes leaplings on the 28th of February in non-leap years,
- * but excludes them in leap years which do have a 29th February.
- * Replace DATE_SUBTRACT() by DATE_ADD() to include them on the 1st of March
- * in non-leap years instead (depends on local jurisdiction).
- */
- RETURN user
-```
-
-Working with dates and indices
-------------------------------
-
-There are two recommended ways to store timestamps in ArangoDB:
- - string: UTC timestamp with [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)
- - number: [unix timestamp](https://en.wikipedia.org/wiki/Unix_time) with millisecond precision
-
-The sort order of both is identical due to the sort properties of ISO date strings.
-However, you can't mix the two types (numbers and strings) in a single attribute.
-
-You can use [skiplist indices](../../Manual/Indexing/Skiplist.html) with both date types.
-When choosing string representations, you can work with string comparisons (less than,
-greater than etc.) to express time ranges in your queries while still utilizing
-skiplist indices:
-
- @startDocuBlockInline working_with_date_time
- @EXAMPLE_ARANGOSH_OUTPUT{working_with_date_time}
- db._create("exampleTime");
- var timestamps = ["2014-05-07T14:19:09.522","2014-05-07T21:19:09.522","2014-05-08T04:19:09.522","2014-05-08T11:19:09.522","2014-05-08T18:19:09.522"];
- for (i = 0; i < 5; i++) db.exampleTime.save({value:i, ts: timestamps[i]})
- db._query("FOR d IN exampleTime FILTER d.ts > '2014-05-07T14:19:09.522' and d.ts < '2014-05-08T18:19:09.522' RETURN d").toArray()
- ~addIgnoreCollection("example")
- ~db._drop("exampleTime")
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock working_with_date_time
-
-The first and the last timestamp in the array are excluded from the result by the `FILTER`.
-
-Limitations
------------
-
-Note that dates before the year 1583 aren't allowed by the
-[ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) standard by default, because
-they lie before the official introduction of the Gregorian calendar and may thus
-be incorrect or invalid. All AQL date functions apply the same rules to every
-date according to the Gregorian calendar system, even if inappropriate. That
-does not constitute a problem, unless you deal with dates prior to 1583 and
-especially years before Christ. The standard allows negative years, but requires
-special treatment of positive years too, if negative years are used (e.g.
-`+002015-05-15` and `-000753-01-01`). This is rarely used however, and AQL does
-not use the 7-character version for years between 0 and 9999 in ISO strings.
-Keep in mind that such dates can't be properly compared to dates outside that range.
-Sorting of negative dates does not result in a meaningful order: years further in
-the past come last, while months, days and the time components are in otherwise
-correct order.
-
-Leap seconds are ignored, just as they are in JavaScript as per
-[ECMAScript Language Specifications](http://www.ecma-international.org/ecma-262/5.1/#sec-15.9.1.1).
diff --git a/Documentation/Books/AQL/Functions/Document.md b/Documentation/Books/AQL/Functions/Document.md
deleted file mode 100644
index 6906a45cfabc..000000000000
--- a/Documentation/Books/AQL/Functions/Document.md
+++ /dev/null
@@ -1,465 +0,0 @@
-Document functions
-==================
-
-AQL provides the functions listed below to operate on objects / document values.
-Also see [object access](../Fundamentals/DataTypes.md#objects--documents) for
-additional language constructs.
-
-ATTRIBUTES()
-------------
-
-`ATTRIBUTES(document, removeInternal, sort) → strArray`
-
-Return the top-level attribute keys of the *document* as an array.
-Optionally omit system attributes and sort the array.
-
-- **document** (object): an arbitrary document / object
-- **removeInternal** (bool, *optional*): whether all system attributes (*_key*, *_id* etc.,
- every attribute key that starts with an underscore) shall be omitted in the result.
- The default is *false*.
-- **sort** (bool, *optional*): optionally sort the resulting array alphabetically.
- The default is *false* and will return the attribute names in any order.
-- returns **strArray** (array): the attribute keys of the input *document* as an
- array of strings
-
-```js
-ATTRIBUTES( { "foo": "bar", "_key": "123", "_custom": "yes" } )
-// [ "foo", "_key", "_custom" ]
-
-ATTRIBUTES( { "foo": "bar", "_key": "123", "_custom": "yes" }, true )
-// [ "foo" ]
-
-ATTRIBUTES( { "foo": "bar", "_key": "123", "_custom": "yes" }, false, true )
-// [ "_custom", "_key", "foo" ]
-```
-
-Complex example to count how often every attribute key occurs in the documents
-of *collection* (expensive on large collections):
-
-```js
-LET attributesPerDocument = (
- FOR doc IN collection RETURN ATTRIBUTES(doc, true)
-)
-FOR attributeArray IN attributesPerDocument
- FOR attribute IN attributeArray
- COLLECT attr = attribute WITH COUNT INTO count
- SORT count DESC
- RETURN {attr, count}
-```
-
-COUNT()
--------
-
-This is an alias for [LENGTH()](#length).
-
-HAS()
------
-
-`HAS(document, attributeName) → isPresent`
-
-Test whether an attribute is present in the provided document.
-
-- **document** (object): an arbitrary document / object
-- **attributeName** (string): the attribute key to test for
-- returns **isPresent** (bool): *true* if *document* has an attribute named
- *attributeName*, and *false* otherwise. An attribute with a falsy value (*0*, *false*,
- empty string *""*) or *null* is also considered as present and returns *true*.
-
-```js
-HAS( { name: "Jane" }, "name" ) // true
-HAS( { name: "Jane" }, "age" ) // false
-HAS( { name: null }, "name" ) // true
-```
-
-Note that the function checks if the specified attribute exists. This is different
-from similar ways to test for the existence of an attribute, in case the attribute
-has a falsy value or is not present (implicitly *null* on object access):
-
-```js
-!!{ name: "" }.name // false
-HAS( { name: "" }, "name") // true
-
-{ name: null }.name == null // true
-{ }.name == null // true
-HAS( { name: null }, "name" ) // true
-HAS( { }, "name" ) // false
-```
-
-Note that `HAS()` can not utilize indexes. If it's not necessary to distinguish
-between explicit and implicit *null* values in your query, you may use an equality
-comparison to test for *null* and create a non-sparse index on the attribute you
-want to test against:
-
-```js
-FILTER !HAS(doc, "name") // can not use indexes
-FILTER IS_NULL(doc, "name") // can not use indexes
-FILTER doc.name == null // can utilize non-sparse indexes
-```
-
-IS_SAME_COLLECTION()
---------------------
-
-`IS_SAME_COLLECTION(collectionName, documentHandle) → bool`
-
-Return *true* if *documentHandle* has the same
-collection id as the collection specified in *collectionName*. *documentHandle* can either be
-a [document handle](../../Manual/Appendix/Glossary.html#document-handle) string, or a document with
-an *_id* attribute. The function does not validate whether the collection actually
-contains the specified document, but only compares the name of the specified collection
-with the collection name part of the specified document.
-If *documentHandle* is neither an object with an *_id* attribute nor a *string* value,
-the function will return *null* and raise a warning.
-
-- **collectionName** (string): the name of a collection as string
-- **documentHandle** (string|object): a document identifier string (e.g. *_users/1234*)
- or a regular document from a collection. Passing either a non-string or a non-document
- or a document without an *_id* attribute will result in an error.
-- returns **bool** (bool): return *true* if the collection of *documentHandle* is the same
- as *collectionName*, otherwise *false*
-
-```js
-// true
-IS_SAME_COLLECTION( "_users", "_users/my-user" )
-IS_SAME_COLLECTION( "_users", { _id: "_users/my-user" } )
-
-// false
-IS_SAME_COLLECTION( "_users", "foobar/baz")
-IS_SAME_COLLECTION( "_users", { _id: "something/else" } )
-```
-
-KEEP()
-------
-
-`KEEP(document, attributeName1, attributeName2, ... attributeNameN) → doc`
-
-Keep only the attributes *attributeName* to *attributeNameN* of *document*.
-All other attributes will be removed from the result.
-
-To do the opposite, see [UNSET()](#unset).
-
-- **document** (object): a document / object
-- **attributeNames** (string, *repeatable*): an arbitrary number of attribute
- names as multiple arguments
-- returns **doc** (object): a document with only the specified attributes on
- the top-level
-
-```js
-KEEP(doc, "firstname", "name", "likes")
-```
-
-`KEEP(document, attributeNameArray) → doc`
-
-- **document** (object): a document / object
-- **attributeNameArray** (array): an array of attribute names as strings
-- returns **doc** (object): a document with only the specified attributes on
- the top-level
-
-```js
-KEEP(doc, [ "firstname", "name", "likes" ])
-```
-
-LENGTH()
---------
-
-`LENGTH(doc) → attrCount`
-
-Determine the number of attribute keys of an object / document.
-
-- **doc** (object): a document / object
-- returns **attrCount** (number): the number of attribute keys in *doc*, regardless
- of their values
-
-*LENGTH()* can also determine the [number of elements](Array.md#length) in an array,
-the [amount of documents](Miscellaneous.md#length) in a collection and
-the [character length](String.md#length) of a string.
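-
-For example:
-
-```js
-LENGTH( { "name": "Jane", "age": 35 } ) // 2
-LENGTH( { } )                           // 0
-```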
-
-MATCHES()
----------
-
-`MATCHES(document, examples, returnIndex) → match`
-
-Compare the given *document* against each example document provided. The comparisons
-will be started with the first example. All attributes of the example will be compared
-against the attributes of *document*. If all attributes match, the comparison stops
-and the result is returned. If there is a mismatch, the function will continue the
-comparison with the next example until there are no more examples left.
-
-The *examples* can be an array of 1..n example documents or a single document,
-with any number of attributes each.
-
-Note that *MATCHES()* can not utilize indexes.
-
-- **document** (object): document to determine whether it matches any example
-- **examples** (object|array): a single document, or an array of documents to compare
- against. Specifying an empty array is not allowed.
-- **returnIndex** (bool): by setting this flag to *true*, the index of the example that
- matched will be returned (starting at offset 0), or *-1* if there was no match.
- The default is *false* and makes the function return a boolean.
-- returns **match** (bool|number): if *document* matches one of the examples, *true* is
- returned, otherwise *false*. A number is returned instead if *returnIndex* is used.
-
-```js
-LET doc = {
- name: "jane",
- age: 27,
- active: true
-}
-RETURN MATCHES(doc, { age: 27, active: true } )
-```
-
-This will return *true*, because all attributes of the example are present in the document.
-
-```js
-RETURN MATCHES(
- { "test": 1 },
- [
- { "test": 1, "foo": "bar" },
- { "foo": 1 },
- { "test": 1 }
- ], true)
-```
-
-This will return *2*, because the third example matches, and because the
-*returnIndex* flag is set to *true*.
-
-MERGE()
--------
-
-`MERGE(document1, document2, ... documentN) → mergedDocument`
-
-Merge the documents *document1* to *documentN* into a single document.
-If document attribute keys are ambiguous, the merged result will contain the values
-of the documents contained later in the argument list.
-
-- **documents** (object, *repeatable*): an arbitrary number of documents as
- multiple arguments (at least 2)
-- returns **mergedDocument** (object): a combined document
-
-Note that merging will only be done for top-level attributes. If you wish to
-merge sub-attributes, use [MERGE_RECURSIVE()](#mergerecursive) instead.
-
-Two documents with distinct attribute names can easily be merged into one:
-
-```js
-MERGE(
- { "user1": { "name": "Jane" } },
- { "user2": { "name": "Tom" } }
-)
-// { "user1": { "name": "Jane" }, "user2": { "name": "Tom" } }
-```
-
-When merging documents with identical attribute names, the attribute values of the
-latter documents will be used in the end result:
-
-```js
-MERGE(
- { "users": { "name": "Jane" } },
- { "users": { "name": "Tom" } }
-)
-// { "users": { "name": "Tom" } }
-```
-
-`MERGE(docArray) → mergedDocument`
-
-*MERGE* works with a single array parameter, too. This variant allows combining the
-attributes of multiple objects in an array into a single object.
-
-- **docArray** (array): an array of documents, as sole argument
-- returns **mergedDocument** (object): a combined document
-
-```js
-MERGE(
- [
- { foo: "bar" },
- { quux: "quetzalcoatl", ruled: true },
- { bar: "baz", foo: "done" }
- ]
-)
-```
-
-This will now return:
-
-```js
-{
- "foo": "done",
- "quux": "quetzalcoatl",
- "ruled": true,
- "bar": "baz"
-}
-```
-
-MERGE_RECURSIVE()
------------------
-
-`MERGE_RECURSIVE(document1, document2, ... documentN) → mergedDocument`
-
-Recursively merge the documents *document1* to *documentN* into a single document.
-If document attribute keys are ambiguous, the merged result will contain the values
-of the documents contained later in the argument list.
-
-- **documents** (object, *repeatable*): an arbitrary number of documents as
- multiple arguments (at least 2)
-- returns **mergedDocument** (object): a combined document
-
-For example, two documents with distinct attribute names can easily be merged into one:
-
-```js
-MERGE_RECURSIVE(
- { "user-1": { "name": "Jane", "livesIn": { "city": "LA" } } },
- { "user-1": { "age": 42, "livesIn": { "state": "CA" } } }
-)
-// { "user-1": { "name": "Jane", "livesIn": { "city": "LA", "state": "CA" }, "age": 42 } }
-```
-
-*MERGE_RECURSIVE()* does not support the single array parameter variant that *MERGE* offers.
-
-PARSE_IDENTIFIER()
-------------------
-
-`PARSE_IDENTIFIER(documentHandle) → parts`
-
-Parse a [document handle](../../Manual/Appendix/Glossary.html#document-handle) and return its
-individual parts as separate attributes.
-
-This function can be used to easily determine the
-[collection name](../../Manual/Appendix/Glossary.html#collection-name) and key of a given document.
-
-- **documentHandle** (string|object): a document identifier string (e.g. *_users/1234*)
- or a regular document from a collection. Passing either a non-string or a non-document
- or a document without an *_id* attribute will result in an error.
-- returns **parts** (object): an object with the attributes *collection* and *key*
-
-```js
-PARSE_IDENTIFIER("_users/my-user")
-// { "collection": "_users", "key": "my-user" }
-
-PARSE_IDENTIFIER( { "_id": "mycollection/mykey", "value": "some value" } )
-// { "collection": "mycollection", "key": "mykey" }
-```
-
-TRANSLATE()
------------
-
-`TRANSLATE(value, lookupDocument, defaultValue) → mappedValue`
-
-Look up the specified *value* in the *lookupDocument*. If *value* is a key in
-*lookupDocument*, then *value* will be replaced with the lookup value found.
-If *value* is not present in *lookupDocument*, then *defaultValue* will be returned
-if specified. If no *defaultValue* is specified, *value* will be returned unchanged.
-
-- **value** (string): the value to encode according to the mapping
-- **lookupDocument** (object): a key/value mapping as document
-- **defaultValue** (any, *optional*): a fallback value in case *value* is not found
-- returns **mappedValue** (any): the encoded value, or the unaltered *value* or *defaultValue*
- (if supplied) in case it couldn't be mapped
-
-```js
-TRANSLATE("FR", { US: "United States", UK: "United Kingdom", FR: "France" } )
-// "France"
-
-TRANSLATE(42, { foo: "bar", bar: "baz" } )
-// 42
-
-TRANSLATE(42, { foo: "bar", bar: "baz" }, "not found!")
-// "not found!"
-```
-
-UNSET()
--------
-
-`UNSET(document, attributeName1, attributeName2, ... attributeNameN) → doc`
-
-Remove the attributes *attributeName1* to *attributeNameN* from *document*.
-All other attributes will be preserved.
-
-To do the opposite, see [KEEP()](#keep).
-
-- **document** (object): a document / object
-- **attributeNames** (string, *repeatable*): an arbitrary number of attribute
- names as multiple arguments (at least 1)
-- returns **doc** (object): *document* without the specified attributes on the
- top-level
-
-```js
-UNSET( doc, "_id", "_key", "foo", "bar" )
-```
-
-`UNSET(document, attributeNameArray) → doc`
-
-- **document** (object): a document / object
-- **attributeNameArray** (array): an array of attribute names as strings
-- returns **doc** (object): *document* without the specified attributes on the
- top-level
-
-```js
-UNSET( doc, [ "_id", "_key", "foo", "bar" ] )
-```
-
-UNSET_RECURSIVE()
------------------
-
-`UNSET_RECURSIVE(document, attributeName1, attributeName2, ... attributeNameN) → doc`
-
-Recursively remove the attributes *attributeName1* to *attributeNameN* from
-*document* and its sub-documents. All other attributes will be preserved.
-
-- **document** (object): a document / object
-- **attributeNames** (string, *repeatable*): an arbitrary number of attribute
- names as multiple arguments (at least 1)
-- returns **doc** (object): *document* without the specified attributes on
- all levels (top-level as well as nested objects)
-
-```js
-UNSET_RECURSIVE( doc, "_id", "_key", "foo", "bar" )
-```
-
-`UNSET_RECURSIVE(document, attributeNameArray) → doc`
-
-- **document** (object): a document / object
-- **attributeNameArray** (array): an array of attribute names as strings
-- returns **doc** (object): *document* without the specified attributes on
- all levels (top-level as well as nested objects)
-
-```js
-UNSET_RECURSIVE( doc, [ "_id", "_key", "foo", "bar" ] )
-```
-
-VALUES()
---------
-
-`VALUES(document, removeInternal) → anyArray`
-
-Return the attribute values of the *document* as an array. Optionally omit
-system attributes.
-
-- **document** (object): a document / object
-- **removeInternal** (bool, *optional*): if set to *true*, then all internal attributes
- (such as *_id*, *_key* etc.) are removed from the result
-- returns **anyArray** (array): the values of *document* returned in any order
-
-```js
-VALUES( { "_key": "users/jane", "name": "Jane", "age": 35 } )
-// [ "Jane", 35, "users/jane" ]
-
-VALUES( { "_key": "users/jane", "name": "Jane", "age": 35 }, true )
-// [ "Jane", 35 ]
-```
-
-ZIP()
------
-
-`ZIP(keys, values) → doc`
-
-Return a document object assembled from the separate parameters *keys* and *values*.
-
-*keys* and *values* must be arrays and have the same length.
-
-- **keys** (array): an array of strings, to be used as attribute names in the result
-- **values** (array): an array with elements of arbitrary types, to be used as
- attribute values
-- returns **doc** (object): a document with the keys and values assembled
-
-```js
-ZIP( [ "name", "active", "hobbies" ], [ "some user", true, [ "swimming", "riding" ] ] )
-// { "name": "some user", "active": true, "hobbies": [ "swimming", "riding" ] }
-```
diff --git a/Documentation/Books/AQL/Functions/Fulltext.md b/Documentation/Books/AQL/Functions/Fulltext.md
deleted file mode 100644
index 764b6b6db4fd..000000000000
--- a/Documentation/Books/AQL/Functions/Fulltext.md
+++ /dev/null
@@ -1,76 +0,0 @@
-Fulltext functions
-==================
-
-AQL offers the following functions to filter data based on
-[fulltext indexes](../../Manual/Indexing/Fulltext.html).
-
-FULLTEXT()
-----------
-
-`FULLTEXT(coll, attribute, query, limit) → docArray`
-
-Return all documents from collection *coll*, for which the attribute *attribute*
-matches the fulltext search phrase *query*, optionally capped to *limit* results.
-
-**Note**: the *FULLTEXT()* function requires the collection *coll* to have a
-fulltext index on *attribute*. If no fulltext index is available, this function
-will fail with an error at runtime. It doesn't fail when explaining the query however.
-
-- **coll** (collection): a collection
-- **attribute** (string): the attribute name of the attribute to search in
-- **query** (string): a fulltext search expression as described below
-- **limit** (number, *optional*): if set to a non-zero value, it will cap the result
- to at most this number of documents
-- returns **docArray** (array): an array of documents
-
-*FULLTEXT()* is not meant to be used as an argument to *FILTER*,
-but rather to be used as the expression of a *FOR* statement:
-
-```js
-FOR oneMail IN FULLTEXT(emails, "body", "banana,-apple")
- RETURN oneMail._id
-```
-
-*query* is a comma-separated list of sought words (or prefixes of sought words). To
-distinguish between prefix searches and complete-match searches, each word can optionally be
-prefixed with either the *prefix:* or *complete:* qualifier. Different qualifiers can
-be mixed in the same query. Not specifying a qualifier for a search word will implicitly
-execute a complete-match search for the given word:
-
-- *FULLTEXT(emails, "body", "banana")* Will look for the word *banana* in the
-  attribute *body* of the collection *emails*.
-
-- *FULLTEXT(emails, "body", "banana,orange")* Will look for both words
- *banana* and *orange* in the mentioned attribute. Only those documents will be
- returned that contain both words.
-
-- *FULLTEXT(emails, "body", "prefix:head")* Will look for documents that contain any
- words starting with the prefix *head*.
-
-- *FULLTEXT(emails, "body", "prefix:head,complete:aspirin")* Will look for all
- documents that contain a word starting with the prefix *head* and that also contain
- the (complete) word *aspirin*. Note: specifying *complete* is optional here.
-
-- *FULLTEXT(emails, "body", "prefix:cent,prefix:subst")* Will look for all documents
- that contain a word starting with the prefix *cent* and that also contain a word
- starting with the prefix *subst*.
-
-If multiple search words (or prefixes) are given, then by default the results will be
-AND-combined, meaning only the logical intersection of all searches will be returned.
-It is also possible to combine partial results with a logical OR, and with a logical NOT:
-
-- *FULLTEXT(emails, "body", "+this,+text,+document")* Will return all documents that
- contain all the mentioned words. Note: specifying the *+* symbols is optional here.
-
-- *FULLTEXT(emails, "body", "banana,|apple")* Will return all documents that contain
- either (or both) words *banana* or *apple*.
-
-- *FULLTEXT(emails, "body", "banana,-apple")* Will return all documents that contain
- the word *banana*, but do not contain the word *apple*.
-
-- *FULLTEXT(emails, "body", "banana,pear,-cranberry")* Will return all documents that
- contain both the words *banana* and *pear*, but do not contain the word
- *cranberry*.
-
-No precedence of logical operators will be honored in a fulltext query. The query will simply
-be evaluated from left to right.
diff --git a/Documentation/Books/AQL/Functions/Geo.md b/Documentation/Books/AQL/Functions/Geo.md
deleted file mode 100644
index 3b2349522049..000000000000
--- a/Documentation/Books/AQL/Functions/Geo.md
+++ /dev/null
@@ -1,440 +0,0 @@
-Geo functions
-=============
-
-Geo utility functions
----------------------
-
-The following helper functions **can** use geo indexes, but do not have to in
-all cases. You can use all of these functions in combination with each other,
-and if you have configured a geo index it may be utilized,
-see [Geo Indexing](../../Manual/Indexing/Geo.html).
-
-### DISTANCE()
-
-`DISTANCE(latitude1, longitude1, latitude2, longitude2) → distance`
-
-Calculate the distance between two arbitrary coordinates in meters (as birds
-would fly). The value is computed using the haversine formula, which is based
-on a spherical Earth model. It's fast to compute and is accurate to around 0.3%,
-which is sufficient for most use cases such as location-aware services.
-
-- **latitude1** (number): the latitude portion of the first coordinate
-- **longitude1** (number): the longitude portion of the first coordinate
-- **latitude2** (number): the latitude portion of the second coordinate
-- **longitude2** (number): the longitude portion of the second coordinate
-- returns **distance** (number): the distance between both coordinates in **meters**
-
-```js
-// Distance from Brandenburg Gate (Berlin) to ArangoDB headquarters (Cologne)
-DISTANCE(52.5163, 13.3777, 50.9322, 6.94) // 476918.89688380965 (~477km)
-
-// Sort a small number of documents based on distance to Central Park (New York)
-FOR doc IN documents // e.g. an array of documents returned by a traversal
- SORT DISTANCE(doc.latitude, doc.longitude, 40.78, -73.97)
- RETURN doc
-```
-
-### GEO_CONTAINS()
-
-Introduced in: v3.4.0
-
-`GEO_CONTAINS(geoJsonA, geoJsonB) → bool`
-
-Checks whether the [GeoJSON object](../../Manual/Indexing/Geo.html#geojson) `geoJsonA`
-fully contains `geoJsonB` (every point in B is also in A). The object `geoJsonA` has to be of type
-`Polygon` or `MultiPolygon`; other types are not supported because containment is ill-defined.
-This function can be **optimized** by an S2-based [geospatial index](../../Manual/Indexing/Geo.html).
-
-- **geoJsonA** (object): first GeoJSON object or coordinate array (in longitude, latitude order)
-- **geoJsonB** (object): second GeoJSON object or coordinate array (in longitude, latitude order)
-- returns **bool** (bool): true when every point in B is also contained in A, false otherwise
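-
-An illustrative example, reusing the polygon coordinates from the
-[GEO_EQUALS()](#geoequals) examples below (the point is assumed to lie inside
-the polygon):
-
-```js
-LET polygon = GEO_POLYGON([
-  [-11.5, 23.5], [-10.5, 26.1], [-11.2, 27.1], [-11.5, 23.5]
-])
-RETURN GEO_CONTAINS(polygon, [ -11.2, 25.5 ]) // true
-```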
-
-
-### GEO_DISTANCE()
-
-Introduced in: v3.4.0
-
-`GEO_DISTANCE(geoJsonA, geoJsonB) → distance`
-
-Return the distance between two GeoJSON objects, measured from the **centroid**
-of each shape. For a list of supported types see the
-[geo index page](../../Manual/Indexing/Geo.html#geojson).
-
-- **geoJsonA** (object): first GeoJSON object
-- **geoJsonB** (object): second GeoJSON object
-- returns **distance** (number): the distance between the centroid points of
- the two objects
-
-```js
-LET polygon = {
- type: "Polygon",
- coordinates: [[[-11.5, 23.5], [-10.5, 26.1], [-11.2, 27.1], [-11.5, 23.5]]]
-}
-FOR doc IN collectionName
- LET distance = GEO_DISTANCE(doc.geometry, polygon) // calculates the distance
- RETURN distance
-```
-
-### GEO_EQUALS()
-
-Introduced in: v3.4.0
-
-`GEO_EQUALS(geoJsonA, geoJsonB) → bool`
-
-Checks whether two GeoJSON objects are equal or not. For a list of supported
-types see the [geo index page](../../Manual/Indexing/Geo.html#geojson).
-
-- **geoJsonA** (object): first GeoJSON object
-- **geoJsonB** (object): second GeoJSON object.
-- returns **bool** (bool): true for equality.
-
-```js
-LET polygonA = GEO_POLYGON([
- [-11.5, 23.5], [-10.5, 26.1], [-11.2, 27.1], [-11.5, 23.5]
-])
-LET polygonB = GEO_POLYGON([
- [-11.5, 23.5], [-10.5, 26.1], [-11.2, 27.1], [-11.5, 23.5]
-])
-RETURN GEO_EQUALS(polygonA, polygonB) // true
-```
-
-```js
-LET polygonA = GEO_POLYGON([
- [-11.1, 24.0], [-10.5, 26.1], [-11.2, 27.1], [-11.1, 24.0]
-])
-LET polygonB = GEO_POLYGON([
- [-11.5, 23.5], [-10.5, 26.1], [-11.2, 27.1], [-11.5, 23.5]
-])
-RETURN GEO_EQUALS(polygonA, polygonB) // false
-```
-
-### GEO_INTERSECTS()
-
-Introduced in: v3.4.0
-
-`GEO_INTERSECTS(geoJsonA, geoJsonB) → bool`
-
-Checks whether the [GeoJSON object](../../Manual/Indexing/Geo.html#geojson) `geoJsonA`
-intersects with `geoJsonB` (i.e. at least one point in B is also in A, or vice versa).
-This function can be **optimized** by an S2-based [geospatial index](../../Manual/Indexing/Geo.html).
-
-- **geoJsonA** (object): first GeoJSON object
-- **geoJsonB** (object): second GeoJSON object.
-- returns **bool** (bool): true if B intersects A, false otherwise
-
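-A hedged sketch of finding documents whose geometry overlaps a query polygon,
-assuming a hypothetical collection `routes` with a GeoJSON `geometry` attribute:
-
-```js
-// "routes" is a hypothetical collection with a GeoJSON geometry attribute
-LET area = GEO_POLYGON([
- [6.0, 50.0], [8.0, 50.0], [8.0, 52.0], [6.0, 52.0], [6.0, 50.0]
-])
-FOR doc IN routes
- FILTER GEO_INTERSECTS(area, doc.geometry)
- RETURN doc
-```
-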
-### IS_IN_POLYGON()
-
-Determine whether a coordinate is inside a polygon.
-
-{% hint 'warning' %}
-The *IS_IN_POLYGON* AQL function is **deprecated** as of ArangoDB 3.4.0 in
-favor of the new `GEO_CONTAINS` AQL function, which works with
-[GeoJSON](https://tools.ietf.org/html/rfc7946) Polygons and MultiPolygons.
-{% endhint %}
-
-`IS_IN_POLYGON(polygon, latitude, longitude) → bool`
-
-- **polygon** (array): an array of arrays with 2 elements each, representing the
- points of the polygon in the format *[lat, lon]*
-- **latitude** (number): the latitude portion of the search coordinate
-- **longitude** (number): the longitude portion of the search coordinate
-- returns **bool** (bool): *true* if the point (*latitude*, *longitude*) is
- inside the *polygon* or *false* if it's not. The result is undefined (can be
- *true* or *false*) if the specified point is exactly on a boundary of the
- polygon.
-
-```js
-// will check if the point (lat 4, lon 7) is contained inside the polygon
-IS_IN_POLYGON( [ [ 0, 0 ], [ 0, 10 ], [ 10, 10 ], [ 10, 0 ] ], 4, 7 )
-```
-
-`IS_IN_POLYGON(polygon, coord, useLonLat) → bool`
-
-The 2nd parameter can alternatively be specified as an array with two values.
-
-By default, each array element in *polygon* is expected to be in the format
-*[lat, lon]*. This can be changed by setting the 3rd parameter to *true* to
-interpret the points as *[lon, lat]*. *coord* will then also be interpreted in
-the same way.
-
-- **polygon** (array): an array of arrays with 2 elements each, representing the
- points of the polygon
-- **coord** (array): the search coordinate as a number array with two elements
-- **useLonLat** (bool, *optional*): if set to *true*, the coordinates in
- *polygon* and the search coordinate *coord* will be interpreted as
- *[lon, lat]* (GeoJSON). The default is *false* and the format *[lat, lon]* is
- expected.
-- returns **bool** (bool): *true* if the point *coord* is inside the *polygon*
- or *false* if it's not. The result is undefined (can be *true* or *false*) if
- the specified point is exactly on a boundary of the polygon.
-
-```js
-// will check if the point (lat 4, lon 7) is contained inside the polygon
-IS_IN_POLYGON( [ [ 0, 0 ], [ 0, 10 ], [ 10, 10 ], [ 10, 0 ] ], [ 4, 7 ] )
-
-// will check if the point (lat 4, lon 7) is contained inside the polygon
-IS_IN_POLYGON( [ [ 0, 0 ], [ 10, 0 ], [ 10, 10 ], [ 0, 10 ] ], [ 7, 4 ], true )
-```
-
-GeoJSON Constructors
----------------------
-
-Introduced in: v3.4.0
-
-The following helper functions are available to easily create valid GeoJSON
-output. In all cases you can write equivalent JSON yourself, but these functions
-will help you to make all your AQL queries shorter and easier to read.
-
-### GEO_LINESTRING()
-
-`GEO_LINESTRING(points) → geoJson`
-
-Construct a GeoJSON LineString.
-Needs at least two longitude/latitude pairs.
-
-- **points** (array): number array of longitude/latitude pairs
-- returns **geoJson** (object): a valid GeoJSON LineString
-
-@startDocuBlockInline aqlGeoLineString_1
-@EXAMPLE_AQL{aqlGeoLineString_1}
-RETURN GEO_LINESTRING([
- [35, 10], [45, 45]
-])
-@END_EXAMPLE_AQL
-@endDocuBlock aqlGeoLineString_1
-
-### GEO_MULTILINESTRING()
-
-`GEO_MULTILINESTRING(points) → geoJson`
-
-Construct a GeoJSON MultiLineString.
-Needs at least two elements, each consisting of a valid LineString coordinate array.
-
-- **points** (array): array of LineStrings
-- returns **geoJson** (object): a valid GeoJSON MultiLineString
-
-@startDocuBlockInline aqlGeoMultiLineString_1
-@EXAMPLE_AQL{aqlGeoMultiLineString_1}
-RETURN GEO_MULTILINESTRING([
- [[100.0, 0.0], [101.0, 1.0]],
- [[102.0, 2.0], [101.0, 2.3]]
-])
-@END_EXAMPLE_AQL
-@endDocuBlock aqlGeoMultiLineString_1
-
-### GEO_MULTIPOINT()
-
-`GEO_MULTIPOINT(points) → geoJson`
-
-Construct a GeoJSON MultiPoint. Needs at least two longitude/latitude pairs.
-
-- **points** (array): number array of longitude/latitude pairs
-- returns **geoJson** (object): a valid GeoJSON MultiPoint
-
-@startDocuBlockInline aqlGeoMultiPoint_1
-@EXAMPLE_AQL{aqlGeoMultiPoint_1}
-RETURN GEO_MULTIPOINT([
- [35, 10], [45, 45]
-])
-@END_EXAMPLE_AQL
-@endDocuBlock aqlGeoMultiPoint_1
-
-### GEO_POINT()
-
-`GEO_POINT(longitude, latitude) → geoJson`
-
-Construct a valid GeoJSON Point.
-
-- **longitude** (number): the longitude portion of the point
-- **latitude** (number): the latitude portion of the point
-- returns **geoJson** (object): a GeoJSON Point
-
-@startDocuBlockInline aqlGeoPoint_1
-@EXAMPLE_AQL{aqlGeoPoint_1}
-RETURN GEO_POINT(1.0, 2.0)
-@END_EXAMPLE_AQL
-@endDocuBlock aqlGeoPoint_1
-
-### GEO_POLYGON()
-
-`GEO_POLYGON(points) → geoJson`
-
-Construct a GeoJSON Polygon. Needs at least one array representing a loop.
-Each loop consists of an array with at least three longitude/latitude pairs. The
-first loop must be the outermost, while any subsequent loops will be interpreted
-as holes.
-
-- **points** (array): array of (arrays of) longitude/latitude pairs
-- returns **geoJson** (object|null): a valid GeoJSON Polygon
-
-Simple Polygon:
-
-@startDocuBlockInline aqlGeoPolygon_1
-@EXAMPLE_AQL{aqlGeoPolygon_1}
-RETURN GEO_POLYGON([
- [0.0, 0.0], [7.5, 2.5], [0.0, 5.0]
-])
-@END_EXAMPLE_AQL
-@endDocuBlock aqlGeoPolygon_1
-
-Advanced Polygon with a hole inside:
-
-@startDocuBlockInline aqlGeoPolygon_2
-@EXAMPLE_AQL{aqlGeoPolygon_2}
-RETURN GEO_POLYGON([
- [[35, 10], [45, 45], [15, 40], [10, 20], [35, 10]],
- [[20, 30], [35, 35], [30, 20], [20, 30]]
-])
-@END_EXAMPLE_AQL
-@endDocuBlock aqlGeoPolygon_2
-
-### GEO_MULTIPOLYGON()
-
-`GEO_MULTIPOLYGON(polygons) → geoJson`
-
-Construct a GeoJSON MultiPolygon. Needs at least two Polygons inside.
-See [GEO_POLYGON()](#geopolygon) for the rules of Polygon construction.
-
-- **polygons** (array): array of arrays of arrays of longitude/latitude pairs
-- returns **geoJson** (object|null): a valid GeoJSON MultiPolygon
-
-MultiPolygon consisting of a simple Polygon and a Polygon with a hole:
-
-@startDocuBlockInline aqlGeoMultiPolygon_1
-@EXAMPLE_AQL{aqlGeoMultiPolygon_1}
-RETURN GEO_MULTIPOLYGON([
- [
- [[40, 40], [20, 45], [45, 30], [40, 40]]
- ],
- [
- [[20, 35], [10, 30], [10, 10], [30, 5], [45, 20], [20, 35]],
- [[30, 20], [20, 15], [20, 25], [30, 20]]
- ]
-])
-@END_EXAMPLE_AQL
-@endDocuBlock aqlGeoMultiPolygon_1
-
-Geo Index Functions
--------------------
-
-{% hint 'warning' %}
-The AQL functions `NEAR()`, `WITHIN()` and `WITHIN_RECTANGLE()` are
-deprecated starting from version 3.4.0.
-Please use the [Geo utility functions](#geo-utility-functions) instead.
-{% endhint %}
-
-AQL offers the following functions to filter data based on
-[geo indexes](../../Manual/Indexing/Geo.html). These functions require the collection
-to have at least one geo index. If no geo index can be found, calling these
-functions will fail with an error at runtime. There is no error when explaining
-the query, however.
-
-### NEAR()
-
-{% hint 'warning' %}
-The `NEAR` AQL function is deprecated as of version 3.4.0.
-Use [DISTANCE()](#distance) in a query like this instead:
-
-```js
-FOR doc IN coll
- SORT DISTANCE(doc.latitude, doc.longitude, paramLatitude, paramLongitude) ASC
- RETURN doc
-```
-Assuming there exists a geo-type index on `latitude` and `longitude`, the
-optimizer will recognize it and accelerate the query.
-{% endhint %}
-
-`NEAR(coll, latitude, longitude, limit, distanceName) → docArray`
-
-Return at most *limit* documents from collection *coll* that are near
-*latitude* and *longitude*. The result contains at most *limit* documents,
-returned sorted by distance, with closest distances being returned first.
-Optionally, the distances in meters between the specified coordinate
-(*latitude* and *longitude*) and the document coordinates can be returned as
-well. To make use of that, the desired attribute name for the distance result
-has to be specified in the *distanceName* argument. The result documents will
-contain the distance value in an attribute of that name.
-
-- **coll** (collection): a collection
-- **latitude** (number): the latitude portion of the search coordinate
-- **longitude** (number): the longitude portion of the search coordinate
-- **limit** (number, *optional*): cap the result to at most this number of
- documents. The default is 100. If more documents than *limit* are found,
- it is undefined which ones will be returned.
-- **distanceName** (string, *optional*): include the distance to the search
- coordinate in each document in the result (in meters), using the attribute
- name *distanceName*
-- returns **docArray** (array): an array of documents, sorted by distance
- (shortest distance first)
-
-### WITHIN()
-
-{% hint 'warning' %}
-The `WITHIN` AQL function is deprecated as of version 3.4.0.
-Use [DISTANCE()](#distance) in a query like this instead:
-
-```js
-FOR doc IN coll
- LET d = DISTANCE(doc.latitude, doc.longitude, paramLatitude, paramLongitude)
- FILTER d <= radius
- SORT d ASC
- RETURN doc
-```
-
-Assuming there exists a geo-type index on `latitude` and `longitude`, the
-optimizer will recognize it and accelerate the query.
-{% endhint %}
-
-`WITHIN(coll, latitude, longitude, radius, distanceName) → docArray`
-
-Return all documents from collection *coll* that are within a radius of *radius*
-around the specified coordinate (*latitude* and *longitude*). The documents
-returned are sorted by distance to the search coordinate, with the closest
-distances being returned first. Optionally, the distance in meters between the
-search coordinate and the document coordinates can be returned as well. To make
-use of that, an attribute name for the distance result has to be specified in
-the *distanceName* argument. The result documents will contain the distance
-value in an attribute of that name.
-
-- **coll** (collection): a collection
-- **latitude** (number): the latitude portion of the search coordinate
-- **longitude** (number): the longitude portion of the search coordinate
-- **radius** (number): radius in meters
-- **distanceName** (string, *optional*): include the distance to the search
- coordinate in each document in the result (in meters), using the attribute
- name *distanceName*
-- returns **docArray** (array): an array of documents, sorted by distance
- (shortest distance first)
-
-### WITHIN_RECTANGLE()
-
-{% hint 'warning' %}
-The `WITHIN_RECTANGLE` AQL function is deprecated as of version 3.4.0. Use
-[GEO_CONTAINS](#geocontains) and a GeoJSON polygon instead:
-
-```js
-LET rect = {type: "Polygon", coordinates: [[[longitude1, latitude1], ...]]}
-FOR doc IN coll
- FILTER GEO_CONTAINS(rect, [doc.longitude, doc.latitude])
- RETURN doc
-```
-Assuming there exists a geo-type index on `latitude` and `longitude`, the
-optimizer will recognize it and accelerate the query.
-{% endhint %}
-
-`WITHIN_RECTANGLE(coll, latitude1, longitude1, latitude2, longitude2) → docArray`
-
-Return all documents from collection *coll* that are positioned inside the
-bounding rectangle with the points (*latitude1*, *longitude1*) and (*latitude2*,
-*longitude2*). There is no guaranteed order in which the documents are returned.
-
-- **coll** (collection): a collection
-- **latitude1** (number): the bottom-left latitude portion of the search
- coordinate
-- **longitude1** (number): the bottom-left longitude portion of the search
- coordinate
-- **latitude2** (number): the top-right latitude portion of the search
- coordinate
-- **longitude2** (number): the top-right longitude portion of the search
- coordinate
-- returns **docArray** (array): an array of documents, in random order
diff --git a/Documentation/Books/AQL/Functions/Miscellaneous.md b/Documentation/Books/AQL/Functions/Miscellaneous.md
deleted file mode 100644
index 03f5ad9a50d6..000000000000
--- a/Documentation/Books/AQL/Functions/Miscellaneous.md
+++ /dev/null
@@ -1,410 +0,0 @@
-Miscellaneous functions
-=======================
-
-Control flow functions
-----------------------
-
-### NOT_NULL()
-
-`NOT_NULL(alternative, ...) → value`
-
-Return the first element that is not *null*, and *null* if all alternatives
-are *null* themselves. It is also known as `COALESCE()` in SQL.
-
-- **alternative** (any, *repeatable*): input of arbitrary type
-- returns **value** (any): first non-null parameter, or *null* if all arguments
- are *null*
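-
-A few examples following from the definition above:
-
-```js
-NOT_NULL(null, 1, "a") // 1
-NOT_NULL(null, null)   // null
-NOT_NULL([ ], null)    // [ ]
-```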
-
-### FIRST_LIST()
-
-`FIRST_LIST(alternative, ...) → list`
-
-Return the first alternative that is an array, and *null* if none of the
-alternatives is an array.
-
-- **alternative** (any, *repeatable*): input of arbitrary type
-- returns **list** (list|null): array / list or null
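-
-A few examples following from the definition above:
-
-```js
-FIRST_LIST(null, "not a list", [1, 2]) // [1, 2]
-FIRST_LIST("one", 2)                   // null
-```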
-
-### FIRST_DOCUMENT()
-
-`FIRST_DOCUMENT(alternative, ...) → doc`
-
-Return the first alternative that is a document, and *null* if none of the
-alternatives is a document.
-
-- **alternative** (any, *repeatable*): input of arbitrary type
-- returns **doc** (object|null): document / object or null
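-
-A few examples following from the definition above:
-
-```js
-FIRST_DOCUMENT(null, [1, 2], { name: "x" }) // { "name": "x" }
-FIRST_DOCUMENT("one", 2)                    // null
-```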
-
-### Ternary operator
-
-For conditional evaluation, check out the
-[ternary operator](../Operators.md#ternary-operator).
-
-Database functions
-------------------
-
-### CHECK_DOCUMENT()
-
-Introduced in: v3.3.22, v3.4.2
-
-`CHECK_DOCUMENT(document) → checkResult`
-
-Returns *true* if *document* is a valid document object, i.e. a document
-without any duplicate attribute names. Will return *false* for any
-non-objects/non-documents or documents with duplicate attribute names.
-
-{% hint 'warning' %}
-This is an internal function for validating database objects and
-is not supposed to be useful for anything else.
-{% endhint %}
-
-The primary use case for this function is to apply it on all
-documents in a given collection as follows:
-
-```js
-FOR doc IN collection
- FILTER !CHECK_DOCUMENT(doc)
- RETURN JSON_STRINGIFY(doc)
-```
-
-This query will return all documents in the given collection with redundant
-attribute names and export them. This output can be used for subsequent
-cleanup operations.
-
-{% hint 'info' %}
-When using object literals in AQL, there will be an automatic
-removal/cleanup of duplicate attribute names, so the function will be effective
-only for **already stored** database documents. Therefore,
-`RETURN CHECK_DOCUMENT( { a: 1, a: 2 } )` is expected to return `true`.
-{% endhint %}
-
-- **document** (object): an arbitrary document / object
-- returns **checkResult** (bool): *true* for any valid objects/documents without
- duplicate attribute names, and *false* for any non-objects/non-documents or
- objects/documents with duplicate attribute names
-
-### COLLECTION_COUNT()
-
-`COLLECTION_COUNT(coll) → count`
-
-Determine the number of documents in a collection. [LENGTH()](#length)
-is preferred.
-
-### COLLECTIONS()
-
-`COLLECTIONS() → docArray`
-
-Return an array of collections.
-
-- returns **docArray** (array): each collection as a document with attributes
- *name* and *_id* in an array
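-
-A minimal sketch of listing all collection names:
-
-```js
-FOR coll IN COLLECTIONS()
- RETURN coll.name
-```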
-
-### COUNT()
-
-This is an alias for [LENGTH()](#length).
-
-### CURRENT_USER()
-
-`CURRENT_USER() → userName`
-
-Return the name of the current user.
-
-The current user is the user account name that was specified in the
-*Authorization* HTTP header of the request. It will only be populated if
-authentication on the server is turned on, and if the query was executed inside
-a request context. Otherwise, the return value of this function will be *null*.
-
-- returns **userName** (string|null): the current user name, or *null* if
- authentication is disabled
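-
-For example (the actual result depends on the authenticated user):
-
-```js
-RETURN CURRENT_USER() // e.g. "root", or null if authentication is disabled
-```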
-
-### DECODE_REV()
-
-`DECODE_REV(revision) → details`
-
-Decompose the specified `revision` string into its components.
-The resulting object has a `date` and a `count` attribute.
-This function is supposed to be called with the `_rev` attribute value
-of a database document as argument.
-
-- **revision** (string): revision ID string
-- returns **details** (object|null): object with two attributes
- *date* (string in ISO 8601 format) and *count* (integer number),
- or *null*
-
-If the input revision ID is not a string or cannot be processed, the function
-issues a warning and returns *null*.
-
-Please note that the result structure may change in future versions of
-ArangoDB in case the internal format of revision strings is modified. Please
-also note that the *date* value in the current result provides the date and
-time of when the document record was put together on the server, but not
-necessarily the time of insertion into the underlying storage engine. Therefore
-in case of concurrent document operations the exact document storage order
-cannot be derived unambiguously from the revision value. It should thus be
-treated as a rough estimate of when a document was created or last updated.
-
-```js
-DECODE_REV( "_YU0HOEG---" )
-// { "date" : "2019-03-11T16:15:05.314Z", "count" : 0 }
-```
-
-### DOCUMENT()
-
-`DOCUMENT(collection, id) → doc`
-
-Return the document which is uniquely identified by its *id*. ArangoDB will
-try to find the document using the *_id* value of the document in the specified
-collection.
-
-If there is a mismatch between the *collection* passed and the
-collection specified in *id*, then *null* will be returned. Additionally,
-if the *collection* matches the collection value specified in *id* but the
-document cannot be found, *null* will be returned.
-
-This function also allows *id* to be an array of ids. In this case, the
-function will return an array of all documents that could be found.
-
-It is also possible to specify a document key instead of an id, or an array
-of keys to return all documents that can be found.
-
-- **collection** (string): name of a collection
-- **id** (string|array): a document handle string (consisting of collection
- name and document key), a document key, or an array of both document handle
- strings and document keys
-- returns **doc** (document|array|null): the content of the found document,
- an array of all found documents or *null* if nothing was found
-
-```js
-DOCUMENT( users, "users/john" )
-DOCUMENT( users, "john" )
-
-DOCUMENT( users, [ "users/john", "users/amy" ] )
-DOCUMENT( users, [ "john", "amy" ] )
-```
-
-`DOCUMENT(id) → doc`
-
-The function can also be used with a single parameter *id* as follows:
-
-- **id** (string|array): either a document handle string (consisting of
- collection name and document key) or an array of document handle strings
-- returns **doc** (document|array|null): the content of the found document,
- an array of all found documents, or *null* if nothing was found
-
-```js
-DOCUMENT("users/john")
-DOCUMENT( [ "users/john", "users/amy" ] )
-```
-
-Please also consider using
-[`DOCUMENT` in conjunction with `WITH`](../Operations/With.md).
-
-### LENGTH()
-
-`LENGTH(coll) → documentCount`
-
-Determine the number of documents in a collection.
-
-It calls [COLLECTION_COUNT()](#collectioncount) internally.
-
-- **coll** (collection): a collection (not string)
-- returns **documentCount** (number): the total number of documents in *coll*
-
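-A minimal sketch, assuming a hypothetical collection `users`:
-
-```js
-RETURN LENGTH(users) // total number of documents in the hypothetical users collection
-```
-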
-*LENGTH()* can also determine the [number of elements](Array.md#length) in an array,
-the [number of attribute keys](Document.md#length) of an object / document and
-the [character length](String.md#length) of a string.
-
-Hash functions
---------------
-
-### HASH()
-
-`HASH(value) → hashNumber`
-
-Calculate a hash value for *value*.
-
-- **value** (any): an element of arbitrary type
-- returns **hashNumber** (number): a hash value of *value*
-
-*value* is not required to be a string, but can have any data type. The calculated
-hash value will take the data type of *value* into account, so for example the
-number *1* and the string *"1"* will have different hash values. For arrays the
-hash values will be equal if the arrays contain exactly the same values
-(including value types) in the same order. For objects the same hash values will
-be created if the objects have exactly the same attribute names and values
-(including value types). The order in which attributes appear inside objects
-is not important for hashing.
-
-The hash value returned by this function is a number. The hash algorithm is not
-guaranteed to remain the same in future versions of ArangoDB. The hash values
-should therefore be used only for temporary calculations, e.g. to compare if two
-documents are the same, or for grouping values in queries.
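-
-A minimal sketch of grouping by a hash value, assuming a hypothetical collection
-`users` with `name` and `age` attributes:
-
-```js
-// "users" is a hypothetical collection with name and age attributes
-FOR doc IN users
- COLLECT h = HASH({ name: doc.name, age: doc.age }) WITH COUNT INTO amount
- RETURN { hash: h, amount: amount }
-```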
-
-### String-based hashing
-
-See the following string functions:
-
-- [CRC32()](String.md#crc32)
-- [FNV64()](String.md#fnv64)
-- [MD5()](String.md#md5)
-- [SHA1()](String.md#sha1)
-- [SHA512()](String.md#sha512)
-
-Function calling
-----------------
-
-### APPLY()
-
-`APPLY(funcName, arguments) → retVal`
-
-Dynamically call the function *funcName* with the arguments specified.
-Arguments are given as array and are passed as separate parameters to
-the called function.
-
-Both built-in and user-defined functions can be called.
-
-- **funcName** (string): a function name
-- **arguments** (array, *optional*): an array with elements of arbitrary type
-- returns **retVal** (any): the return value of the called function
-
-```js
-APPLY( "SUBSTRING", [ "this is a test", 0, 7 ] )
-// "this is"
-```
-
-### ASSERT() / WARN()
-
-`ASSERT(expr, message) → retVal`
-`WARN(expr, message) → retVal`
-
-The two functions evaluate an expression. In case the expression evaluates to
-*true* both functions will return *true*. If the expression evaluates to
-*false* *ASSERT* will throw an error and *WARN* will issue a warning and return
-*false*. This behavior allows the use of *ASSERT* and *WARN* in *FILTER*
-conditions.
-
-- **expr** (expression): AQL expression to be evaluated
-- **message** (string): message that will be used in exception or warning if expression evaluates to false
-- returns **retVal** (bool): returns true if expression evaluates to true
-
-```js
-FOR i IN 1..3 FILTER ASSERT(i > 0, "i is not greater 0") RETURN i
-FOR i IN 1..3 FILTER WARN(i < 2, "i is not smaller 2") RETURN i
-```
-
-### CALL()
-
-`CALL(funcName, arg1, arg2, ... argN) → retVal`
-
-Dynamically call the function *funcName* with the arguments specified.
-Arguments are given as multiple parameters and passed as separate
-parameters to the called function.
-
-Both built-in and user-defined functions can be called.
-
-- **funcName** (string): a function name
-- **args** (any, *repeatable*): an arbitrary number of elements as
- multiple arguments, can be omitted
-- returns **retVal** (any): the return value of the called function
-
-```js
-CALL( "SUBSTRING", "this is a test", 0, 4 )
-// "this"
-```
-
-Internal functions
-------------------
-
-The following functions are used during development of ArangoDB as a database
-system, primarily for unit testing. They are not intended to be used by end
-users, especially not in production environments.
-
-### FAIL()
-
-`FAIL(reason)`
-
-Let a query fail on purpose. Can be used in a conditional branch, or to verify
-whether lazy evaluation / short-circuiting is used, for instance.
-
-- **reason** (string): an error message
-- returns nothing, because the query is aborted
-
-```js
-RETURN 1 == 1 ? "okay" : FAIL("error") // "okay"
-RETURN 1 == 1 || FAIL("error") ? true : false // true
-RETURN 1 == 2 && FAIL("error") ? true : false // false
-RETURN 1 == 1 && FAIL("error") ? true : false // aborted with error
-```
-
-### NOOPT()
-
-`NOOPT(value) → retVal`
-
-No-operation that prevents certain query compile-time and run-time optimizations.
-Constant expressions can be forced to be evaluated at runtime with this.
-This function is marked as non-deterministic so its argument withstands
-query optimization. There is no need to call this function explicitly, it is
-mainly used for internal testing.
-
-- **value** (any): a value of arbitrary type
-- returns **retVal** (any): *value*
-
-```js
-// differences in execution plan (explain)
-FOR i IN 1..3 RETURN (1 + 1) // const assignment
-FOR i IN 1..3 RETURN NOOPT(1 + 1) // simple expression
-
-NOOPT( 123 ) // evaluates 123 at runtime
-NOOPT( CONCAT("a", "b") ) // evaluates concatenation at runtime
-```
-
-### PASSTHRU()
-
-`PASSTHRU(value) → retVal`
-
-Simply returns its call argument unmodified. There is no need to call this function
-explicitly, it is mainly used for internal testing.
-
-- **value** (any): a value of arbitrary type
-- returns **retVal** (any): *value*
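-
-A couple of straightforward examples following from the definition:
-
-```js
-PASSTHRU(42)               // 42
-PASSTHRU(CONCAT("a", "b")) // "ab"
-```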
-
-### SLEEP()
-
-`SLEEP(seconds) → null`
-
-Wait for a certain amount of time before continuing the query.
-
-- **seconds** (number): amount of time to wait
-- returns a *null* value
-
-```js
-SLEEP(1) // wait 1 second
-SLEEP(0.02) // wait 20 milliseconds
-```
-
-### V8()
-
-`V8(expression) → retVal`
-
-No-operation that enforces the usage of the V8 JavaScript engine. There is
-no need to call this function explicitly, it is mainly used for internal
-testing.
-
-- **expression** (any): arbitrary expression
-- returns **retVal** (any): the return value of the *expression*
-
-```js
-// differences in execution plan (explain)
-FOR i IN 1..3 RETURN (1 + 1) // const assignment
-FOR i IN 1..3 RETURN V8(1 + 1) // simple expression
-```
-
-### VERSION()
-
-`VERSION() → serverVersion`
-
-Returns the server version as a string. In a cluster, returns the version
-of the coordinator.
-
-- returns **serverVersion** (string): the server version string
-
-```js
-RETURN VERSION() // e.g. "3.4.0"
-```
diff --git a/Documentation/Books/AQL/Functions/Numeric.md b/Documentation/Books/AQL/Functions/Numeric.md
deleted file mode 100644
index 371d3e94b34a..000000000000
--- a/Documentation/Books/AQL/Functions/Numeric.md
+++ /dev/null
@@ -1,629 +0,0 @@
-Numeric functions
-=================
-
-AQL offers some numeric functions for calculations. The following functions are
-supported:
-
-ABS()
------
-
-`ABS(value) → unsignedValue`
-
-Return the absolute value of *value*.
-
-- **value** (number): any number, positive or negative
-- returns **unsignedValue** (number): the number without + or - sign
-
-```js
-ABS(-5) // 5
-ABS(+5) // 5
-ABS(3.5) // 3.5
-```
-
-ACOS()
-------
-
-`ACOS(value) → num`
-
-Return the arccosine of *value*.
-
-- **value** (number): the input value
-- returns **num** (number|null): the arccosine of *value*, or *null* if *value* is
- outside the valid range of -1 to 1 (inclusive)
-
-```js
-ACOS(-1) // 3.141592653589793
-ACOS(0) // 1.5707963267948966
-ACOS(1) // 0
-ACOS(2) // null
-```
-
-ASIN()
-------
-
-`ASIN(value) → num`
-
-Return the arcsine of *value*.
-
-- **value** (number): the input value
-- returns **num** (number|null): the arcsine of *value*, or *null* if *value* is
- outside the valid range of -1 to 1 (inclusive)
-
-```js
-ASIN(1) // 1.5707963267948966
-ASIN(0) // 0
-ASIN(-1) // -1.5707963267948966
-ASIN(2) // null
-```
-
-ATAN()
-------
-
-`ATAN(value) → num`
-
-Return the arctangent of *value*.
-
-- **value** (number): the input value
-- returns **num** (number): the arctangent of *value*
-
-```js
-ATAN(-1) // -0.7853981633974483
-ATAN(0) // 0
-ATAN(10) // 1.4711276743037347
-```
-
-ATAN2()
--------
-
-`ATAN2(y, x) → num`
-
-Return the arctangent of the quotient of *y* and *x*.
-
-- **y** (number): the dividend (numerator)
-- **x** (number): the divisor (denominator)
-- returns **num** (number): the arctangent of *y* divided by *x*
-
-```js
-ATAN2(0, 0) // 0
-ATAN2(1, 0) // 1.5707963267948966
-ATAN2(1, 1) // 0.7853981633974483
-ATAN2(-10, 20) // -0.4636476090008061
-```
-
-AVERAGE()
----------
-
-`AVERAGE(numArray) → mean`
-
-Return the average (arithmetic mean) of the values in *array*.
-
-- **numArray** (array): an array of numbers, *null* values are ignored
-- returns **mean** (number|null): the average value of *numArray*. If the array is
- empty or contains *null* values only, *null* will be returned.
-
-```js
-AVERAGE( [5, 2, 9, 2] ) // 4.5
-AVERAGE( [ -3, -5, 2 ] ) // -2
-AVERAGE( [ 999, 80, 4, 4, 4, 3, 3, 3 ] ) // 137.5
-```
-
-AVG()
------
-
-This is an alias for [AVERAGE()](#average).
-
-CEIL()
-------
-
-`CEIL(value) → roundedValue`
-
-Return the integer closest but not less than *value*.
-
-To round downward, see [FLOOR()](#floor).
-To round to the nearest integer value, see [ROUND()](#round).
-
-- **value** (number): any number
-- returns **roundedValue** (number): the value rounded to the ceiling
-
-```js
-CEIL(2.49) // 3
-CEIL(2.50) // 3
-CEIL(-2.50) // -2
-CEIL(-2.51) // -2
-```
-
-COS()
------
-
-`COS(value) → num`
-
-Return the cosine of *value*.
-
-- **value** (number): the input value
-- returns **num** (number): the cosine of *value*
-
-```js
-COS(1) // 0.5403023058681398
-COS(0) // 1
-COS(-3.141592653589793) // -1
-COS(RADIANS(45)) // 0.7071067811865476
-```
-
-DEGREES()
----------
-
-`DEGREES(rad) → num`
-
-Return the angle converted from radians to degrees.
-
-- **rad** (number): the input value
-- returns **num** (number): the angle in degrees
-
-```js
-DEGREES(0.7853981633974483) // 45
-DEGREES(0) // 0
-DEGREES(3.141592653589793) // 180
-```
-
-EXP()
------
-
-`EXP(value) → num`
-
-Return Euler's constant (2.71828...) raised to the power of *value*.
-
-- **value** (number): the input value
-- returns **num** (number): Euler's constant raised to the power of *value*
-
-```js
-EXP(1) // 2.718281828459045
-EXP(10) // 22026.46579480671
-EXP(0) // 1
-```
-
-EXP2()
-------
-
-`EXP2(value) → num`
-
-Return 2 raised to the power of *value*.
-
-- **value** (number): the input value
-- returns **num** (number): 2 raised to the power of *value*
-
-```js
-EXP2(16) // 65536
-EXP2(1) // 2
-EXP2(0) // 1
-```
-
-FLOOR()
--------
-
-`FLOOR(value) → roundedValue`
-
-Return the integer closest but not greater than *value*.
-
-To round upward, see [CEIL()](#ceil).
-To round to the nearest integer value, see [ROUND()](#round).
-
-- **value** (number): any number
-- returns **roundedValue** (number): the value rounded downward
-
-```js
-FLOOR(2.49) // 2
-FLOOR(2.50) // 2
-FLOOR(-2.50) // -3
-FLOOR(-2.51) // -3
-```
-
-LOG()
------
-
-`LOG(value) → num`
-
-Return the natural logarithm of *value*. The base is Euler's
-constant (2.71828...).
-
-- **value** (number): the input value
-- returns **num** (number|null): the natural logarithm of *value*, or *null* if *value* is
- less than or equal to 0
-
-```js
-LOG(2.718281828459045) // 1
-LOG(10) // 2.302585092994046
-LOG(0) // null
-```
-
-LOG2()
-------
-
-`LOG2(value) → num`
-
-Return the base 2 logarithm of *value*.
-
-- **value** (number): the input value
-- returns **num** (number|null): the base 2 logarithm of *value*, or *null* if *value* is
- less than or equal to 0
-
-```js
-LOG2(1024) // 10
-LOG2(8) // 3
-LOG2(0) // null
-```
-
-LOG10()
--------
-
-`LOG10(value) → num`
-
-Return the base 10 logarithm of *value*.
-
-- **value** (number): the input value
-- returns **num** (number|null): the base 10 logarithm of *value*, or *null* if *value* is
- less than or equal to 0
-
-```js
-LOG10(10000) // 10
-LOG10(10) // 1
-LOG10(0) // null
-```
-
-MAX()
------
-
-`MAX(anyArray) → max`
-
-Return the greatest element of *anyArray*. The array is not limited to numbers.
-Also see [type and value order](../Fundamentals/TypeValueOrder.md).
-
-- **anyArray** (array): an array of elements of arbitrary type, *null* values are ignored
-- returns **max** (any|null): the element with the greatest value. If the array is
- empty or contains *null* values only, the function will return *null*.
-
-```js
-MAX( [5, 9, -2, null, 1] ) // 9
-MAX( [ null, null ] ) // null
-```
-
-MEDIAN()
---------
-
-`MEDIAN(numArray) → median`
-
-Return the median value of the values in *array*.
-
-The array is sorted and the element in the middle is returned. If the array has an
-even length of elements, the two center-most elements are interpolated by calculating
-the average value (arithmetic mean).
-
-- **numArray** (array): an array of numbers, *null* values are ignored
-- returns **median** (number|null): the median of *numArray*. If the array is
- empty or contains *null* values only, the function will return *null*.
-
-```js
-MEDIAN( [ 1, 2, 3] ) // 2
-MEDIAN( [ 1, 2, 3, 4 ] ) // 2.5
-MEDIAN( [ 4, 2, 3, 1 ] ) // 2.5
-MEDIAN( [ 999, 80, 4, 4, 4, 3, 3, 3 ] ) // 4
-```
-
-MIN()
------
-
-`MIN(anyArray) → min`
-
-Return the smallest element of *anyArray*. The array is not limited to numbers.
-Also see [type and value order](../Fundamentals/TypeValueOrder.md).
-
-- **anyArray** (array): an array of elements of arbitrary type, *null* values are ignored
-- returns **min** (any|null): the element with the smallest value. If the array is
- empty or contains *null* values only, the function will return *null*.
-
-```js
-MIN( [5, 9, -2, null, 1] ) // -2
-MIN( [ null, null ] ) // null
-```
-
-PERCENTILE()
-------------
-
-`PERCENTILE(numArray, n, method) → percentile`
-
-Return the *n*th percentile of the values in *numArray*.
-
-- **numArray** (array): an array of numbers, *null* values are ignored
-- **n** (number): must be between 0 (excluded) and 100 (included)
-- **method** (string, *optional*): "rank" (default) or "interpolation"
-- returns **percentile** (number|null): the *n*th percentile, or *null* if the
- array is empty or only *null* values are contained in it or the percentile
- cannot be calculated
-
-```js
-PERCENTILE( [1, 2, 3, 4], 50 ) // 2
-PERCENTILE( [1, 2, 3, 4], 50, "rank" ) // 2
-PERCENTILE( [1, 2, 3, 4], 50, "interpolation" ) // 2.5
-```
-
-PI()
-----
-
-`PI() → pi`
-
-Return pi.
-
-- returns **pi** (number): the first few significant digits of pi (3.141592653589793)
-
-```js
-PI() // 3.141592653589793
-```
-
-POW()
------
-
-`POW(base, exp) → num`
-
-Return the *base* to the exponent *exp*.
-
-- **base** (number): the base value
-- **exp** (number): the exponent value
-- returns **num** (number): the exponentiated value
-
-```js
-POW( 2, 4 ) // 16
-POW( 5, -1 ) // 0.2
-POW( 5, 0 ) // 1
-```
-
-RADIANS()
----------
-
-`RADIANS(deg) → num`
-
-Return the angle converted from degrees to radians.
-
-- **deg** (number): the input value
-- returns **num** (number): the angle in radians
-
-```js
-RADIANS(180) // 3.141592653589793
-RADIANS(90) // 1.5707963267948966
-RADIANS(0) // 0
-```
-
-RAND()
-------
-
-`RAND() → randomNumber`
-
-Return a pseudo-random number between 0 and 1.
-
-- returns **randomNumber** (number): a number greater than 0 and less than 1
-
-```js
-RAND() // 0.3503170117504508
-RAND() // 0.6138226173882478
-```
-
-Complex example:
-
-```js
-LET coinFlips = (
- FOR i IN 1..100000
- RETURN RAND() > 0.5 ? "heads" : "tails"
-)
-RETURN MERGE(
- FOR flip IN coinFlips
- COLLECT f = flip WITH COUNT INTO count
- RETURN { [f]: count }
-)
-```
-
-Result:
-
-```json
-[
- {
- "heads": 49902,
- "tails": 50098
- }
-]
-```
-
-RANGE()
--------
-
-`RANGE(start, stop, step) → numArray`
-
-Return an array of numbers in the specified range, optionally with increments
-other than 1. The *start* and *stop* arguments are truncated to integers
-unless a *step* argument is provided.
-
-Also see the [range operator](../Operators.md#range-operator) for ranges
-with integer bounds and a step size of 1.
-
-- **start** (number): the value to start the range at (inclusive)
-- **stop** (number): the value to end the range with (inclusive)
-- **step** (number, *optional*): how much to increment in every step,
- the default is *1.0*
-- returns **numArray** (array): all numbers in the range as array
-
-```js
-RANGE(1, 4) // [ 1, 2, 3, 4 ]
-RANGE(1, 4, 2) // [ 1, 3 ]
-RANGE(1, 4, 3) // [ 1, 4 ]
-RANGE(1.5, 2.5) // [ 1, 2 ]
-RANGE(1.5, 2.5, 1) // [ 1.5, 2.5 ]
-RANGE(1.5, 2.5, 0.5) // [ 1.5, 2, 2.5 ]
-RANGE(-0.75, 1.1, 0.5) // [ -0.75, -0.25, 0.25, 0.75 ]
-```
-
-ROUND()
--------
-
-`ROUND(value) → roundedValue`
-
-Return the integer closest to *value*.
-
-- **value** (number): any number
-- returns **roundedValue** (number): the value rounded to the closest integer
-
-```js
-ROUND(2.49) // 2
-ROUND(2.50) // 3
-ROUND(-2.50) // -2
-ROUND(-2.51) // -3
-```
-
-Rounding towards zero, also known as *trunc()* in C/C++, can be achieved with
-a combination of the [ternary operator](../Operators.md#ternary-operator),
-[CEIL()](#ceil)
-and [FLOOR()](#floor):
-
-```js
-value >= 0 ? FLOOR(value) : CEIL(value)
-```
-
-SIN()
------
-
-`SIN(value) → num`
-
-Return the sine of *value*.
-
-- **value** (number): the input value
-- returns **num** (number): the sine of *value*
-
-```js
-SIN(3.141592653589793 / 2) // 1
-SIN(0) // 0
-SIN(-3.141592653589793 / 2) // -1
-SIN(RADIANS(270)) // -1
-```
-
-SQRT()
-------
-
-`SQRT(value) → squareRoot`
-
-Return the square root of *value*.
-
-- **value** (number): a number
-- returns **squareRoot** (number): the square root of *value*
-
-```js
-SQRT(9) // 3
-SQRT(2) // 1.4142135623730951
-```
-
-Other roots can be calculated with [POW()](#pow) like `POW(value, 1/n)`:
-
-```js
-// 4th root of 8*8*8*8 = 4096
-POW(4096, 1/4) // 8
-
-// cube root of 3*3*3 = 27
-POW(27, 1/3) // 3
-
-// square root of 3*3 = 9
-POW(9, 1/2) // 3
-```
-
-STDDEV_POPULATION()
--------------------
-
-`STDDEV_POPULATION(numArray) → num`
-
-Return the population standard deviation of the values in *array*.
-
-- **numArray** (array): an array of numbers, *null* values are ignored
-- returns **num** (number|null): the population standard deviation of *numArray*.
- If the array is empty or only *null* values are contained in the array,
- *null* will be returned.
-
-```js
-STDDEV_POPULATION( [ 1, 3, 6, 5, 2 ] ) // 1.854723699099141
-```
-
-STDDEV_SAMPLE()
----------------
-
-`STDDEV_SAMPLE(numArray) → num`
-
-Return the sample standard deviation of the values in *array*.
-
-- **numArray** (array): an array of numbers, *null* values are ignored
-- returns **num** (number|null): the sample standard deviation of *numArray*.
- If the array is empty or only *null* values are contained in the array,
- *null* will be returned.
-
-```js
-STDDEV_SAMPLE( [ 1, 3, 6, 5, 2 ] ) // 2.0736441353327724
-```
-
-STDDEV()
---------
-
-This is an alias for [STDDEV_POPULATION()](#stddevpopulation).
-
-SUM()
------
-
-`SUM(numArray) → sum`
-
-Return the sum of the values in *array*.
-
-- **numArray** (array): an array of numbers, *null* values are ignored
-- returns **sum** (number): the total of all values in *numArray*. If the array
- is empty or only *null* values are contained in the array, *0* will be returned.
-
-```js
-SUM( [1, 2, 3, 4] ) // 10
-SUM( [null, -5, 6] ) // 1
-SUM( [ ] ) // 0
-```
-
-TAN()
------
-
-`TAN(value) → num`
-
-Return the tangent of *value*.
-
-- **value** (number): the input value
-- returns **num** (number): the tangent of *value*
-
-```js
-TAN(10) // 0.6483608274590866
-TAN(5) // -3.380515006246586
-TAN(0) // 0
-```
-
-VARIANCE_POPULATION()
----------------------
-
-`VARIANCE_POPULATION(numArray) → num`
-
-Return the population variance of the values in *array*.
-
-- **numArray** (array): an array of numbers, *null* values are ignored
-- returns **num** (number|null): the population variance of *numArray*.
- If the array is empty or only *null* values are contained in the array,
- *null* will be returned.
-
-```js
-VARIANCE_POPULATION( [ 1, 3, 6, 5, 2 ] ) // 3.4400000000000004
-```
-
-VARIANCE_SAMPLE()
------------------
-
-`VARIANCE_SAMPLE(numArray) → num`
-
-Return the sample variance of the values in *array*.
-
-- **numArray** (array): an array of numbers, *null* values are ignored
-- returns **num** (number|null): the sample variance of *numArray*.
- If the array is empty or only *null* values are contained in the array,
- *null* will be returned.
-
-```js
-VARIANCE_SAMPLE( [ 1, 3, 6, 5, 2 ] ) // 4.300000000000001
-```
-
-VARIANCE()
-----------
-
-This is an alias for [VARIANCE_POPULATION()](#variancepopulation).
diff --git a/Documentation/Books/AQL/Functions/README.md b/Documentation/Books/AQL/Functions/README.md
deleted file mode 100644
index af2c6fce6f50..000000000000
--- a/Documentation/Books/AQL/Functions/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-Functions
-=========
-
-AQL supports functions to allow more complex computations. Functions can be
-called at any query position where an expression is allowed. The general
-function call syntax is:
-
-```js
-FUNCTIONNAME(arguments)
-```
-
-where *FUNCTIONNAME* is the name of the function to be called, and *arguments*
-is a comma-separated list of function arguments. If a function does not need any
-arguments, the argument list can be left empty. However, even if the argument
-list is empty the parentheses around it are still mandatory to make function
-calls distinguishable from variable names.
-
-Some example function calls:
-
-```js
-HAS(user, "name")
-LENGTH(friends)
-COLLECTIONS()
-```
-
-In contrast to collection and variable names, function names are case-insensitive,
-i.e. *LENGTH(foo)* and *length(foo)* are equivalent.
-
-Extending AQL
--------------
-
-It is possible to extend AQL with user-defined functions. These functions need to
-be written in JavaScript, and have to be registered before they can be used in a query.
-Please refer to [Extending AQL](../Extending/index.html) for more details.
diff --git a/Documentation/Books/AQL/Functions/String.md b/Documentation/Books/AQL/Functions/String.md
deleted file mode 100644
index 24f9fec11f36..000000000000
--- a/Documentation/Books/AQL/Functions/String.md
+++ /dev/null
@@ -1,942 +0,0 @@
-String functions
-================
-
-For string processing, AQL offers the following functions:
-
-CHAR_LENGTH()
--------------
-
-`CHAR_LENGTH(value) → length`
-
-Return the number of characters in *value* (not byte length).
-
-| Input          | Length |
-|----------------|--------|
-| String         | number of Unicode characters |
-| Number         | number of Unicode characters that represent the number |
-| Array / Object | number of Unicode characters from the resulting stringification |
-| true           | 4 |
-| false          | 5 |
-| null           | 0 |
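-
-A few examples following from the table above:
-
-```js
-CHAR_LENGTH("foo")  // 3
-CHAR_LENGTH("电脑坏了") // 4
-CHAR_LENGTH(12345)  // 5
-CHAR_LENGTH(true)   // 4
-CHAR_LENGTH(null)   // 0
-```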
-
-CONCAT()
---------
-
-`CONCAT(value1, value2, ... valueN) → str`
-
-Concatenate the values passed as *value1* to *valueN*.
-
-- **values** (any, *repeatable*): elements of arbitrary type (at least 1)
-- returns **str** (string): a concatenation of the elements. *null* values
- are ignored.
-
-```js
-CONCAT("foo", "bar", "baz") // "foobarbaz"
-CONCAT(1, 2, 3) // "123"
-CONCAT("foo", [5, 6], {bar: "baz"}) // "foo[5,6]{\"bar\":\"baz\"}"
-```
-
-`CONCAT(anyArray) → str`
-
-If a single array is passed to *CONCAT()*, its members are concatenated.
-
-- **anyArray** (array): array with elements of arbitrary type
-- returns **str** (string): a concatenation of the array elements. *null* values
- are ignored.
-
-```js
-CONCAT( [ "foo", "bar", "baz" ] ) // "foobarbaz"
-CONCAT( [1, 2, 3] ) // "123"
-```
-
-CONCAT_SEPARATOR()
-------------------
-
-`CONCAT_SEPARATOR(separator, value1, value2, ... valueN) → joinedString`
-
-Concatenate the strings passed as arguments *value1* to *valueN* using the
-*separator* string.
-
-- **separator** (string): an arbitrary separator string
-- **values** (string|array, *repeatable*): strings or arrays of strings as multiple
- arguments (at least 1)
-- returns **joinedString** (string): a concatenated string of the elements, using
- *separator* as separator string. *null* values are ignored. Array value arguments
- are expanded automatically, and their individual members will be concatenated.
- Nested arrays will be expanded too, but with their elements separated by commas
- if they have more than a single element.
-
-```js
-CONCAT_SEPARATOR(", ", "foo", "bar", "baz")
-// "foo, bar, baz"
-
-CONCAT_SEPARATOR(", ", [ "foo", "bar", "baz" ])
-// "foo, bar, baz"
-
-CONCAT_SEPARATOR(", ", [ "foo", [ "b", "a", "r" ], "baz" ])
-// [ "foo, b,a,r, baz" ]
-
-CONCAT_SEPARATOR("-", [1, 2, 3, null], [4, null, 5])
-// "1-2-3-4-5"
-```
-
-CONTAINS()
-----------
-
-`CONTAINS(text, search, returnIndex) → match`
-
-Check whether the string *search* is contained in the string *text*.
-The string matching performed by *CONTAINS* is case-sensitive.
-
-- **text** (string): the haystack
-- **search** (string): the needle
-- **returnIndex** (bool, *optional*): if set to *true*, the character position
- of the match is returned instead of a boolean. The default is *false*.
-- returns **match** (bool|number): by default, *true* is returned if *search*
- is contained in *text*, and *false* otherwise. With *returnIndex* set to *true*,
- the position of the first occurrence of *search* within *text* is returned
- (starting at offset 0), or *-1* if *search* is not contained in *text*.
-
-```js
-CONTAINS("foobarbaz", "bar") // true
-CONTAINS("foobarbaz", "horse") // false
-CONTAINS("foobarbaz", "ba", true) // 3
-CONTAINS("foobarbaz", "horse", true) // -1
-```
-
-To determine if or at which position a value is included in an array, see the
-[POSITION() array function](Array.md#position).
-
-COUNT()
--------
-
-This is an alias for [LENGTH()](#length).
-
-CRC32()
------
-
-`CRC32(text) → hash`
-
-Calculate the CRC32 checksum for *text* and return it in a hexadecimal
-string representation. The polynomial used is 0x1EDC6F41. The initial
-value used is 0xFFFFFFFF, and the final xor value is also 0xFFFFFFFF.
-
-- **text** (string): a string
-- returns **hash** (string): CRC32 checksum as hex string
-
-```js
-CRC32("foobar") // "D5F5C7F"
-```
-
-ENCODE_URI_COMPONENT()
------------
-
-`ENCODE_URI_COMPONENT(value) → encodedURIComponentString`
-
-Return the encoded URI component of *value*.
-
-- **value** (string): a string
-- returns **encodedURIComponentString** (string): the encoded URI component of *value*
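-
-A hedged example, assuming behavior equivalent to JavaScript's `encodeURIComponent()`:
-
-```js
-ENCODE_URI_COMPONENT("foo bar") // "foo%20bar"
-ENCODE_URI_COMPONENT("a/b?c=d") // "a%2Fb%3Fc%3Dd"
-```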
-
-FIND_FIRST()
-------------
-
-`FIND_FIRST(text, search, start, end) → position`
-
-Return the position of the first occurrence of the string *search* inside the
-string *text*. Positions start at 0.
-
-- **text** (string): the haystack
-- **search** (string): the needle
-- **start** (number, *optional*): limit the search to a subset of the text,
- beginning at *start*
-- **end** (number, *optional*): limit the search to a subset of the text,
- ending at *end*
-- returns **position** (number): the character position of the match. If *search*
- is not contained in *text*, -1 is returned. If **search** is empty, **start** is returned.
-
-```js
-FIND_FIRST("foobarbaz", "ba") // 3
-FIND_FIRST("foobarbaz", "ba", 4) // 6
-FIND_FIRST("foobarbaz", "ba", 0, 3) // -1
-```
-
-FIND_LAST()
------------
-
-`FIND_LAST(text, search, start, end) → position`
-
-Return the position of the last occurrence of the string *search* inside the
-string *text*. Positions start at 0.
-
-- **text** (string): the haystack
-- **search** (string): the needle
-- **start** (number, *optional*): limit the search to a subset of the text,
- beginning at *start*
-- **end** (number, *optional*): limit the search to a subset of the text,
- ending at *end*
-- returns **position** (number): the character position of the match. If *search*
- is not contained in *text*, -1 is returned.
- If *search* is empty, the string length is returned, or *end* + 1.
-
-```js
-FIND_LAST("foobarbaz", "ba") // 6
-FIND_LAST("foobarbaz", "ba", 7) // -1
-FIND_LAST("foobarbaz", "ba", 0, 4) // 3
-```
-
-FNV64()
------
-
-`FNV64(text) → hash`
-
-Calculate the FNV-1A 64 bit hash for *text* and return it in a hexadecimal
-string representation.
-
-- **text** (string): a string
-- returns **hash** (string): FNV-1A hash as hex string
-
-```js
-FNV64("foobar") // "85944171F73967E8"
-```
-
-JSON_PARSE()
-------------
-
-`JSON_PARSE(text) → value`
-
-Return an AQL value described by the JSON-encoded input string.
-
-- **text** (string): the string to parse as JSON
-- returns **value** (mixed): the value corresponding to the given JSON text.
- For input values that are not valid JSON strings, the function will return *null*.
-
-```js
-JSON_PARSE("123") // 123
-JSON_PARSE("[ true, false, 2 ]") // [ true, false, 2 ]
-JSON_PARSE("\\\"abc\\\"") // "abc"
-JSON_PARSE("{\\\"a\\\": 1}") // { a : 1 }
-JSON_PARSE("abc") // null
-```
-
-JSON_STRINGIFY()
-----------------
-
-`JSON_STRINGIFY(value) → text`
-
-Return a JSON string representation of the input value.
-
-- **value** (mixed): the value to convert to a JSON string
-- returns **text** (string): the JSON string representing *value*.
- For input values that cannot be converted to JSON, the function
- will return *null*.
-
-```js
-JSON_STRINGIFY("1") // "1"
-JSON_STRINGIFY("abc") // "\"abc\""
-JSON_STRINGIFY("[1, 2, 3]") // "[1,2,3]"
-```
-
-LEFT()
-------
-
-`LEFT(value, n) → substring`
-
-Return the *n* leftmost characters of the string *value*.
-
-To return the rightmost characters, see [RIGHT()](#right).
-To take a part from an arbitrary position off the string,
-see [SUBSTRING()](#substring).
-
-- **value** (string): a string
-- **n** (number): how many characters to return
-- returns **substring** (string): at most *n* characters of *value*,
- starting on the left-hand side of the string
-
-```js
-LEFT("foobar", 3) // "foo"
-LEFT("foobar", 10) // "foobar"
-```
-
-LENGTH()
---------
-
-`LENGTH(str) → length`
-
-Determine the character length of a string.
-
-- **str** (string): a string. If a number is passed, it will be cast to a string first.
-- returns **length** (number): the character length of *str* (not byte length)
-
-```js
-LENGTH("foobar") // 6
-LENGTH("电脑坏了") // 4
-```
-
-*LENGTH()* can also determine the [number of elements](Array.md#length) in an array,
-the [number of attribute keys](Document.md#length) of an object / document and
-the [number of documents](Miscellaneous.md#length) in a collection.
-
-LEVENSHTEIN_DISTANCE()
-----------------------
-
-`LEVENSHTEIN_DISTANCE(value1, value2) → levenshteinDistance`
-
-Calculate the [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance)
-between two strings.
-
-- **value1** (string): a string
-- **value2** (string): a string
-- returns **levenshteinDistance** (number): calculated Levenshtein distance
- between the input strings *value1* and *value2*
-
-```js
-LEVENSHTEIN_DISTANCE("foobar", "bar") // 3
-LEVENSHTEIN_DISTANCE(" ", "") // 1
-LEVENSHTEIN_DISTANCE("The quick brown fox jumps over the lazy dog", "The quick black dog jumps over the brown fox") // 13
-LEVENSHTEIN_DISTANCE("der mötör trötet", "der trötet") // 6
-```
-
-LIKE()
-------
-
-`LIKE(text, search, caseInsensitive) → bool`
-
-Check whether the pattern *search* is contained in the string *text*,
-using wildcard matching.
-
-- **text** (string): the string to search in
-- **search** (string): a search pattern that can contain the wildcard characters
- `%` (meaning any sequence of characters, including none) and `_` (any single
- character). Literal *%* and *_* must be escaped with two backslashes (four
- in arangosh).
- *search* cannot be a variable or a document attribute. The actual value must
- be present at query parse time already.
-- **caseInsensitive** (bool, *optional*): if set to *true*, the matching will be
- case-insensitive. The default is *false*.
-- returns **bool** (bool): *true* if the pattern is contained in *text*,
- and *false* otherwise
-
-```js
-LIKE("cart", "ca_t") // true
-LIKE("carrot", "ca_t") // false
-LIKE("carrot", "ca%t") // true
-
-LIKE("foo bar baz", "bar") // false
-LIKE("foo bar baz", "%bar%") // true
-LIKE("bar", "%bar%") // true
-
-LIKE("FoO bAr BaZ", "fOo%bAz") // false
-LIKE("FoO bAr BaZ", "fOo%bAz", true) // true
-```
-
-LOWER()
--------
-
-`LOWER(value) → lowerCaseString`
-
-Convert upper-case letters in *value* to their lower-case counterparts.
-All other characters are returned unchanged.
-
-- **value** (string): a string
-- returns **lowerCaseString** (string): *value* with upper-case characters converted
- to lower-case characters
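-
-For example:
-
-```js
-LOWER("AVOcado") // "avocado"
-```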
-
-LTRIM()
--------
-
-`LTRIM(value, chars) → strippedString`
-
-Return the string *value* with whitespace stripped from the start only.
-
-To strip from the end only, see [RTRIM()](#rtrim).
-To strip both sides, see [TRIM()](#trim).
-
-- **value** (string): a string
-- **chars** (string, *optional*): override the characters that should
- be removed from the string. It defaults to `\r\n \t` (i.e. `0x0d`, `0x0a`,
- `0x20` and `0x09`).
-- returns **strippedString** (string): *value* without *chars* at the
- left-hand side
-
-```js
-LTRIM("foo bar") // "foo bar"
-LTRIM(" foo bar ") // "foo bar "
-LTRIM("--==[foo-bar]==--", "-=[]") // "foo-bar]==--"
-```
-
-MD5()
------
-
-`MD5(text) → hash`
-
-Calculate the MD5 checksum for *text* and return it in a hexadecimal
-string representation.
-
-- **text** (string): a string
-- returns **hash** (string): MD5 checksum as hex string
-
-```js
-MD5("foobar") // "3858f62230ac3c915f300c664312c63f"
-```
-
-RANDOM_TOKEN()
---------------
-
-`RANDOM_TOKEN(length) → randomString`
-
-Generate a pseudo-random token string with the specified length.
-The algorithm for token generation should be treated as opaque.
-
-- **length** (number): desired string length for the token. It must be greater
- than 0 and at most 65536.
-- returns **randomString** (string): a generated token consisting of lowercase
- letters, uppercase letters and numbers
-
-```js
-RANDOM_TOKEN(8) // "zGl09z42"
-RANDOM_TOKEN(8) // "m9w50Ft9"
-```
-
-REGEX_MATCHES()
----------------
-
-`REGEX_MATCHES(text, regex, caseInsensitive) → stringArray`
-
-Return the matches in the given string *text*, using the *regex*.
-
-- **text** (string): the string to search in
-- **regex** (string): a regular expression to use for matching the *text*
-- **caseInsensitive** (bool, *optional*): if set to *true*, the matching will be
- case-insensitive. The default is *false*.
-- returns **stringArray** (array): an array of strings containing the matches
-
-The regular expression may consist of literal characters and the following
-characters and sequences:
-
-- `.` – the dot matches any single character except line terminators.
- To include line terminators, use `[\s\S]` instead to simulate `.` with *DOTALL* flag.
-- `\d` – matches a single digit, equivalent to `[0-9]`
-- `\s` – matches a single whitespace character
-- `\S` – matches a single non-whitespace character
-- `\t` – matches a tab character
-- `\r` – matches a carriage return
-- `\n` – matches a line-feed character
-- `[xyz]` – set of characters. Matches any of the enclosed characters
- (here: *x*, *y* or *z*)
-- `[^xyz]` – negated set of characters. Matches any other character than the
- enclosed ones (i.e. anything but *x*, *y* or *z* in this case)
-- `[x-z]` – range of characters. Matches any of the characters in the
- specified range, e.g. `[0-9A-F]` to match any character in
- *0123456789ABCDEF*
-- `[^x-z]` – negated range of characters. Matches any other character than the
- ones specified in the range
-- `(xyz)` – defines and matches a pattern group
-- `(x|y)` – matches either *x* or *y*
-- `^` – matches the beginning of the string (e.g. `^xyz`)
-- `$` – matches the end of the string (e.g. `xyz$`)
-
-Note that the characters `.`, `*`, `?`, `[`, `]`, `(`, `)`, `{`, `}`, `^`,
-and `$` have a special meaning in regular expressions and may need to be
-escaped using a backslash, which requires escaping itself (`\\`). A literal
-backslash needs to be escaped using another escaped backslash, i.e. `\\\\`.
-In arangosh, the amount of backslashes needs to be doubled.
-
-Characters and sequences may optionally be repeated using the following
-quantifiers:
-
-- `x*` – matches zero or more occurrences of *x*
-- `x+` – matches one or more occurrences of *x*
-- `x?` – matches one or zero occurrences of *x*
-- `x{y}` – matches exactly *y* occurrences of *x*
-- `x{y,z}` – matches between *y* and *z* occurrences of *x*
-- `x{y,}` – matches at least *y* occurrences of *x*
-
-Note that `xyz+` matches *xyzzz*, but if you want to match *xyzxyz* instead,
-you need to define a pattern group by wrapping the sub-expression in parentheses
-and place the quantifier right behind it: `(xyz)+`.
-
-If the regular expression in *regex* is invalid, a warning will be raised
-and the function will return *null*.
-
-```js
-REGEX_MATCHES("My-us3r_n4m3", "^[a-z0-9_-]{3,16}$", true) // ["My-us3r_n4m3"]
-REGEX_MATCHES("#4d82h4", "^#?([a-f0-9]{6}|[a-f0-9]{3})$", true) // null
-REGEX_MATCHES("john@doe.com", "^([a-z0-9_\.-]+)@([\da-z-]+)\.([a-z\.]{2,6})$", false) // ["john@doe.com", "john", "doe", "com"]
-```
-
-REGEX_SPLIT()
--------------
-
-`REGEX_SPLIT(text, splitExpression, caseInsensitive, limit) → stringArray`
-
-Split the given string *text* into a list of strings, using the *splitExpression*.
-
-- **text** (string): the string to split
-- **splitExpression** (string): a regular expression to use for splitting the *text*
-- **caseInsensitive** (bool, *optional*): if set to *true*, the matching will be
- case-insensitive. The default is *false*.
-- **limit** (number, *optional*): limit the number of split values in the result.
- If no *limit* is given, the number of splits returned is not bounded.
-- returns **stringArray** (array): an array of strings
-
-The regular expression may consist of literal characters and the following
-characters and sequences:
-
-- `.` – the dot matches any single character except line terminators.
- To include line terminators, use `[\s\S]` instead to simulate `.` with *DOTALL* flag.
-- `\d` – matches a single digit, equivalent to `[0-9]`
-- `\s` – matches a single whitespace character
-- `\S` – matches a single non-whitespace character
-- `\t` – matches a tab character
-- `\r` – matches a carriage return
-- `\n` – matches a line-feed character
-- `[xyz]` – set of characters. Matches any of the enclosed characters
- (here: *x*, *y* or *z*)
-- `[^xyz]` – negated set of characters. Matches any other character than the
-  enclosed ones (i.e. anything but *x*, *y* or *z* in this case)
-- `[x-z]` – range of characters. Matches any of the characters in the
- specified range, e.g. `[0-9A-F]` to match any character in
- *0123456789ABCDEF*
-- `[^x-z]` – negated range of characters. Matches any other character than the
-  ones specified in the range
-- `(xyz)` – defines and matches a pattern group
-- `(x|y)` – matches either *x* or *y*
-- `^` – matches the beginning of the string (e.g. `^xyz`)
-- `$` – matches the end of the string (e.g. `xyz$`)
-
-Note that the characters `.`, `*`, `?`, `[`, `]`, `(`, `)`, `{`, `}`, `^`,
-and `$` have a special meaning in regular expressions and may need to be
-escaped using a backslash, which requires escaping itself (`\\`). A literal
-backslash needs to be escaped using another escaped backslash, i.e. `\\\\`.
-In arangosh, the number of backslashes needs to be doubled.
-
-Characters and sequences may optionally be repeated using the following
-quantifiers:
-
-- `x*` – matches zero or more occurrences of *x*
-- `x+` – matches one or more occurrences of *x*
-- `x?` – matches one or zero occurrences of *x*
-- `x{y}` – matches exactly *y* occurrences of *x*
-- `x{y,z}` – matches between *y* and *z* occurrences of *x*
-- `x{y,}` – matches at least *y* occurrences of *x*
-
-Note that `xyz+` matches *xyzzz*, but if you want to match *xyzxyz* instead,
-you need to define a pattern group by wrapping the sub-expression in parentheses
-and place the quantifier right behind it: `(xyz)+`.
-
-If the regular expression in *splitExpression* is invalid, a warning will be raised
-and the function will return *null*.
-
-```js
-REGEX_SPLIT("This is a line.\n This is yet another line\r\n This again is a line.\r Mac line ", "\.?(\n|\r|\r\n)", true, 4) // ["This is a line", "\n", " This is yet another lin", "\r"]
-REGEX_SPLIT("hypertext language, programming", "[\s, ]+") // ["hypertext", "language", "programming"]
-REGEX_SPLIT("ca,bc,a,bca,bca,bc", "a,b", true, 5) // ["c", "c,", "c", "c", "c"]
-```
-
-REGEX_TEST()
-------------
-
-`REGEX_TEST(text, search, caseInsensitive) → bool`
-
-Check whether the pattern *search* is contained in the string *text*,
-using regular expression matching.
-
-- **text** (string): the string to search in
-- **search** (string): a regular expression search pattern
-- **caseInsensitive** (bool, *optional*): if set to *true*, the matching will be
-  case-insensitive. The default is *false*.
-- returns **bool** (bool): *true* if the pattern is contained in *text*,
-  and *false* otherwise
-
-The regular expression may consist of literal characters and the following
-characters and sequences:
-
-- `.` – the dot matches any single character except line terminators.
- To include line terminators, use `[\s\S]` instead to simulate `.` with *DOTALL* flag.
-- `\d` – matches a single digit, equivalent to `[0-9]`
-- `\s` – matches a single whitespace character
-- `\S` – matches a single non-whitespace character
-- `\t` – matches a tab character
-- `\r` – matches a carriage return
-- `\n` – matches a line-feed character
-- `[xyz]` – set of characters. Matches any of the enclosed characters
- (here: *x*, *y* or *z*)
-- `[^xyz]` – negated set of characters. Matches any other character than the
- enclosed ones (i.e. anything but *x*, *y* or *z* in this case)
-- `[x-z]` – range of characters. Matches any of the characters in the
- specified range, e.g. `[0-9A-F]` to match any character in
- *0123456789ABCDEF*
-- `[^x-z]` – negated range of characters. Matches any other character than the
- ones specified in the range
-- `(xyz)` – defines and matches a pattern group
-- `(x|y)` – matches either *x* or *y*
-- `^` – matches the beginning of the string (e.g. `^xyz`)
-- `$` – matches the end of the string (e.g. `xyz$`)
-
-Note that the characters `.`, `*`, `?`, `[`, `]`, `(`, `)`, `{`, `}`, `^`,
-and `$` have a special meaning in regular expressions and may need to be
-escaped using a backslash, which requires escaping itself (`\\`). A literal
-backslash needs to be escaped using another escaped backslash, i.e. `\\\\`.
-In arangosh, the number of backslashes needs to be doubled.
-
-Characters and sequences may optionally be repeated using the following
-quantifiers:
-
-- `x*` – matches zero or more occurrences of *x*
-- `x+` – matches one or more occurrences of *x*
-- `x?` – matches one or zero occurrences of *x*
-- `x{y}` – matches exactly *y* occurrences of *x*
-- `x{y,z}` – matches between *y* and *z* occurrences of *x*
-- `x{y,}` – matches at least *y* occurrences of *x*
-
-Note that `xyz+` matches *xyzzz*, but if you want to match *xyzxyz* instead,
-you need to define a pattern group by wrapping the sub-expression in parentheses
-and place the quantifier right behind it: `(xyz)+`.
-
-If the regular expression in *search* is invalid, a warning will be raised
-and the function will return *null*.
-
-```js
-REGEX_TEST("the quick brown fox", "the.*fox") // true
-REGEX_TEST("the quick brown fox", "^(a|the)\s+(quick|slow).*f.x$") // true
-REGEX_TEST("the\nquick\nbrown\nfox", "^the(\n[a-w]+)+\nfox$") // true
-```
-
-REGEX_REPLACE()
----------------
-
-`REGEX_REPLACE(text, search, replacement, caseInsensitive) → string`
-
-Replace the pattern *search* with the string *replacement* in the string
-*text*, using regular expression matching.
-
-- **text** (string): the string to search in
-- **search** (string): a regular expression search pattern
-- **replacement** (string): the string to replace the *search* pattern with
-- **caseInsensitive** (bool, *optional*): if set to *true*, the matching will be
-  case-insensitive. The default is *false*.
-- returns **string** (string): the string *text* with the *search* regex
-  pattern replaced with the *replacement* string wherever the pattern exists
-  in *text*
-
-For more details about the rules for characters and sequences, refer to
-[REGEX_TEST()](#regextest).
-
-If the regular expression in *search* is invalid, a warning will be raised
-and the function will return *null*.
-
-```js
-REGEX_REPLACE("the quick brown fox", "the.*fox", "jumped over") // jumped over
-REGEX_REPLACE("the quick brown fox", "o", "i") // the quick briwn fix
-```
-
-REVERSE()
----------
-
-`REVERSE(value) → reversedString`
-
-Return the reverse of the string *value*.
-
-- **value** (string): a string
-- returns **reversedString** (string): a new string with the characters in
- reverse order
-
-```js
-REVERSE("foobar") // "raboof"
-REVERSE("电脑坏了") // "了坏脑电"
-```
-
-RIGHT()
--------
-
-`RIGHT(value, length) → substring`
-
-Return the *length* rightmost characters of the string *value*.
-
-To return the leftmost characters, see [LEFT()](#left).
-To take a part from an arbitrary position off the string,
-see [SUBSTRING()](#substring).
-
-- **value** (string): a string
-- **length** (number): how many characters to return
-- returns **substring** (string): at most *length* characters of *value*,
- starting on the right-hand side of the string
-
-```js
-RIGHT("foobar", 3) // "bar"
-RIGHT("foobar", 10) // "foobar"
-```
-
-RTRIM()
--------
-
-`RTRIM(value, chars) → strippedString`
-
-Return the string *value* with whitespace stripped from the end only.
-
-To strip from the start only, see [LTRIM()](#ltrim).
-To strip both sides, see [TRIM()](#trim).
-
-- **value** (string): a string
-- **chars** (string, *optional*): override the characters that should
- be removed from the string. It defaults to `\r\n \t` (i.e. `0x0d`, `0x0a`,
- `0x20` and `0x09`).
-- returns **strippedString** (string): *value* without *chars* at the
- right-hand side
-
-```js
-RTRIM("foo bar") // "foo bar"
-RTRIM(" foo bar ") // " foo bar"
-RTRIM("--==[foo-bar]==--", "-=[]") // "--==[foo-bar"
-```
-
-SHA1()
-------
-
-`SHA1(text) → hash`
-
-Calculate the SHA1 checksum for *text* and return it in a hexadecimal
-string representation.
-
-- **text** (string): a string
-- returns **hash** (string): SHA1 checksum as hex string
-
-```js
-SHA1("foobar") // "8843d7f92416211de9ebb963ff4ce28125932878"
-```
-
-SHA512()
---------
-
-`SHA512(text) → hash`
-
-Calculate the SHA512 checksum for *text* and return it in a hexadecimal
-string representation.
-
-- **text** (string): a string
-- returns **hash** (string): SHA512 checksum as hex string
-
-```js
-SHA512("foobar") // "0a50261ebd1a390fed2bf326f2673c145582a6342d523204973d0219337f81616a8069b012587cf5635f6925f1b56c360230c19b273500ee013e030601bf2425"
-```
-
-SPLIT()
--------
-
-`SPLIT(value, separator, limit) → strArray`
-
-Split the given string *value* into a list of strings, using the *separator*.
-
-- **value** (string): a string
-- **separator** (string|array): either a string or a list of strings. If *separator* is
-  an empty string, *value* will be split into a list of characters. If no *separator*
-  is specified, *value* will be returned as an array.
-- **limit** (number, *optional*): limit the number of split values in the result.
- If no *limit* is given, the number of splits returned is not bounded.
-- returns **strArray** (array): an array of strings
-
-```js
-SPLIT( "foo-bar-baz", "-" ) // [ "foo", "bar", "baz" ]
-SPLIT( "foo-bar-baz", "-", 1 ) // [ "foo" ]
-SPLIT( "foo, bar & baz", [ ", ", " & " ] ) // [ "foo", "bar", "baz" ]
-```
-
-SOUNDEX()
------------
-
-`SOUNDEX(value) → soundexString`
-
-Return the soundex fingerprint of *value*.
-
-- **value** (string): a string
-- returns **soundexString** (string): a soundex fingerprint of *value*
-
-```js
-SOUNDEX( "example" ) // "E251"
-SOUNDEX( "ekzampul") // "E251"
-SOUNDEX( "soundex" ) // "S532"
-SOUNDEX( "sounteks" ) // "S532"
-```
-
-SUBSTITUTE()
-------------
-
-`SUBSTITUTE(value, search, replace, limit) → substitutedString`
-
-Replace search values in the string *value*.
-
-- **value** (string): a string
-- **search** (string|array): if *search* is a string, all occurrences of
- *search* will be replaced in *value*. If *search* is an array of strings,
- each occurrence of a value contained in *search* will be replaced by the
-  corresponding array element in *replace*. If *replace* has fewer list items
- than *search*, occurrences of unmapped *search* items will be replaced by an
- empty string.
-- **replace** (string|array, *optional*): a replacement string, or an array of
-  strings to replace the corresponding elements of *search* with. It can have fewer
-  elements than *search* or be left out to remove matches. If *search* is an array
- but *replace* is a string, then all matches will be replaced with *replace*.
-- **limit** (number, *optional*): cap the number of replacements to this value
-- returns **substitutedString** (string): a new string with matches replaced
- (or removed)
-
-```js
-SUBSTITUTE( "the quick brown foxx", "quick", "lazy" )
-// "the lazy brown foxx"
-
-SUBSTITUTE( "the quick brown foxx", [ "quick", "foxx" ], [ "slow", "dog" ] )
-// "the slow brown dog"
-
-SUBSTITUTE( "the quick brown foxx", [ "the", "foxx" ], [ "that", "dog" ], 1 )
-// "that quick brown foxx"
-
-SUBSTITUTE( "the quick brown foxx", [ "the", "quick", "foxx" ], [ "A", "VOID!" ] )
-// "A VOID! brown "
-
-SUBSTITUTE( "the quick brown foxx", [ "quick", "foxx" ], "xx" )
-// "the xx brown xx"
-```
-
-`SUBSTITUTE(value, mapping, limit) → substitutedString`
-
-Alternatively, *search* and *replace* can be specified in a combined value.
-
-- **value** (string): a string
-- **mapping** (object): a lookup map with search strings as keys and replacement
- strings as values. Empty strings and *null* as values remove matches.
-  Note that this variant does not guarantee any particular processing order of
-  the search strings: if search strings overlap, sometimes the first one may win
-  and sometimes the second. If you need to ensure a defined precedence, use the
-  array-based invocation method instead.
-- **limit** (number, *optional*): cap the number of replacements to this value
-- returns **substitutedString** (string): a new string with matches replaced
- (or removed)
-
-```js
-SUBSTITUTE("the quick brown foxx", {
- "quick": "small",
- "brown": "slow",
- "foxx": "ant"
-})
-// "the small slow ant"
-
-SUBSTITUTE("the quick brown foxx", {
- "quick": "",
- "brown": null,
- "foxx": "ant"
-})
-// "the ant"
-
-SUBSTITUTE("the quick brown foxx", {
- "quick": "small",
- "brown": "slow",
- "foxx": "ant"
-}, 2)
-// "the small slow foxx"
-```
-
-SUBSTRING()
------------
-
-`SUBSTRING(value, offset, length) → substring`
-
-Return a substring of *value*.
-
-To return the rightmost characters, see [RIGHT()](#right).
-To return the leftmost characters, see [LEFT()](#left).
-
-- **value** (string): a string
-- **offset** (number): start at *offset*, offsets start at position 0
-- **length** (number, *optional*): at most *length* characters, omit to get the
- substring from *offset* to the end of the string
-- returns **substring** (string): a substring of *value*
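-
-For example (based on the parameter description above; offsets are zero-based):
-
-```js
-SUBSTRING("foobar", 3) // "bar"
-SUBSTRING("foobar", 1, 2) // "oo"
-SUBSTRING("foobar", 0, 3) // "foo"
-```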
-
-TOKENS()
---------
-
-`TOKENS(input, analyzer) → strArray`
-
-Split the *input* string into a token array, using the specified *analyzer*.
-The resulting array can be used e.g. in subsequent `FILTER` statements with the *IN* operator.
-This can help to better understand how a specific analyzer is going to behave.
-
-- **input** (string): text to tokenize
-- **analyzer** (string): one of the available
- [ArangoSearch string analyzers](../../Manual/Views/ArangoSearch/Analyzers.html)
-- returns **strArray** (array): array of strings, each element being a token
-
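-For illustration, a sketch assuming the built-in `text_en` analyzer is
-available (the exact tokens depend on the analyzer configuration, e.g.
-stemming and case conversion):
-
-```js
-TOKENS("a quick brown fox jumps", "text_en")
-// [ "a", "quick", "brown", "fox", "jump" ]
-```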
-
-TO_BASE64()
------------
-
-`TO_BASE64(value) → toBase64String`
-
-Return the base64 representation of *value*.
-
-- **value** (string): a string
-- returns **toBase64String** (string): a base64 representation of *value*
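-
-For example (the expected values follow from standard Base64 encoding of the
-UTF-8 bytes):
-
-```js
-TO_BASE64("ABC.") // "QUJDLg=="
-TO_BASE64("foobar") // "Zm9vYmFy"
-```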
-
-TO_HEX()
------------
-
-`TO_HEX(value) → toHexString`
-
-Return the hex representation of *value*.
-
-- **value** (string): a string
-- returns **toHexString** (string): a hex representation of *value*
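-
-For example (assuming a lowercase hexadecimal representation of the UTF-8 bytes):
-
-```js
-TO_HEX("ABC.") // "4142432e"
-TO_HEX("ArangoDB") // "4172616e676f4442"
-```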
-
-TRIM()
-------
-
-`TRIM(value, type) → strippedString`
-
-Return the string *value* with whitespace stripped from the start and/or end.
-
-The optional *type* parameter specifies from which parts of the string the
-whitespace is stripped. However, [LTRIM()](#ltrim) and [RTRIM()](#rtrim)
-are preferred.
-
-- **value** (string): a string
-- **type** (number, *optional*): strip whitespace from the
- - `0` – start and end of the string (default)
- - `1` – start of the string only
- - `2` – end of the string only
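-
-For example (illustrating the *type* parameter; note the remaining leading or
-trailing blanks):
-
-```js
-TRIM("  foobar  ", 0) // "foobar"
-TRIM("  foobar  ", 1) // "foobar  "
-TRIM("  foobar  ", 2) // "  foobar"
-```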
-
-`TRIM(value, chars) → strippedString`
-
-Return the string *value* with whitespace stripped from the start and end.
-
-- **value** (string): a string
-- **chars** (string, *optional*): override the characters that should
- be removed from the string. It defaults to `\r\n \t` (i.e. `0x0d`, `0x0a`,
- `0x20` and `0x09`).
-- returns **strippedString** (string): *value* without *chars* on both sides
-
-```js
-TRIM("foo bar") // "foo bar"
-TRIM(" foo bar ") // "foo bar"
-TRIM("--==[foo-bar]==--", "-=[]") // "foo-bar"
-TRIM(" foobar\t \r\n ") // "foobar"
-TRIM(";foo;bar;baz, ", ",; ") // "foo;bar;baz"
-```
-
-UPPER()
--------
-
-`UPPER(value) → upperCaseString`
-
-Convert lower-case letters in *value* to their upper-case counterparts.
-All other characters are returned unchanged.
-
-- **value** (string): a string
-- returns **upperCaseString** (string): *value* with lower-case characters converted
- to upper-case characters
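-
-For example:
-
-```js
-UPPER("AVOcado") // "AVOCADO"
-```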
-
-UUID()
-------
-
-`UUID() → UUIDString`
-
-Return a universally unique identifier value.
-
-- returns **UUIDString** (string): a universally unique identifier
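-
-For example (the value shown is purely illustrative; every call returns a new,
-random identifier):
-
-```js
-UUID() // e.g. "c2b9bda7-bdf5-4b7f-85a2-ada36d220e29"
-```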
diff --git a/Documentation/Books/AQL/Functions/TypeCast.md b/Documentation/Books/AQL/Functions/TypeCast.md
deleted file mode 100644
index 601bf9d82b0a..000000000000
--- a/Documentation/Books/AQL/Functions/TypeCast.md
+++ /dev/null
@@ -1,273 +0,0 @@
-Type check and cast functions
-=============================
-
-Some operators expect their operands to have a certain data type. For example,
-logical operators expect their operands to be boolean values, and the arithmetic
-operators expect their operands to be numeric values. If an operation is performed
-with operands of other types, an automatic conversion to the expected types is
-tried. This is called implicit type casting. It helps to avoid query
-aborts.
-
-Type casts can also be performed upon request by invoking a type cast function.
-This is called explicit type casting. AQL offers several functions for this.
-Each of these functions takes an operand of any data type and returns a result
-value with the type corresponding to the function name. For example, *TO_NUMBER()*
-will return a numeric value.
-
-Type casting functions
-----------------------
-
-### TO_BOOL()
-
-`TO_BOOL(value) → bool`
-
-Take an input *value* of any type and convert it into the appropriate
-boolean value.
-
-- **value** (any): input of arbitrary type
-- returns **bool** (boolean):
- - *null* is converted to *false*
- - Numbers are converted to *true*, except for 0, which is converted to *false*
- - Strings are converted to *true* if they are non-empty, and to *false* otherwise
- - Arrays are always converted to *true* (even if empty)
- - Objects / documents are always converted to *true*
-
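-A few explicit conversion examples, derived from the rules above:
-
-```js
-TO_BOOL(null) // false
-TO_BOOL(0) // false
-TO_BOOL("") // false
-TO_BOOL("abc") // true
-TO_BOOL([ ]) // true
-TO_BOOL({ }) // true
-```
-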
-It's also possible to use double negation to cast to boolean:
-
-```js
-!!1 // true
-!!0 // false
-!!-0.0 // false
-not not 1 // true
-!!"non-empty string" // true
-!!"" // false
-```
-
-`TO_BOOL()` is preferred, however, because it states the intention more clearly.
-
-### TO_NUMBER()
-
-`TO_NUMBER(value) → number`
-
-Take an input *value* of any type and convert it into a numeric value.
-
-- **value** (any): input of arbitrary type
-- returns **number** (number):
- - *null* and *false* are converted to the value *0*
- - *true* is converted to *1*
- - Numbers keep their original value
- - Strings are converted to their numeric equivalent if the string contains a
- valid representation of a number. Whitespace at the start and end of the string
- is allowed. String values that do not contain any valid representation of a number
- will be converted to the number *0*.
- - An empty array is converted to *0*, an array with one member is converted into the
- result of `TO_NUMBER()` for its sole member. An array with two or more members is
- converted to the number *0*.
- - An object / document is converted to the number *0*.
-
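-A few explicit conversion examples, derived from the rules above:
-
-```js
-TO_NUMBER(null) // 0
-TO_NUMBER(true) // 1
-TO_NUMBER("12.3") // 12.3
-TO_NUMBER("foo") // 0
-TO_NUMBER([ 5 ]) // 5
-TO_NUMBER([ 1, 2 ]) // 0
-TO_NUMBER({ }) // 0
-```
-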
- A unary plus will also cast to a number, but `TO_NUMBER()` is the preferred way:
- ```js
-+'5' // 5
-+[8] // 8
-+[8,9] // 0
-+{} // 0
- ```
- A unary minus works likewise, except that a numeric value is also negated:
- ```js
--'5' // -5
--[8] // -8
--[8,9] // 0
--{} // 0
- ```
-
-### TO_STRING()
-
-`TO_STRING(value) → str`
-
-Take an input *value* of any type and convert it into a string value.
-
-- **value** (any): input of arbitrary type
-- returns **str** (string):
- - *null* is converted to an empty string *""*
- - *false* is converted to the string *"false"*, *true* to the string *"true"*
- - Numbers are converted to their string representations. This can also be a
- scientific notation (e.g. "2e-7")
- - Arrays and objects / documents are converted to string representations,
- which means JSON-encoded strings with no additional whitespace
-
-```js
-TO_STRING(null) // ""
-TO_STRING(true) // "true"
-TO_STRING(false) // "false"
-TO_STRING(123) // "123"
-TO_STRING(+1.23) // "1.23"
-TO_STRING(-1.23) // "-1.23"
-TO_STRING(0.0000002) // "2e-7"
-TO_STRING( [1, 2, 3] ) // "[1,2,3]"
-TO_STRING( { foo: "bar", baz: null } ) // "{\"foo\":\"bar\",\"baz\":null}"
-```
-
-### TO_ARRAY()
-
-`TO_ARRAY(value) → array`
-
-Take an input *value* of any type and convert it into an array value.
-
-- **value** (any): input of arbitrary type
-- returns **array** (array):
- - *null* is converted to an empty array
- - Boolean values, numbers and strings are converted to an array containing
- the original value as its single element
- - Arrays keep their original value
- - Objects / documents are converted to an array containing their attribute
- **values** as array elements, just like [VALUES()](Document.md#values)
-
-```js
-TO_ARRAY(null) // []
-TO_ARRAY(false) // [false]
-TO_ARRAY(true) // [true]
-TO_ARRAY(5) // [5]
-TO_ARRAY("foo") // ["foo"]
-TO_ARRAY([1, 2, "foo"]) // [1, 2, "foo"]
-TO_ARRAY({foo: 1, bar: 2, baz: [3, 4, 5]}) // [1, 2, [3, 4, 5]]
-```
-
-### TO_LIST()
-
-`TO_LIST(value) → array`
-
-This is an alias for [TO_ARRAY()](#toarray).
-
-Type check functions
---------------------
-
-AQL also offers functions to check the data type of a value at runtime. The
-following type check functions are available. Each of these functions takes an
-argument of any data type and returns true if the value has the type that is
-checked for, and false otherwise.
-
-### IS_NULL()
-
-`IS_NULL(value) → bool`
-
-Check whether *value* is *null*. Identical to `value == null`.
-
-To test if an attribute exists, see [HAS()](Document.md#has) instead.
-
-- **value** (any): value to test
-- returns **bool** (boolean): *true* if *value* is `null`,
- *false* otherwise
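-
-For example:
-
-```js
-IS_NULL(null) // true
-IS_NULL(0) // false
-IS_NULL("") // false
-```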
-
-### IS_BOOL()
-
-`IS_BOOL(value) → bool`
-
-Check whether *value* is a *boolean* value
-
-- **value** (any): value to test
-- returns **bool** (boolean): *true* if *value* is `true` or `false`,
- *false* otherwise
-
-### IS_NUMBER()
-
-`IS_NUMBER(value) → bool`
-
-Check whether *value* is a number
-
-- **value** (any): value to test
-- returns **bool** (boolean): *true* if *value* is a number,
- *false* otherwise
-
-### IS_STRING()
-
-`IS_STRING(value) → bool`
-
-Check whether *value* is a string
-
-- **value** (any): value to test
-- returns **bool** (boolean): *true* if *value* is a string,
- *false* otherwise
-
-### IS_ARRAY()
-
-`IS_ARRAY(value) → bool`
-
-Check whether *value* is an array / list
-
-- **value** (any): value to test
-- returns **bool** (boolean): *true* if *value* is an array / list,
- *false* otherwise
-
-### IS_LIST()
-
-`IS_LIST(value) → bool`
-
-This is an alias for [IS_ARRAY()](#isarray)
-
-### IS_OBJECT()
-
-`IS_OBJECT(value) → bool`
-
-Check whether *value* is an object / document
-
-- **value** (any): value to test
-- returns **bool** (boolean): *true* if *value* is an object / document,
- *false* otherwise
-
-### IS_DOCUMENT()
-
-`IS_DOCUMENT(value) → bool`
-
-This is an alias for [IS_OBJECT()](#isobject)
-
-### IS_DATESTRING()
-
-`IS_DATESTRING(str) → bool`
-
-Check whether *str* is a string that can be used in a date function.
-This includes partial dates such as *"2015"* or *"2015-10"* and strings
-containing properly formatted but invalid dates such as *"2015-02-31"*.
-
-- **str** (string): date string to test
-- returns **bool** (boolean): *true* if *str* is a correctly formatted date string,
- *false* otherwise including all non-string values, even if some of them may be usable
- in date functions (numeric timestamps)
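-
-For example (a properly formatted but invalid date still counts as a date
-string, while non-string values do not):
-
-```js
-IS_DATESTRING("2015-10") // true
-IS_DATESTRING("2015-02-31") // true
-IS_DATESTRING("foobar") // false
-IS_DATESTRING(1234) // false
-```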
-
-### IS_KEY()
-
-`IS_KEY(str) → bool`
-
-Check whether *str* is a string that can be used as a
-document key, i.e. as the value of the *_key* attribute.
-See [Naming Conventions for Document Keys](../../Manual/DataModeling/NamingConventions/DocumentKeys.html).
-
-- **str** (string): document key to test
-- returns **bool** (boolean): whether *str* can be used as document key
-
-### TYPENAME()
-
-`TYPENAME(value) → typeName`
-
-Return the data type name of *value*.
-
-- **value** (any): input of arbitrary type
-- returns **typeName** (string): data type name of *value*
- (`"null"`, `"bool"`, `"number"`, `"string"`, `"array"` or `"object"`)
-
-Example Value | Data Type Name
----------------:|---------------
-`null` | `"null"`
-`true` | `"bool"`
-`false` | `"bool"`
-`123` | `"number"`
-`-4.56` | `"number"`
-`0` | `"number"`
-`"foobar"` | `"string"`
-`"123"` | `"string"`
-`""` | `"string"`
-`[ 1, 2, 3 ]` | `"array"`
-`["foo",true]` | `"array"`
-`[ ]` | `"array"`
-`{"foo":"bar"}` | `"object"`
-`{"foo": null}` | `"object"`
-`{ }` | `"object"`
diff --git a/Documentation/Books/AQL/Fundamentals/BindParameters.md b/Documentation/Books/AQL/Fundamentals/BindParameters.md
deleted file mode 100644
index 490d5e980e8a..000000000000
--- a/Documentation/Books/AQL/Fundamentals/BindParameters.md
+++ /dev/null
@@ -1,122 +0,0 @@
-Bind parameters
-===============
-
-AQL supports the usage of bind parameters, which allow the query text to be
-separated from literal values used in the query. It is good practice to separate the
-query text from the literal values because this will prevent (malicious)
-injection of keywords and other collection names into an existing query. This
-injection would be dangerous because it may change the meaning of an existing
-query.
-
-Using bind parameters, the meaning of an existing query cannot be changed. Bind
-parameters can be used everywhere in a query where literals can be used.
-
-The syntax for bind parameters is *@name* where *@* signifies that this is a
-bind parameter and *name* is the actual parameter name. Parameter names must
-start with any of the letters *a* to *z* (upper or lower case) or a digit
-(*0* to *9*), and can be followed by any letter, digit or the underscore symbol.
-
-```js
-FOR u IN users
- FILTER u.id == @id && u.name == @name
- RETURN u
-```
-
-The bind parameter values need to be passed along with the query when it is
-executed, but not as part of the query text itself. In the web interface,
-there is a pane next to the query editor where the bind parameters can be
-entered. When using `db._query()` (in arangosh, for instance), an
-object of key-value pairs can be passed for the parameters. Such an object
-can also be passed to the HTTP API endpoint `_api/cursor`, as attribute
-value for the key *bindVars*:
-
-```json
-{
- "query": "FOR u IN users FILTER u.id == @id && u.name == @name RETURN u",
- "bindVars": {
- "id": 123,
- "name": "John Smith"
- }
-}
-```
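-
-In arangosh, the same query and bind parameter values could be passed to
-`db._query()` like this (a sketch; it assumes a collection named *users* exists):
-
-```js
-db._query(
-  "FOR u IN users FILTER u.id == @id && u.name == @name RETURN u",
-  { id: 123, name: "John Smith" }
-).toArray();
-```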
-
-Bind parameters that are declared in the query must also be passed a parameter
-value, or the query will fail. Specifying parameters that are not declared in
-the query will result in an error too.
-
-Bind variables represent a value like a string, and must not be put in quotes
-in the AQL code:
-
-```js
-FILTER u.name == "@name" // wrong
-FILTER u.name == @name // correct
-```
-
-If you need to do string processing (concatenation, etc.) in the query, you
-need to use [string functions](../Functions/String.md) to do so:
-
-```js
-FOR u IN users
- FILTER u.id == CONCAT('prefix', @id, 'suffix') && u.name == @name
- RETURN u
-```
-
-Bind parameters can be used with both the dot notation and the square
-bracket notation for sub-attribute access. They can also be chained:
-
-```js
-LET doc = { foo: { bar: "baz" } }
-
-RETURN doc.@attr.@subattr
-// or
-RETURN doc[@attr][@subattr]
-```
-
-```json
-{
- "attr": "foo",
- "subattr": "bar"
-}
-```
-
-Both variants in the above example return `[ "baz" ]` as the query result.
-
-The whole attribute path, for highly nested data in particular, can also be
-specified using the dot notation and a single bind parameter, by passing an
-array of strings as parameter value. The elements of the array represent the
-attribute keys of the path:
-
-```js
-LET doc = { a: { b: { c: 1 } } }
-RETURN doc.@attr
-```
-
-```json
-{ "attr": [ "a", "b", "c" ] }
-```
-
-The example query returns `[ 1 ]` as result. Note that `{ "attr": "a.b.c" }`
-would return the value of an attribute called *a.b.c*, not the value of
-attribute *c* with the parents *a* and *b* as `[ "a", "b", "c" ]` would.
-
-A special type of bind parameter exists for injecting collection names. This
-type of bind parameter has a name prefixed with an additional *@* symbol (thus
-when using the bind parameter in a query, two *@* symbols must be used).
-
-```js
-FOR u IN @@collection
- FILTER u.active == true
- RETURN u
-```
-
-```json
-{ "@collection": "myCollection" }
-```
-
-Keywords such as `FOR`, `FILTER`, `IN` or `INBOUND`, as well as function calls,
-can't be replaced by bind parameter values.
-
-Specific information about parameters binding can also be found in:
-
-- [AQL with Web Interface](../Invocation/WithWebInterface.md)
-- [AQL with Arangosh](../Invocation/WithArangosh.md)
-- [HTTP Interface for AQL Queries](../../HTTP/AqlQueryCursor/index.html)
diff --git a/Documentation/Books/AQL/Fundamentals/DataTypes.md b/Documentation/Books/AQL/Fundamentals/DataTypes.md
deleted file mode 100644
index 9651d35f44b9..000000000000
--- a/Documentation/Books/AQL/Fundamentals/DataTypes.md
+++ /dev/null
@@ -1,263 +0,0 @@
-Data types
-==========
-
-AQL supports both *primitive* data types consisting of exactly one value and
-*compound* data types comprised of multiple values. The following types are
-available:
-
-| Data type | Description |
-|------------:|-------------|
-| **null** | An empty value, also: the absence of a value
-| **boolean** | Boolean truth value with possible values *false* and *true*
-| **number** | Signed (real) number
-| **string** | UTF-8 encoded text value
-| **array** / list | Sequence of values, referred to by their positions
-| **object** / document | Sequence of values, referred to by their names
-
-Primitive types
----------------
-
-### Null value
-
-A `null` value can be used to represent an empty or absent value.
-It is different from a numerical value of zero (`null != 0`) and other
-*falsy* values (`false`, zero-length string, empty array or object).
-It is also known as *nil* or *None* in other languages.
-
-The system may return `null` in the absence of a value, for example
-if you call a [function](../Functions/README.md) with unsupported values
-as arguments or if you try to [access an attribute](DocumentData.md)
-which does not exist.
-
-### Boolean data type
-
-The Boolean data type has two possible values, `true` and `false`.
-They represent the two truth values in logic and mathematics.
-
-### Numeric literals
-
-Numeric literals can be integers or real values (floating-point numbers).
-They can optionally be signed with the `+` or `-` symbols.
-A decimal point `.` is used as separator for the optional fractional part.
-The scientific notation (*E-notation*) is also supported.
-
-```
- 1
- +1
- 42
- -1
--42
- 1.23
--99.99
- 0.5
- .5
- -4.87e103
- -4.87E103
-```
-
-The following notations are invalid and will throw a syntax error:
-
-```
- 1.
-01.23
-00.23
-00
-```
-
-All numeric values are treated as 64-bit double-precision values internally.
-The internal format used is IEEE 754.
-
-### String literals
-
-String literals must be enclosed in single or double quotes. If the used quote
-character is to be used itself within the string literal, it must be escaped
-using the backslash symbol. A literal backslash also needs to be escaped with
-a backslash.
-
-```
-"yikes!"
-"don't know"
-"this is a \"quoted\" word"
-"this is a longer string."
-"the path separator on Windows is \\"
-
-'yikes!'
-'don\'t know'
-'this is a "quoted" word'
-'this is a longer string.'
-'the path separator on Windows is \\'
-```
-
-All string literals must be UTF-8 encoded. It is currently not possible to use
-arbitrary binary data if it is not UTF-8 encoded. A workaround to use binary
-data is to encode the data using [Base64](https://en.wikipedia.org/wiki/Base64)
-or a similar algorithm on the application side before storing the data,
-and to decode it on the application side after retrieval.
-
-Compound types
---------------
-
-AQL supports two compound types:
-
-- **array**: A composition of unnamed values, each accessible
- by their positions. Sometimes called *list*.
-- **object**: A composition of named values, each accessible
- by their names. A *document* is an object at the top level.
-
-### Arrays / Lists
-
-The first supported compound type is the array type. Arrays are effectively
-sequences of (unnamed / anonymous) values. Individual array elements can be
-accessed by their positions. The order of elements in an array is important.
-
-An *array declaration* starts with a left square bracket `[` and ends with
-a right square bracket `]`. The declaration contains zero, one or more
-*expression*s, separated from each other with the comma `,` symbol.
-Whitespace around elements is ignored in the declaration, thus line breaks,
-tab stops and blanks can be used for formatting.
-
-In the easiest case, an array is empty and thus looks like:
-
-```json
-[ ]
-```
-
-Array elements can be any legal *expression* values. Nesting of arrays is
-supported.
-
-```json
-[ true ]
-[ 1, 2, 3 ]
-[ -99, "yikes!", [ false, ["no"], [] ], 1 ]
-[ [ "fox", "marshal" ] ]
-```
-
-Individual array values can later be accessed by their positions using the `[]`
-accessor. The position of the accessed element must be a numeric
-value. Positions start at 0. It is also possible to use negative index values
-to access array values starting from the end of the array. This is convenient if
-the length of the array is unknown and access to elements at the end of the array
-is required.
-
-```js
-// access 1st array element (elements start at index 0)
-u.friends[0]
-
-// access 3rd array element
-u.friends[2]
-
-// access last array element
-u.friends[-1]
-
-// access second to last array element
-u.friends[-2]
-```
-
-### Objects / Documents
-
-The other supported compound type is the object (or document) type. Objects are a
-composition of zero to many attributes. Each attribute is a name/value pair.
-Object attributes can be accessed individually by their names. This data type is
-also known as dictionary, map, associative array and other names.
-
-Object declarations start with a left curly bracket `{` and end with a
-right curly bracket `}`. An object contains zero to many attribute declarations,
-separated from each other with the `,` symbol. Whitespace around elements is ignored
-in the declaration, thus line breaks, tab stops and blanks can be used for formatting.
-
-In the simplest case, an object is empty. Its declaration would then be:
-
-```json
-{ }
-```
-
-Each attribute in an object is a name/value pair. Name and value of an
-attribute are separated using the colon `:` symbol. The name is always a string,
-whereas the value can be of any type including sub-objects.
-
-The attribute name is mandatory - there can't be anonymous values in an object.
-It can be specified as a quoted or unquoted string:
-
-```js
-{ name: … } // unquoted
-{ 'name': … } // quoted (apostrophe / "single quote mark")
-{ "name": … } // quoted (quotation mark / "double quote mark")
-```
-
-It must be quoted if it contains whitespace, escape sequences or characters
-other than ASCII letters (`a`-`z`, `A`-`Z`), digits (`0`-`9`),
-underscores (`_`) and dollar signs (`$`). The first character has to be a
-letter, underscore or dollar sign.
-
-If a [keyword](../Fundamentals/Syntax.md#keywords) is used as an attribute name
-then the attribute name must be quoted or escaped by ticks or backticks:
-
-```js
-{ return: … } // error, return is a keyword!
-{ 'return': … } // quoted
-{ "return": … } // quoted
-{ `return`: … } // escaped (backticks)
-{ ´return´: … } // escaped (ticks)
-```
-
-Attribute names can be computed using dynamic expressions, too.
-To disambiguate regular attribute names from attribute name expressions,
-computed attribute names must be enclosed in square brackets `[ … ]`:
-
-```js
-{ [ CONCAT("test/", "bar") ] : "someValue" }
-```
-
-There is also shorthand notation for attributes which is handy for
-returning existing variables easily:
-
-```js
-LET name = "Peter"
-LET age = 42
-RETURN { name, age }
-```
-
-The above is the shorthand equivalent for the generic form:
-
-```js
-LET name = "Peter"
-LET age = 42
-RETURN { name: name, age: age }
-```
-
-Any valid expression can be used as an attribute value. That also means nested
-objects can be used as attribute values:
-
-```js
-{ name : "Peter" }
-{ "name" : "Vanessa", "age" : 15 }
-{ "name" : "John", likes : [ "Swimming", "Skiing" ], "address" : { "street" : "Cucumber lane", "zip" : "94242" } }
-```
-
-Individual object attributes can later be accessed by their names using the
-dot `.` accessor:
-
-```js
-u.address.city.name
-u.friends[0].name.first
-```
-
-Attributes can also be accessed using the square bracket `[]` accessor:
-
-```js
-u["address"]["city"]["name"]
-u["friends"][0]["name"]["first"]
-```
-
-In contrast to the dot accessor, the square brackets allow for expressions:
-
-```js
-LET attr1 = "friends"
-LET attr2 = "name"
-u[attr1][0][attr2][ CONCAT("fir", "st") ]
-```
-
-{% hint 'info' %}
-If a non-existing attribute is accessed in one or the other way,
-the result will be `null`, without error or warning.
-{% endhint %}
diff --git a/Documentation/Books/AQL/Fundamentals/DocumentData.md b/Documentation/Books/AQL/Fundamentals/DocumentData.md
deleted file mode 100644
index e199f5a3adea..000000000000
--- a/Documentation/Books/AQL/Fundamentals/DocumentData.md
+++ /dev/null
@@ -1,43 +0,0 @@
-Accessing data from collections
-===============================
-
-Collection data can be accessed by specifying a collection name in a query. A
-collection can be understood as an array of documents, and that is how they are
-treated in AQL. Documents from collections are normally accessed using the
-*FOR* keyword. Note that when iterating over documents from a collection, the
-order of documents is undefined. To traverse documents in an explicit and
-deterministic order, the *SORT* keyword should be used in addition.
-
-Data in collections is stored in documents, with each document potentially
-having different attributes than other documents. This is true even for
-documents of the same collection.
-
-It is therefore quite normal to encounter documents that do not have some or all
-of the attributes that are queried in an AQL query. In this case, the
-non-existing attributes in the document will be treated as if they existed
-with a value of *null*. That means that comparing a document attribute to
-*null* will return true if the document either has the particular attribute
-with a value of *null*, or if the document does not have the particular
-attribute at all.
-
-For example, the following query will return all documents from the collection
-*users* that have a value of *null* in the attribute *name*, plus all documents
-from *users* that do not have the *name* attribute at all:
-
- FOR u IN users
- FILTER u.name == null
- RETURN u
-
-Furthermore, *null* is less than any other value (excluding *null* itself). That
-means documents with non-existing attributes may be included in the result
-when comparing attribute values with the less than or less equal operators.
-
-For example, the following query will return all documents from the collection
-*users* that have an attribute *age* with a value less than *39*, but also all
-documents from the collection that do not have the attribute *age* at all.
-
- FOR u IN users
- FILTER u.age < 39
- RETURN u
-
-This behavior should always be taken into account when writing queries.
diff --git a/Documentation/Books/AQL/Fundamentals/QueryErrors.md b/Documentation/Books/AQL/Fundamentals/QueryErrors.md
deleted file mode 100644
index d46ad014dab8..000000000000
--- a/Documentation/Books/AQL/Fundamentals/QueryErrors.md
+++ /dev/null
@@ -1,32 +0,0 @@
-Errors
-======
-
-Issuing a syntactically invalid query to the server will result in a parse
-error. ArangoDB will detect such errors during query
-inspection and abort further processing. Instead, the error number and an error
-message are returned so that the errors can be fixed.
-
-If a query passes the parsing stage, all collections referenced in the query
-will be opened. If any of the referenced collections is not present, query
-execution will again be aborted and an appropriate error message will be
-returned.
-
-Under some circumstances, executing a query may also produce run-time errors
-that cannot be predicted from inspecting the query text alone. This is because
-queries may use data from collections that may also be inhomogeneous. Some
-examples that will cause run-time errors are:
-
-- Division by zero: Will be triggered when an attempt is made to use the value
- *0* as the divisor in an arithmetic division or modulus operation
-- Invalid operands for arithmetic operations: Will be triggered when an attempt
- is made to use any non-numeric values as operands in arithmetic operations.
- This includes unary (unary minus, unary plus) and binary operations (plus,
- minus, multiplication, division, and modulus)
-- Invalid operands for logical operations: Will be triggered when an attempt is
- made to use any non-boolean values as operand(s) in logical operations. This
- includes unary (logical not/negation), binary (logical and, logical or), and
- the ternary operators
-
-Please refer to the [Arango Errors](../../Manual/Appendix/ErrorCodes.html) page
-for a list of error codes and meanings.
-
diff --git a/Documentation/Books/AQL/Fundamentals/QueryResults.md b/Documentation/Books/AQL/Fundamentals/QueryResults.md
deleted file mode 100644
index f86a5a7c25c6..000000000000
--- a/Documentation/Books/AQL/Fundamentals/QueryResults.md
+++ /dev/null
@@ -1,59 +0,0 @@
-Query results
-=============
-
-Result sets
------------
-
-The result of an AQL query is an array of values. The individual values in the
-result array may or may not have a homogeneous structure, depending on what is
-actually queried.
-
-For example, when returning data from a collection with inhomogeneous documents
-(the individual documents in the collection have different attribute names)
-without modification, the result values will have an inhomogeneous
-structure as well. Each result value itself is a document:
-
-```js
-FOR u IN users
- RETURN u
-```
-
-```json
-[ { "id": 1, "name": "John", "active": false },
- { "age": 32, "id": 2, "name": "Vanessa" },
- { "friends": [ "John", "Vanessa" ], "id": 3, "name": "Amy" } ]
-```
-
-However, if a fixed set of attributes from the collection is queried, then the
-query result values will have a homogeneous structure. Each result value is
-still a document:
-
-```js
-FOR u IN users
- RETURN { "id": u.id, "name": u.name }
-```
-
-```json
-[ { "id": 1, "name": "John" },
- { "id": 2, "name": "Vanessa" },
- { "id": 3, "name": "Amy" } ]
-```
-
-It is also possible to query just scalar values. In this case, the result set
-is an array of scalars, and each result value is a scalar value:
-
-```js
-FOR u IN users
- RETURN u.id
-```
-
-```json
-[ 1, 2, 3 ]
-```
-
-If a query does not produce any results because no matching data can be
-found, it will produce an empty result array:
-
-```json
-[ ]
-```
diff --git a/Documentation/Books/AQL/Fundamentals/README.md b/Documentation/Books/AQL/Fundamentals/README.md
deleted file mode 100644
index 32d3fe05f2e9..000000000000
--- a/Documentation/Books/AQL/Fundamentals/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-AQL Fundamentals
-================
-
-
-* [AQL Syntax](Syntax.md) explains the structure of the AQL language.
-* [Data Types](DataTypes.md) describes the primitive and compound data types supported by AQL.
-* [Bind Parameters](BindParameters.md): AQL supports the usage of bind parameters. This allows the query text to be separated from the literal values used in the query.
-* [Type and value order](TypeValueOrder.md): AQL uses a set of rules (using values and types) for equality checks and comparisons.
-* [Accessing Data from Collections](DocumentData.md): describes the impact of non-existent or null attributes for selection queries.
-* [Query Results](QueryResults.md): the result of an AQL query is an array of values.
-* [Query Errors](QueryErrors.md): errors may arise from the AQL parsing or execution.
-
-Learn more about optimizing your queries by going through the
-[Performance Course](https://www.arangodb.com/arangodb-performance-course/).
diff --git a/Documentation/Books/AQL/Fundamentals/Syntax.md b/Documentation/Books/AQL/Fundamentals/Syntax.md
deleted file mode 100644
index 8e675c3d42ae..000000000000
--- a/Documentation/Books/AQL/Fundamentals/Syntax.md
+++ /dev/null
@@ -1,223 +0,0 @@
-AQL Syntax
-==========
-
-Query types
------------
-
-An AQL query must either return a result (indicated by usage of the *RETURN*
-keyword) or execute a data-modification operation (indicated by usage
-of one of the keywords *INSERT*, *UPDATE*, *REPLACE*, *REMOVE* or *UPSERT*). The AQL
-parser will return an error if it detects more than one data-modification
-operation in the same query or if it cannot figure out if the query is meant
-to be a data retrieval or a modification operation.
-
-AQL only allows *one* query in a single query string; thus semicolons to
-indicate the end of one query and separate multiple queries (as seen in SQL) are
-not allowed.
-
-Whitespace
-----------
-
-Whitespaces (blanks, carriage returns, line feeds, and tab stops) can be used
-in the query text to increase its readability. Tokens have to be separated by
-any number of whitespaces. Whitespace within strings or names must be enclosed
-in quotes in order to be preserved.
-
-Comments
---------
-
-Comments can be embedded at any position in a query. The text contained in the
-comment is ignored by the AQL parser.
-
-Multi-line comments cannot be nested, which means subsequent comment start
-sequences within a comment are ignored, and the first comment end sequence
-terminates the comment.
-
-AQL supports two types of comments:
-
-- Single line comments: These start with a double forward slash and end at
- the end of the line, or the end of the query string (whichever is first).
-- Multi line comments: These start with a forward slash and asterisk, and
- end with an asterisk and a following forward slash. They can span as many
- lines as necessary.
-
-
- /* this is a comment */ RETURN 1
- /* these */ RETURN /* are */ 1 /* multiple */ + /* comments */ 1
- /* this is
- a multi line
- comment */
- // a single line comment
-
-Keywords
---------
-
-On the top level, AQL offers the following operations:
-- `FOR`: array iteration
-- `RETURN`: results projection
-- `FILTER`: non-view results filtering
-- `SEARCH`: view results filtering
-- `SORT`: result sorting
-- `LIMIT`: result slicing
-- `LET`: variable assignment
-- `COLLECT`: result grouping
-- `INSERT`: insertion of new documents
-- `UPDATE`: (partial) update of existing documents
-- `REPLACE`: replacement of existing documents
-- `REMOVE`: removal of existing documents
-- `UPSERT`: insertion or update of existing documents
-
-Each of the above operations can be initiated in a query by using a keyword of
-the same name. An AQL query can (and typically does) consist of multiple of the
-above operations.
-
-An example AQL query may look like this:
-
-```js
-FOR u IN users
- FILTER u.type == "newbie" && u.active == true
- RETURN u.name
-```
-
-In this example query, the terms *FOR*, *FILTER*, and *RETURN* initiate the
-higher-level operation according to their name. These terms are also keywords,
-meaning that they have a special meaning in the language.
-
-For example, the query parser will use the keywords to find out which high-level
-operations to execute. That also means keywords can only be used at certain
-locations in a query. This also makes all keywords reserved words that must not
-be used for other purposes than they are intended for.
-
-For example, it is not possible to use a keyword as a collection or attribute
-name. If a collection or attribute needs to have the same name as a keyword, the
-collection or attribute name needs to be quoted.
-
-Keywords are case-insensitive, meaning they can be specified in lower, upper, or
-mixed case in queries. In this documentation, all keywords are written in upper
-case to make them distinguishable from other query parts.
-
-There are a few more keywords in addition to the higher-level operation keywords.
-Additional keywords may be added in future versions of ArangoDB.
-The complete list of keywords is currently:
-
-
-
- AGGREGATE
- ALL
- AND
- ANY
- ASC
- COLLECT
- DESC
- DISTINCT
- FALSE
- FILTER
- FOR
- GRAPH
- IN
- INBOUND
- INSERT
- INTO
- LET
- LIMIT
- NONE
- NOT
- NULL
- OR
- OUTBOUND
- REMOVE
- REPLACE
- RETURN
- SHORTEST_PATH
- SORT
- TRUE
- UPDATE
- UPSERT
- WITH
-
-
-
-Names
------
-
-In general, names are used to identify objects (collections, attributes,
-variables, and functions) in AQL queries.
-
-The maximum supported length of any name is 64 bytes. Names in AQL are always
-case-sensitive.
-
-Keywords must not be used as names. If a reserved keyword should be used as a
-name, the name must be enclosed in backticks or forward ticks. Enclosing a name in
-backticks or forward ticks makes it possible to use otherwise reserved keywords
-as names. An example for this is:
-
-```js
-FOR f IN `filter`
- RETURN f.`sort`
-```
-
-Due to the backticks, *filter* and *sort* are interpreted as names and not as
-keywords here.
-
-The example can alternatively be written as:
-
-```js
-FOR f IN ´filter´
- RETURN f.´sort´
-```
-
-### Collection names
-
-Collection names can be used in queries as they are. If a collection happens to
-have the same name as a keyword, the name must be enclosed in backticks.
-
-Please refer to the [Naming Conventions in ArangoDB](../../Manual/DataModeling/NamingConventions/CollectionAndViewNames.html)
-about collection naming conventions.
-
-AQL currently has a limit of up to 256 collections used in one AQL query.
-This limit applies to the sum of all involved document and edge collections.
-
-### Attribute names
-
-When referring to attributes of documents from a collection, the fully qualified
-attribute name must be used. This is because multiple collections with ambiguous
-attribute names may be used in a query. To avoid any ambiguity, it is not
-allowed to refer to an unqualified attribute name.
-
-Please refer to the [Naming Conventions in ArangoDB](../../Manual/DataModeling/NamingConventions/AttributeNames.html)
-for more information about the attribute naming conventions.
-
-```js
-FOR u IN users
- FOR f IN friends
- FILTER u.active == true && f.active == true && u.id == f.userId
- RETURN u.name
-```
-
-In the above example, the attribute names *active*, *name*, *id*, and *userId*
-are qualified using the collection names they belong to (*u* and *f*
-respectively).
-
-### Variable names
-
-AQL allows the user to assign values to additional variables in a query. All
-variables that are assigned a value must have a name that is unique within the
-context of the query. Variable names must be different from the names of any
-collection name used in the same query.
-
-```js
-FOR u IN users
- LET friends = u.friends
- RETURN { "name" : u.name, "friends" : friends }
-```
-
-In the above query, *users* is a collection name, and both *u* and *friends* are
-variable names. This is because the *FOR* and *LET* operations need target
-variables to store their intermediate results.
-
-Allowed characters in variable names are the letters *a* to *z* (both in lower
-and upper case), the numbers *0* to *9*, the underscore (*_*) symbol and the
-dollar (*$*) sign. A variable name must not start with a number. If a variable name
-starts with the underscore character, the underscore must be followed by at least one
-letter (a-z or A-Z) or digit (0-9).
-
-The dollar sign can be used only as the very first character in a variable name.
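-
-For illustration, a few valid and invalid variable names derived from the rules above:
-
-    valid:   u, users2, _user, $user
-    invalid: 5user, _, us$er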
diff --git a/Documentation/Books/AQL/Fundamentals/TypeValueOrder.md b/Documentation/Books/AQL/Fundamentals/TypeValueOrder.md
deleted file mode 100644
index 5af7a7791ed5..000000000000
--- a/Documentation/Books/AQL/Fundamentals/TypeValueOrder.md
+++ /dev/null
@@ -1,125 +0,0 @@
-Type and value order
-====================
-
-When checking for equality or inequality or when determining the sort order of
-values, AQL uses a deterministic algorithm that takes both the data types and
-the actual values into account.
-
-The compared operands are first compared by their data types, and only by their
-data values if the operands have the same data types.
-
-The following type order is used when comparing data types:
-
- null < bool < number < string < array/list < object/document
-
-This means *null* is the smallest type in AQL and *document* is the type with
-the highest order. If the compared operands have a different type, then the
-comparison result is determined and the comparison is finished.
-
-For example, the boolean *true* value will always be less than any numeric or
-string value, any array (even an empty array) or any object / document. Additionally, any
-string value (even an empty string) will always be greater than any numeric
-value and any boolean value (*true* or *false*).
-
- null < false
- null < true
- null < 0
- null < ''
- null < ' '
- null < '0'
- null < 'abc'
- null < [ ]
- null < { }
-
- false < true
- false < 0
- false < ''
- false < ' '
- false < '0'
- false < 'abc'
- false < [ ]
- false < { }
-
- true < 0
- true < ''
- true < ' '
- true < '0'
- true < 'abc'
- true < [ ]
- true < { }
-
- 0 < ''
- 0 < ' '
- 0 < '0'
- 0 < 'abc'
- 0 < [ ]
- 0 < { }
-
- '' < ' '
- '' < '0'
- '' < 'abc'
- '' < [ ]
- '' < { }
-
- [ ] < { }
-
-If the two compared operands have the same data types, then the operands values
-are compared. For the primitive types (null, boolean, number, and string), the
-result is defined as follows:
-
-- null: *null* is equal to *null*
-- boolean: *false* is less than *true*
-- number: numeric values are ordered by their cardinal value
-- string: string values are ordered using a localized comparison, using the configured
- [server language](../../Manual/Programs/Arangod/Global.html#default-language)
- for sorting according to the alphabetical order rules of that language
-
-Note: unlike in SQL, *null* can be compared to any value, including *null*
-itself, without the result being converted into *null* automatically.
-
-For compound types, the following special rules are applied:
-
-Two array values are compared by comparing their individual elements position by
-position, starting at the first element. For each position, the element types
-are compared first. If the types are not equal, the comparison result is
-determined, and the comparison is finished. If the types are equal, then the
-values of the two elements are compared. If one of the arrays is finished and
-the other array still has an element at a compared position, then *null* will be
-used as the element value of the fully traversed array.
-
-If an array element is itself a compound value (an array or an object / document), then the
-comparison algorithm will compare the element's sub-values recursively.
-
- [ ] < [ 0 ]
- [ 1 ] < [ 2 ]
- [ 1, 2 ] < [ 2 ]
- [ 99, 99 ] < [ 100 ]
- [ false ] < [ true ]
- [ false, 1 ] < [ false, '' ]
-
-Two object / document operands are compared by checking attribute names and values. The
-attribute names are compared first. Before attribute names are compared, a
-combined array of all attribute names from both operands is created and sorted
-lexicographically. This means that the order in which attributes are declared
-in an object / document is not relevant when comparing two objects / documents.
-
-The combined and sorted array of attribute names is then traversed, and the
-respective attributes from the two compared operands are then looked up. If one
-of the objects / documents does not have an attribute with the sought name, its attribute
-value is considered to be *null*. Finally, the attribute value of both
-objects / documents is compared using the before mentioned data type and value comparison.
-The comparisons are performed for all object / document attributes until there is an
-unambiguous comparison result. If an unambiguous comparison result is found, the
-comparison is finished. If there is no unambiguous comparison result, the two
-compared objects / documents are considered equal.
-
- { } < { "a" : 1 }
- { } < { "a" : null }
- { "a" : 1 } < { "a" : 2 }
- { "b" : 1 } < { "a" : 0 }
- { "a" : { "c" : true } } < { "a" : { "c" : 0 } }
- { "a" : { "c" : true, "a" : 0 } } < { "a" : { "c" : false, "a" : 1 } }
-
- { "a" : 1, "b" : 2 } == { "b" : 2, "a" : 1 }
-
diff --git a/Documentation/Books/AQL/Graphs/KShortestPaths.md b/Documentation/Books/AQL/Graphs/KShortestPaths.md
deleted file mode 100644
index ecdbdcb923b2..000000000000
--- a/Documentation/Books/AQL/Graphs/KShortestPaths.md
+++ /dev/null
@@ -1,220 +0,0 @@
-k Shortest Paths in AQL
-=======================
-
-General query idea
---------------------
-
-This type of query finds the first *k* paths in order of length
-(or weight) between two given documents, *startVertex* and *targetVertex*, in
-your graph.
-
-Every such path will be returned as a JSON object with three components:
-
-- an array containing the `vertices` on the path
-- an array containing the `edges` on the path
-- the `weight` of the path, that is the sum of all edge weights
-
-If no *weightAttribute* is given, the weight of the path is just its length.
-
-**Example**
-
-Let us take a look at a simple example to explain how it works.
-This is the graph that we are going to find some shortest paths on:
-
-![train_map](train_map.png)
-
-Each ellipse stands for a train station with the name of the city written inside
-of it. They are the vertices of the graph. Arrows represent train connections
-between cities and are the edges of the graph. The numbers near the arrows
-describe how long it takes to get from one station to another. They are used
-as edge weights.
-
-Let us assume that we want to go from **Aberdeen** to **London** by train.
-
-We expect to see the following vertices on *the* shortest path, in this order:
-
-1. Aberdeen
-2. Leuchars
-3. Edinburgh
-4. York
-5. London
-
-By the way, the weight of the path is: 1.5 + 1.5 + 3.5 + 1.8 = **8.3**.
-
-Let us look at alternative paths next, for example because we know that the
-direct connection between York and London does not operate currently.
-An alternative path, which is slightly longer, goes like this:
-
-1. Aberdeen
-2. Leuchars
-3. Edinburgh
-4. York
-5. **Carlisle**
-6. **Birmingham**
-7. London
-
-Its weight is: 1.5 + 1.5 + 3.5 + 2.0 + 1.5 = **10.0**.
-
-Another route goes via Glasgow. There are seven stations on the path as well,
-however, it is quicker if we compare the edge weights:
-
-1. Aberdeen
-2. Leuchars
-3. Edinburgh
-4. **Glasgow**
-5. Carlisle
-6. Birmingham
-7. London
-
-The path weight is lower: 1.5 + 1.5 + 1.0 + 1.0 + 2.0 + 1.5 = **8.5**.
-
-Syntax
-------
-
-The syntax for k Shortest Paths queries is similar to the one for
-[Shortest Path](ShortestPath.md) and there are also two options to either
-use a named graph or a set of edge collections. However, it only emits a path
-variable, whereas SHORTEST_PATH emits a vertex and an edge variable.
-
-{% hint 'warning' %}
-It is highly recommended that you use a **LIMIT** statement, as
-k Shortest Paths is a potentially expensive operation. On large connected
-graphs it can return a large number of paths, or perform an expensive
-(but unsuccessful) search for more short paths.
-{% endhint %}
-
-### Working with named graphs
-
-```
-FOR path
- IN OUTBOUND|INBOUND|ANY K_SHORTEST_PATHS
- startVertex TO targetVertex
- GRAPH graphName
- [OPTIONS options]
- [LIMIT offset, count]
-```
-
-- `FOR`: emits the variable **path** which contains one path as an object containing
- `vertices`, `edges`, and the `weight` of the path.
-- `IN` `OUTBOUND|INBOUND|ANY`: defines in which direction
- edges are followed (outgoing, incoming, or both)
-- `K_SHORTEST_PATHS`: the keyword to compute k Shortest Paths
-- **startVertex** `TO` **targetVertex** (both string|object): the two vertices between
- which the paths will be computed. This can be specified in the form of
-  an ID string or in the form of a document with the attribute `_id`. All other
- values will lead to a warning and an empty result. If one of the specified
- documents does not exist, the result is empty as well and there is no warning.
-- `GRAPH` **graphName** (string): the name identifying the named graph. Its vertex and
- edge collections will be looked up.
-- `OPTIONS` **options** (object, *optional*): used to modify the execution of the
- traversal. Only the following attributes have an effect, all others are ignored:
- - **weightAttribute** (string): a top-level edge attribute that should be used
- to read the edge weight. If the attribute does not exist or is not numeric, the
- *defaultWeight* will be used instead.
- - **defaultWeight** (number): this value will be used as fallback if there is
- no *weightAttribute* in the edge document, or if it's not a number. The default
- is 1.
-- `LIMIT` (see [LIMIT operation](../Operations/Limit.html), *optional*):
- the maximal number of paths to return. It is highly recommended to use
- a `LIMIT` for `K_SHORTEST_PATHS`.
-
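-Putting these options together, a query on the named graph used in the examples
-below might look like the following sketch (it assumes the graph
-`kShortestPathsGraph` and the numeric edge attribute `travelTime` from those examples):
-
-```
-FOR path
-  IN OUTBOUND K_SHORTEST_PATHS
-  'places/Aberdeen' TO 'places/London'
-  GRAPH 'kShortestPathsGraph'
-  OPTIONS { weightAttribute: 'travelTime', defaultWeight: 15 }
-  LIMIT 3
-  RETURN { stations: path.vertices[*]._key, weight: path.weight }
-```
-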
-### Working with collection sets
-
-```
-FOR path
- IN OUTBOUND|INBOUND|ANY K_SHORTEST_PATHS
- startVertex TO targetVertex
- edgeCollection1, ..., edgeCollectionN
- [OPTIONS options]
- [LIMIT offset, count]
-```
-
-Instead of `GRAPH graphName` you can specify a list of edge collections.
-The involved vertex collections are determined by the edges of the given
-edge collections.
-
-### Traversing in mixed directions
-
-For k shortest paths with a list of edge collections you can optionally specify the
-direction for some of the edge collections. Say for example you have three edge
-collections *edges1*, *edges2* and *edges3*, where in *edges2* the direction
-has no relevance, but in *edges1* and *edges3* the direction should be taken into
-account. In this case you can use *OUTBOUND* as general search direction and *ANY*
-specifically for *edges2* as follows:
-
-```
-FOR vertex IN OUTBOUND K_SHORTEST_PATHS
- startVertex TO targetVertex
- edges1, ANY edges2, edges3
-```
-
-All collections in the list that do not specify their own direction will use the
-direction defined after `IN` (here: `OUTBOUND`). This allows using a different
-direction for each collection in your path search.
-
-Examples
---------
-
-We load an example graph to get a named graph that reflects some possible
-train connections in Europe and North America.
-
-![train_map](train_map.png)
-
- @startDocuBlockInline GRAPHKSP_01_create_graph
- @EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_01_create_graph}
- ~addIgnoreCollection("places");
- ~addIgnoreCollection("connections");
- var examples = require("@arangodb/graph-examples/example-graph.js");
- var graph = examples.loadGraph("kShortestPathsGraph");
- db.places.toArray();
- db.connections.toArray();
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock GRAPHKSP_01_create_graph
-
-Suppose we want to query a route from **Aberdeen** to **London**, and compare
-the outputs of SHORTEST_PATH and K_SHORTEST_PATHS with LIMIT 1. Note that while
-SHORTEST_PATH and K_SHORTEST_PATHS with LIMIT 1 should return a path of the same
-length (or weight), they do not need to return the same path.
-
- @startDocuBlockInline GRAPHKSP_02_Aberdeen_to_London
- @EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_02_Aberdeen_to_London}
- db._query("FOR v, e IN OUTBOUND SHORTEST_PATH 'places/Aberdeen' TO 'places/London' GRAPH 'kShortestPathsGraph' RETURN [v,e]");
- db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/Aberdeen' TO 'places/London' GRAPH 'kShortestPathsGraph' LIMIT 1 RETURN p");
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock GRAPHKSP_02_Aberdeen_to_London
-
-Next, we can ask for more than one option for a route:
-
- @startDocuBlockInline GRAPHKSP_03_Aberdeen_to_London
- @EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_03_Aberdeen_to_London}
- db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/Aberdeen' TO 'places/London' GRAPH 'kShortestPathsGraph' LIMIT 3 RETURN p");
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock GRAPHKSP_03_Aberdeen_to_London
-
-If we ask for routes that don't exist we get an empty result:
-
- @startDocuBlockInline GRAPHKSP_04_Aberdeen_to_Toronto
- @EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_04_Aberdeen_to_Toronto}
- db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/Aberdeen' TO 'places/Toronto' GRAPH 'kShortestPathsGraph' LIMIT 3 RETURN p");
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock GRAPHKSP_04_Aberdeen_to_Toronto
-
-We can use the attribute *travelTime* that connections have as edge weights to
-take into account which connections are quicker:
-
- @startDocuBlockInline GRAPHKSP_05_StAndrews_to_Cologne
- @EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_05_StAndrews_to_Cologne}
- db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/StAndrews' TO 'places/Cologne' GRAPH 'kShortestPathsGraph' OPTIONS { 'weightAttribute': 'travelTime', defaultWeight: '15'} LIMIT 3 RETURN p");
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock GRAPHKSP_05_StAndrews_to_Cologne
-
-And finally clean up by removing the named graph:
-
- @startDocuBlockInline GRAPHKSP_99_drop_graph
- @EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_99_drop_graph}
- var examples = require("@arangodb/graph-examples/example-graph.js");
- examples.dropGraph("kShortestPathsGraph");
- ~removeIgnoreCollection("places");
- ~removeIgnoreCollection("connections");
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock GRAPHKSP_99_drop_graph
diff --git a/Documentation/Books/AQL/Graphs/README.md b/Documentation/Books/AQL/Graphs/README.md
deleted file mode 100644
index cead9d723cf1..000000000000
--- a/Documentation/Books/AQL/Graphs/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-Graphs in AQL
-=============
-
-There are multiple ways to work with [graphs in ArangoDB](../../Manual/Graphs/index.html),
-as well as different ways to query your graphs using AQL.
-
-The two options in managing graphs are to either use
-
-- named graphs where ArangoDB manages the collections involved in one graph, or
-- graph functions on a combination of document and edge collections.
-
-Named graphs can be defined through the [graph-module](../../Manual/Graphs/GeneralGraphs/index.html)
-or via the [web interface](../../Manual/Programs/WebInterface/index.html).
-The definition contains the name of the graph, and the vertex and edge collections
-involved. Since the management functions are layered on top of simple sets of
-document and edge collections, you can also use regular AQL functions to work with them.
-
-Both variants (named graphs and loosely coupled collection sets a.k.a. anonymous graphs)
-are supported by the AQL language constructs for graph querying. These constructs
-make full use of optimizations, so best performance is to be expected:
-
-- [AQL Traversals](Traversals.md) to follow edges connected to a start vertex,
- up to a variable depth. It can be combined with AQL filter conditions.
-
-- [AQL Shortest Path](ShortestPath.md) to find the vertices and edges between two
- given vertices, with as few hops as possible.
-
-These types of queries are only useful if you use edge collections and/or graphs in
-your data model.
-
-{% hint 'info' %}
-New to graphs? [**Take our free graph course for freshers**](https://www.arangodb.com/arangodb-graph-course/)
-and get from zero knowledge to advanced query techniques.
-{% endhint %}
diff --git a/Documentation/Books/AQL/Graphs/ShortestPath.md b/Documentation/Books/AQL/Graphs/ShortestPath.md
deleted file mode 100644
index b814fe32c1cd..000000000000
--- a/Documentation/Books/AQL/Graphs/ShortestPath.md
+++ /dev/null
@@ -1,170 +0,0 @@
-Shortest Path in AQL
-====================
-
-General query idea
-------------------
-
-This type of query is supposed to find the shortest path between two given documents
-(*startVertex* and *targetVertex*) in your graph. For all vertices on this shortest
-path, you will get a result in the form of a set with two items:
-
-1. The vertex on this path.
-2. The edge pointing to it.
-
-### Example execution
-
-Let's take a look at a simple example to explain how it works.
-This is the graph that we are going to find a shortest path on:
-
-![traversal graph](traversal_graph.png)
-
-Now we use the following parameters for our query:
-
-1. We start at the vertex **A**.
-2. We finish with the vertex **D**.
-
-So obviously we will have the vertices **A**, **B**, **C** and **D** on the
-shortest path in exactly this order. Then the shortest path statement will
-return the following pairs:
-
-| Vertex | Edge |
-|--------|-------|
-| A | null |
-| B | A → B |
-| C | B → C |
-| D | C → D |
-
-Note: The first edge will always be `null` because there is no edge pointing
-to the *startVertex*.
-
-Syntax
-------
-
-Now let's see how we can write a shortest path query.
-You have two options here: you can either use a named graph or a set of edge
-collections (anonymous graph).
-
-### Working with named graphs
-
-```
-FOR vertex[, edge]
- IN OUTBOUND|INBOUND|ANY SHORTEST_PATH
- startVertex TO targetVertex
- GRAPH graphName
- [OPTIONS options]
-```
-
-- `FOR`: emits up to two variables:
- - **vertex** (object): the current vertex on the shortest path
- - **edge** (object, *optional*): the edge pointing to the vertex
-- `IN` `OUTBOUND|INBOUND|ANY`: defines in which direction edges are followed
- (outgoing, incoming, or both)
-- **startVertex** `TO` **targetVertex** (both string|object): the two vertices between
- which the shortest path will be computed. This can be specified in the form of
- an ID string or in the form of a document with the attribute `_id`. All other
- values will lead to a warning and an empty result. If one of the specified
- documents does not exist, the result is empty as well and there is no warning.
-- `GRAPH` **graphName** (string): the name identifying the named graph. Its vertex and
- edge collections will be looked up.
-- `OPTIONS` **options** (object, *optional*): used to modify the execution of the
- traversal. Only the following attributes have an effect, all others are ignored:
- - **weightAttribute** (string): a top-level edge attribute that should be used
-    to read the edge weight. If the attribute does not exist or is not numeric, the
- *defaultWeight* will be used instead.
- - **defaultWeight** (number): this value will be used as fallback if there is
- no *weightAttribute* in the edge document, or if it's not a number. The default
- is 1.
-
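-Combining these options, a weighted shortest path query might look like the
-following sketch. It uses the traversal graph from the examples below; the edge
-attribute `length` is hypothetical and stands in for your own weight attribute:
-
-```
-FOR v, e IN OUTBOUND SHORTEST_PATH
-  'circles/A' TO 'circles/D'
-  GRAPH 'traversalGraph'
-  OPTIONS { weightAttribute: 'length', defaultWeight: 1 }  // 'length' is a hypothetical attribute
-  RETURN [v._key, e._key]
-```
-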
-### Working with collection sets
-
-```
-FOR vertex[, edge]
- IN OUTBOUND|INBOUND|ANY SHORTEST_PATH
- startVertex TO targetVertex
- edgeCollection1, ..., edgeCollectionN
- [OPTIONS options]
-```
-
-Instead of `GRAPH graphName` you may specify a list of edge collections (anonymous
-graph). The involved vertex collections are determined by the edges of the given
-edge collections. The rest of the behavior is similar to the named version.
-
-### Traversing in mixed directions
-
-For shortest path with a list of edge collections you can optionally specify the
-direction for some of the edge collections. Say for example you have three edge
-collections *edges1*, *edges2* and *edges3*, where in *edges2* the direction
-has no relevance, but in *edges1* and *edges3* the direction should be taken into
-account. In this case you can use *OUTBOUND* as general search direction and *ANY*
-specifically for *edges2* as follows:
-
-```
-FOR vertex IN OUTBOUND SHORTEST_PATH
- startVertex TO targetVertex
- edges1, ANY edges2, edges3
-```
-
-All collections in the list that do not specify their own direction will use the
-direction defined after *IN* (here: *OUTBOUND*). This allows using a different
-direction for each collection in your path search.
-
-Conditional shortest path
--------------------------
-
-The SHORTEST_PATH computation will only find an unconditioned shortest path.
-With this construct it is not possible to define a condition like: "Find the
-shortest path where all edges are of type *X*". If you want to do this, use a
-normal [Traversal](Traversals.md) instead with the option `{bfs: true}` in
-combination with `LIMIT 1`.
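-
-A sketch of this workaround, based on the traversal graph from the examples below
-(the edge attribute `theTruth` and the target vertex `circles/D` merely stand in
-for your own condition and target):
-
-```
-FOR v, e, p IN 1..10 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
-  OPTIONS { bfs: true }
-  FILTER p.edges[*].theTruth ALL == true   // the condition on all edges
-  FILTER v._key == 'D'                     // the target vertex
-  LIMIT 1
-  RETURN p.vertices[*]._key
-```
-
-Because the breadth-first search enumerates paths in order of increasing depth,
-the first match returned by `LIMIT 1` is a shortest conditioned path in terms of
-the number of edges; the upper bound of *10* is an arbitrary choice.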
-
-Please also consider [to use `WITH`](../Operations/With.md) to specify the collections you expect to be involved.
-
-Examples
---------
-We will create a simple symmetric traversal demonstration graph:
-
-![traversal graph](traversal_graph.png)
-
- @startDocuBlockInline GRAPHSP_01_create_graph
- @EXAMPLE_ARANGOSH_OUTPUT{GRAPHSP_01_create_graph}
- ~addIgnoreCollection("circles");
- ~addIgnoreCollection("edges");
- var examples = require("@arangodb/graph-examples/example-graph.js");
- var graph = examples.loadGraph("traversalGraph");
- db.circles.toArray();
- db.edges.toArray();
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock GRAPHSP_01_create_graph
-
-We start with the shortest path from **A** to **D** as above:
-
- @startDocuBlockInline GRAPHSP_02_A_to_D
- @EXAMPLE_ARANGOSH_OUTPUT{GRAPHSP_02_A_to_D}
- db._query("FOR v, e IN OUTBOUND SHORTEST_PATH 'circles/A' TO 'circles/D' GRAPH 'traversalGraph' RETURN [v._key, e._key]");
- db._query("FOR v, e IN OUTBOUND SHORTEST_PATH 'circles/A' TO 'circles/D' edges RETURN [v._key, e._key]");
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock GRAPHSP_02_A_to_D
-
-We can see our expectations are fulfilled. We find the vertices in the correct ordering and
-the first edge is *null*, because no edge is pointing to the start vertex on this path.
-
-We can also compute shortest paths based on documents found in collections:
-
- @startDocuBlockInline GRAPHSP_03_A_to_D
- @EXAMPLE_ARANGOSH_OUTPUT{GRAPHSP_03_A_to_D}
- db._query("FOR a IN circles FILTER a._key == 'A' FOR d IN circles FILTER d._key == 'D' FOR v, e IN OUTBOUND SHORTEST_PATH a TO d GRAPH 'traversalGraph' RETURN [v._key, e._key]");
- db._query("FOR a IN circles FILTER a._key == 'A' FOR d IN circles FILTER d._key == 'D' FOR v, e IN OUTBOUND SHORTEST_PATH a TO d edges RETURN [v._key, e._key]");
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock GRAPHSP_03_A_to_D
-
-
-And finally clean it up again:
-
- @startDocuBlockInline GRAPHSP_99_drop_graph
- @EXAMPLE_ARANGOSH_OUTPUT{GRAPHSP_99_drop_graph}
- var examples = require("@arangodb/graph-examples/example-graph.js");
- examples.dropGraph("traversalGraph");
- ~removeIgnoreCollection("circles");
- ~removeIgnoreCollection("edges");
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock GRAPHSP_99_drop_graph
diff --git a/Documentation/Books/AQL/Graphs/Traversals.md b/Documentation/Books/AQL/Graphs/Traversals.md
deleted file mode 100644
index 58435dc61a85..000000000000
--- a/Documentation/Books/AQL/Graphs/Traversals.md
+++ /dev/null
@@ -1,543 +0,0 @@
-Graph traversals in AQL
-=======================
-
-Syntax
-------
-
-There are two slightly different syntaxes for traversals in AQL, one for
-- [named graphs](../../Manual/Graphs/index.html#named-graphs) and another to
-- specify a [set of edge collections](#working-with-collection-sets)
- ([anonymous graph](../../Manual/Graphs/index.html#anonymous-graphs)).
-
-### Working with named graphs
-
-```
-[WITH vertexCollection1[, vertexCollection2[, ...vertexCollectionN]]]
-FOR vertex[, edge[, path]]
- IN [min[..max]]
- OUTBOUND|INBOUND|ANY startVertex
- GRAPH graphName
- [PRUNE pruneCondition]
- [OPTIONS options]
-```
-- `WITH`: optional for single server instances, but required for
- [graph traversals in a cluster](#graph-traversals-in-a-cluster).
- - **collections** (collection, *repeatable*): list of vertex collections that will
- be involved in the traversal
-- `FOR`: emits up to three variables:
- - **vertex** (object): the current vertex in a traversal
- - **edge** (object, *optional*): the current edge in a traversal
- - **path** (object, *optional*): representation of the current path with
- two members:
- - `vertices`: an array of all vertices on this path
- - `edges`: an array of all edges on this path
-- `IN` `min..max`: the minimal and maximal depth for the traversal:
- - **min** (number, *optional*): edges and vertices returned by this query will
- start at the traversal depth of *min* (thus edges and vertices below will
- not be returned). If not specified, it defaults to 1. The minimal
- possible value is 0.
- - **max** (number, *optional*): up to *max* length paths are traversed.
- If omitted, *max* defaults to *min*. Thus only the vertices and edges in
- the range of *min* are returned. *max* can not be specified without *min*.
-- `OUTBOUND|INBOUND|ANY`: follow outgoing, incoming, or edges pointing in either
-  direction in the traversal. Note that this cannot be replaced by a bind parameter.
-- **startVertex** (string|object): a vertex where the traversal will originate from.
- This can be specified in the form of an ID string or in the form of a document
- with the attribute `_id`. All other values will lead to a warning and an empty
- result. If the specified document does not exist, the result is empty as well
- and there is no warning.
-- `GRAPH` **graphName** (string): the name identifying the named graph.
- Its vertex and edge collections will be looked up. Note that the graph name
- is like a regular string, hence it must be enclosed by quote marks.
-- `PRUNE` **condition** (AQL condition, *optional*, (since version 3.4.5)):
- A condition, like in a FILTER statement, which will be evaluated in every step
- of the traversal, as early as possible. The semantics of this condition is as follows:
- * If the condition evaluates to `true` this path will be considered as a result,
- it might still be post filtered or ignored due to depth constraints. However
- the search will not continue from this path, namely there will be no
- result having this path as a prefix.
-    For example, take the path `(A) -> (B) -> (C)` starting at `A` with PRUNE on `B`:
-    `(A)` and `(A) -> (B)` are valid paths, whereas `(A) -> (B) -> (C)` is not
-    returned because it got pruned on B.
-  * If the condition evaluates to `false` we will continue our search beyond
-    this path.
-    There is only one `PRUNE` condition possible, but it can contain an arbitrary number
-    of `AND` or `OR` statements.
- Also note that you can use the output variables of this traversal in the `PRUNE`,
- as well as all variables defined before this Traversal statement.
-- `OPTIONS` **options** (object, *optional*): used to modify the execution of the
- traversal. Only the following attributes have an effect, all others are ignored:
- - **bfs** (bool): optionally use the alternative breadth-first traversal algorithm
- - true – the traversal will be executed breadth-first. The results will first
-      contain all vertices at depth 1. Then all vertices at depth 2, and so on.
-    - false (default) – the traversal will be executed depth-first. It will first
-      return all paths from *min* depth to *max* depth for one vertex at depth 1.
-      Then for the next vertex at depth 1, and so on.
- - **uniqueVertices** (string): optionally ensure vertex uniqueness
- - "path" – it is guaranteed that there is no path returned with a duplicate vertex
- - "global" – it is guaranteed that each vertex is visited at most once during
- the traversal, no matter how many paths lead from the start vertex to this one.
- If you start with a `min depth > 1` a vertex that was found before *min* depth
- might not be returned at all (it still might be part of a path). **Note:**
- Using this configuration the result is not deterministic any more. If there
- are multiple paths from *startVertex* to *vertex*, one of those is picked.
- It is required to set `bfs: true` because with depth-first search the results
- would be unpredictable.
- - "none" (default) – no uniqueness check is applied on vertices
- - **uniqueEdges** (string): optionally ensure edge uniqueness
- - "path" (default) – it is guaranteed that there is no path returned with a
- duplicate edge
- - "none" – no uniqueness check is applied on edges. **Note:**
- Using this configuration the traversal will follow edges in cycles.
-
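-For example, to run a breadth-first traversal that visits each vertex at most once,
-you could combine these options as in the following sketch (using the traversal
-graph from the examples further below):
-
-```
-FOR v IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
-  OPTIONS { bfs: true, uniqueVertices: 'global' }  // level by level, each vertex at most once
-  RETURN v._key
-```
-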
-### Working with collection sets
-
-```
-[WITH vertexCollection1[, vertexCollection2[, ...vertexCollectionN]]]
-FOR vertex[, edge[, path]]
- IN [min[..max]]
- OUTBOUND|INBOUND|ANY startVertex
- edgeCollection1, ..., edgeCollectionN
- [PRUNE pruneCondition]
- [OPTIONS options]
-```
-
-Instead of `GRAPH graphName` you may specify a list of edge collections. Vertex
-collections are determined by the edges in the edge collections. The traversal
-options are the same as with the [named graph variant](#working-with-named-graphs).
-
-If the same edge collection is specified multiple times, it will behave as if it
-were specified only once. Specifying the same edge collection is only allowed when
-the collections do not have conflicting traversal directions.
-
-ArangoSearch Views cannot be used as edge collections.
-
-### Traversing in mixed directions
-
-For traversals with a list of edge collections you can optionally specify the
-direction for some of the edge collections. Say for example you have three edge
-collections *edges1*, *edges2* and *edges3*, where in *edges2* the direction has
-no relevance but in *edges1* and *edges3* the direction should be taken into account.
-In this case you can use *OUTBOUND* as general traversal direction and *ANY*
-specifically for *edges2* as follows:
-
-```
-FOR vertex IN OUTBOUND
- startVertex
- edges1, ANY edges2, edges3
-```
-
-All collections in the list that do not specify their own direction will use the
-direction defined after `IN`. This allows using a different direction for each
-collection in your traversal.
-
-### Graph traversals in a cluster
-
-Due to the nature of graphs, edges may reference vertices from arbitrary
-collections. Following the paths can thus involve documents from various
-collections and it's not possible to predict which will be visited in a
-traversal. Hence, which collections need to be locked can only be determined
-at run time. Deadlocks may occur under certain circumstances.
-
-Please consider to use the [`WITH` statement](../Operations/With.md) to
-specify the collections you expect to be involved.
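-
-For example, for the traversal graph used in the examples further below, the vertex
-collection can be declared up front like this (a minimal sketch):
-
-```
-WITH circles   // declare the vertex collection involved in the traversal
-FOR v IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
-  RETURN v._key
-```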
-
-Using filters and the explainer to extrapolate the costs
---------------------------------------------------------
-
-All three variables emitted by the traversals can also be used in filter
-statements. For some of these filter statements, the optimizer can detect that it
-is possible to prune paths of traversals earlier, hence filtered results will
-not be emitted to the variables in the first place. This may significantly
-improve the performance of your query. Whenever a filter is not fulfilled,
-the complete set of *vertex*, *edge* and *path* will be skipped. All paths
-with a length greater than *max* will never be computed.
-
-In the current state, `AND` combined filters can be optimized, but `OR`
-combined filters cannot.
-
-The following examples are based on the [traversal graph](../../Manual/Graphs/index.html#the-traversal-graph).
-
-### Pruning
-
-Introduced in: v3.4.5
-
-Pruning is the easiest way to formulate conditions that reduce the amount of data
-to be checked during a search. It improves query performance and reduces the
-overhead generated by the query. Pruning can be applied to the vertex, the edge,
-the path, and any variable defined before.
-See the following examples:
-
- @startDocuBlockInline GRAPHTRAV_graphPruneEdges
- @EXAMPLE_AQL{GRAPHTRAV_graphPruneEdges}
- @DATASET{traversalGraph}
- FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- PRUNE e.theTruth == true
- RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label }
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_graphPruneEdges
-
-This will search until it sees an edge having `theTruth == true`.
-The path with this edge will be returned, but the search will not
-continue after this edge.
-Hence, all returned paths either have no edge with `theTruth == true`,
-or their last edge has `theTruth == true`.
-
- @startDocuBlockInline GRAPHTRAV_graphPruneVertices
- @EXAMPLE_AQL{GRAPHTRAV_graphPruneVertices}
- @DATASET{traversalGraph}
- FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- PRUNE v._key == 'G'
- FILTER v._key == 'G'
- RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label }
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_graphPruneVertices
-
-This will search for all paths from the source `circles/A` to the vertex `circles/G`.
-The PRUNE statement makes sure we stop searching as soon as we have found `G`,
-so we will not go beyond `G` and return to it via a loop.
-The FILTER then removes all paths that do not end in `G`, namely all shorter
-ones that have not been cut off by the PRUNE.
-Hence, the list of all paths from `A` to `G` is returned.
-
-Note that you can also prune as soon as a certain collection is reached, as in the
-following example:
-
- @startDocuBlockInline GRAPHTRAV_graphPruneCollection
- @EXAMPLE_AQL{GRAPHTRAV_graphPruneCollection}
- @DATASET{traversalGraph}
- FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- PRUNE IS_SAME_COLLECTION('circles', v)
- RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label }
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_graphPruneCollection
-
-### Filtering on paths
-
-Filtering on paths allows for the second most powerful filtering and may have the
-second highest impact on performance. Using the path variable you can filter on
-specific iteration depths. You can filter for absolute positions in the path
-by specifying a positive number (which then qualifies for the optimizations),
-or relative positions to the end of the path by specifying a negative number.
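-
-For example, a filter on the last edge of each path can use a negative index
-(a sketch based on the traversal graph used throughout this page):
-
-```
-FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
-  FILTER p.edges[-1].theTruth == true   // -1 refers to the last edge of the path
-  RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label }
-```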
-
-#### Filtering edges on the path
-
-
- @startDocuBlockInline GRAPHTRAV_graphFilterEdges
- @EXAMPLE_AQL{GRAPHTRAV_graphFilterEdges}
- @DATASET{traversalGraph}
- FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- FILTER p.edges[0].theTruth == true
- RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label }
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_graphFilterEdges
-
-
-This query will filter all paths where the start edge (index 0) has the attribute
-*theTruth* equal to *true*. The resulting paths will be up to 5 items long.
-
-#### Filtering vertices on the path
-
-Similar to filtering the edges on the path you can also filter the vertices:
-
- @startDocuBlockInline GRAPHTRAV_graphFilterVertices
- @EXAMPLE_AQL{GRAPHTRAV_graphFilterVertices}
- @DATASET{traversalGraph}
- FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- FILTER p.vertices[1]._key == "G"
- RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label }
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_graphFilterVertices
-
-#### Combining several filters
-
-And of course you can combine these filters in any way you like:
-
- @startDocuBlockInline GRAPHTRAV_graphFilterCombine
- @EXAMPLE_AQL{GRAPHTRAV_graphFilterCombine}
- @DATASET{traversalGraph}
- FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- FILTER p.edges[0].theTruth == true
- AND p.edges[1].theFalse == false
- FILTER p.vertices[1]._key == "G"
- RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label }
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_graphFilterCombine
-
-The query will filter all paths where the first edge has the attribute
-*theTruth* equal to *true*, the first vertex is "G" and the second edge has
-the attribute *theFalse* equal to *false*. The resulting paths will be up to
-5 items long.
-
-**Note**: Although we have defined a *min* of 1, we will only get results of
-depth 2. This is because for all results in depth 1 the second edge does not
-exist and hence cannot fulfill the condition here.
-
-#### Filter on the entire path
-
-With the help of array comparison operators, filters can also be defined
-on the entire path, for example that ALL edges should have `theTruth == true`:
-
- @startDocuBlockInline GRAPHTRAV_graphFilterEntirePath
- @EXAMPLE_AQL{GRAPHTRAV_graphFilterEntirePath}
- @DATASET{traversalGraph}
- FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- FILTER p.edges[*].theTruth ALL == true
- RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label }
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_graphFilterEntirePath
-
-Or that NONE of the edges should have `theTruth == true`:
-
- @startDocuBlockInline GRAPHTRAV_graphFilterPathEdges
- @EXAMPLE_AQL{GRAPHTRAV_graphFilterPathEdges}
- @DATASET{traversalGraph}
- FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- FILTER p.edges[*].theTruth NONE == true
- RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label }
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_graphFilterPathEdges
-
-Both examples above are recognized by the optimizer and can potentially use other indexes
-than the edge index.
-
-It is also possible to define that at least one edge on the path has to fulfill the condition:
-
- @startDocuBlockInline GRAPHTRAV_graphFilterPathAnyEdge
- @EXAMPLE_AQL{GRAPHTRAV_graphFilterPathAnyEdge}
- @DATASET{traversalGraph}
- FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- FILTER p.edges[*].theTruth ANY == true
- RETURN { vertices: p.vertices[*]._key, edges: p.edges[*].label }
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_graphFilterPathAnyEdge
-
-It is guaranteed that at least one, but potentially more edges fulfill the condition.
-All of the above filters can be defined on vertices in the exact same way.
-
-### Filtering on the path vs. filtering on vertices or edges
-
-Filtering on the path influences the iteration on your graph. If certain conditions
-aren't met, the traversal may stop continuing along this path.
-
-In contrast, filters on the vertex or the edge only express whether you're interested
-in the actual value of these documents. Thus, they influence the list of returned
-documents (if you return v or e), similar to specifying a non-null `min` value.
-If you specify a min value of 2, the traversal over the first two nodes of these
-paths still has to be executed - you just won't see them in your result array.
-
-Filters on vertices or edges behave similarly - the traverser has to walk along
-these nodes, since you may be interested in documents further down the path.
-
-### Examples
-
-We will create a simple symmetric traversal demonstration graph:
-
-![traversal graph](traversal_graph.png)
-
- @startDocuBlockInline GRAPHTRAV_01_create_graph
- @EXAMPLE_ARANGOSH_OUTPUT{GRAPHTRAV_01_create_graph}
- ~addIgnoreCollection("circles");
- ~addIgnoreCollection("edges");
- var examples = require("@arangodb/graph-examples/example-graph.js");
- var graph = examples.loadGraph("traversalGraph");
- db.circles.toArray();
- db.edges.toArray();
- print("once you don't need them anymore, clean them up:");
- examples.dropGraph("traversalGraph");
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock GRAPHTRAV_01_create_graph
-
-To get started we select the full graph. For a better overview, we only return
-the vertex IDs:
-
- @startDocuBlockInline GRAPHTRAV_02_traverse_all_a
- @EXAMPLE_AQL{GRAPHTRAV_02_traverse_all_a}
- @DATASET{traversalGraph}
- FOR v IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- RETURN v._key
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_02_traverse_all_a
-
- @startDocuBlockInline GRAPHTRAV_02_traverse_all_b
- @EXAMPLE_AQL{GRAPHTRAV_02_traverse_all_b}
- @DATASET{traversalGraph}
- FOR v IN 1..3 OUTBOUND 'circles/A' edges RETURN v._key
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_02_traverse_all_b
-
-We can nicely see that the traversal heads for the first outer vertex, then goes back
-to the branch to descend into the next tree. After that it returns to our start node,
-to descend again. As we can see, both queries return the same result; the first one
-uses the named graph, the second uses the edge collections directly.
-
-Now we only want the elements of a specific depth (min = max = 2), the ones that
-are right behind the fork:
-
- @startDocuBlockInline GRAPHTRAV_03_traverse_3a
- @EXAMPLE_AQL{GRAPHTRAV_03_traverse_3a}
- @DATASET{traversalGraph}
- FOR v IN 2..2 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- RETURN v._key
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_03_traverse_3a
-
- @startDocuBlockInline GRAPHTRAV_03_traverse_3b
- @EXAMPLE_AQL{GRAPHTRAV_03_traverse_3b}
- @DATASET{traversalGraph}
- FOR v IN 2 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- RETURN v._key
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_03_traverse_3b
-
-As you can see, we can express this in two ways: with or without the *max* parameter
-in the expression.
-
-### Filter examples
-
-Now let's start to add some filters. We want to cut off the branch on the right
-side of the graph. We may filter in two ways:
-
-- we know the vertex at depth 1 has `_key` == `G`
-- we know the `label` attribute of the edge connecting **A** to **G** is `right_foo`
-
- @startDocuBlockInline GRAPHTRAV_04_traverse_4a
- @EXAMPLE_AQL{GRAPHTRAV_04_traverse_4a}
- @DATASET{traversalGraph}
- FOR v, e, p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- FILTER p.vertices[1]._key != 'G'
- RETURN v._key
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_04_traverse_4a
-
- @startDocuBlockInline GRAPHTRAV_04_traverse_4b
- @EXAMPLE_AQL{GRAPHTRAV_04_traverse_4b}
- @DATASET{traversalGraph}
- FOR v, e, p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- FILTER p.edges[0].label != 'right_foo'
- RETURN v._key
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_04_traverse_4b
-
-As we can see all vertices behind **G** are skipped in both queries.
-The first filters on the vertex `_key`, the second on an edge label.
-Note again, as soon as a filter is not fulfilled for any of the three elements
-`v`, `e` or `p`, the complete set of these will be excluded from the result.
-
-We also may combine several filters, for instance to filter out the right branch
-(**G**), and the **E** branch:
-
- @startDocuBlockInline GRAPHTRAV_05_traverse_5a
- @EXAMPLE_AQL{GRAPHTRAV_05_traverse_5a}
- @DATASET{traversalGraph}
- FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- FILTER p.vertices[1]._key != 'G'
- FILTER p.edges[1].label != 'left_blub'
- RETURN v._key
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_05_traverse_5a
-
- @startDocuBlockInline GRAPHTRAV_05_traverse_5b
- @EXAMPLE_AQL{GRAPHTRAV_05_traverse_5b}
- @DATASET{traversalGraph}
- FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- FILTER p.vertices[1]._key != 'G' AND p.edges[1].label != 'left_blub'
- RETURN v._key
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_05_traverse_5b
-
-As you can see, combining two `FILTER` statements with an `AND` has the same result.
-
-Comparing OUTBOUND / INBOUND / ANY
-----------------------------------
-
-All our previous examples traversed the graph in *OUTBOUND* edge direction.
-You may however want to also traverse in reverse direction (*INBOUND*) or
-both (*ANY*). Since `circles/A` only has outbound edges, we start our queries
-from `circles/E`:
-
- @startDocuBlockInline GRAPHTRAV_06_traverse_6a
- @EXAMPLE_AQL{GRAPHTRAV_06_traverse_6a}
- @DATASET{traversalGraph}
- FOR v IN 1..3 OUTBOUND 'circles/E' GRAPH 'traversalGraph'
- RETURN v._key
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_06_traverse_6a
-
- @startDocuBlockInline GRAPHTRAV_06_traverse_6b
- @EXAMPLE_AQL{GRAPHTRAV_06_traverse_6b}
- @DATASET{traversalGraph}
- FOR v IN 1..3 INBOUND 'circles/E' GRAPH 'traversalGraph'
- RETURN v._key
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_06_traverse_6b
-
- @startDocuBlockInline GRAPHTRAV_06_traverse_6c
- @EXAMPLE_AQL{GRAPHTRAV_06_traverse_6c}
- @DATASET{traversalGraph}
- FOR v IN 1..3 ANY 'circles/E' GRAPH 'traversalGraph'
- RETURN v._key
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_06_traverse_6c
-
-The first traversal will only walk in the forward (*OUTBOUND*) direction.
-Therefore from **E** we can only see **F**. Walking in reverse direction
-(*INBOUND*), we see the path to **A**: **B** → **A**.
-
-Walking in forward and reverse direction (*ANY*) we can see a more diverse result.
-First of all, we see the simple paths to **F** and **A**. However, these vertices
-have edges in other directions and they will be traversed.
-
-**Note**: The traverser may use identical edges multiple times. For instance,
-if it walks from **E** to **F**, it will continue to walk from **F** to **E**
-using the same edge once again. Due to this we will see duplicate nodes in the result.
-
-Please note that the direction can't be passed in by a bind parameter.
-
-Use the AQL explainer for optimizations
----------------------------------------
-
-Now let's have a look what the optimizer does behind the curtain and inspect
-traversal queries using [the explainer](../ExecutionAndPerformance/Optimizer.md):
-
- @startDocuBlockInline GRAPHTRAV_07_traverse_7
- @EXAMPLE_AQL{GRAPHTRAV_07_traverse_7}
- @DATASET{traversalGraph}
- @EXPLAIN{TRUE}
- FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- LET localScopeVar = RAND() > 0.5
- FILTER p.edges[0].theTruth != localScopeVar
- RETURN v._key
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_07_traverse_7
-
- @startDocuBlockInline GRAPHTRAV_07_traverse_8
- @EXAMPLE_AQL{GRAPHTRAV_07_traverse_8}
- @DATASET{traversalGraph}
- @EXPLAIN{TRUE}
- FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
- FILTER p.edges[0].label == 'right_foo'
- RETURN v._key
- @END_EXAMPLE_AQL
- @endDocuBlock GRAPHTRAV_07_traverse_8
-
-We now see two queries: in one we add a variable *localScopeVar*, which is outside
-the scope of the traversal itself - it is not known inside the traverser.
-Therefore, this filter can only be executed after the traversal, which may be
-undesired in large graphs. The second query on the other hand only operates on the
-path, and therefore this condition can be used during the execution of the traversal.
-Paths that are filtered out by this condition won't be processed at all.
-
-And finally clean it up again:
-
- @startDocuBlockInline GRAPHTRAV_99_drop_graph
- @EXAMPLE_ARANGOSH_OUTPUT{GRAPHTRAV_99_drop_graph}
- ~examples.loadGraph("traversalGraph");
- var examples = require("@arangodb/graph-examples/example-graph.js");
- examples.dropGraph("traversalGraph");
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock GRAPHTRAV_99_drop_graph
-
-If this traversal is not powerful enough for your needs, for example if you cannot
-describe your conditions as AQL filter statements, then you might want to have a look at
-[manually crafted traversers](../../Manual/Graphs/Traversals/index.html).
-
-Also see how to [combine graph traversals](../Examples/CombiningGraphTraversals.md).
diff --git a/Documentation/Books/AQL/Graphs/TraversalsExplained.md b/Documentation/Books/AQL/Graphs/TraversalsExplained.md
deleted file mode 100644
index 4ac6337fe414..000000000000
--- a/Documentation/Books/AQL/Graphs/TraversalsExplained.md
+++ /dev/null
@@ -1,82 +0,0 @@
-Traversals explained
-====================
-
-General query idea
-------------------
-
-A traversal starts at one specific document (*startVertex*) and follows all
-edges connected to this document. For all documents (*vertices*) that are
-targeted by these edges it will again follow all edges connected to them and
-so on. It is possible to define how many of these follow iterations should be
-executed at least (*min* depth) and at most (*max* depth).
-
-For all vertices that were visited during this process in the range between
-*min* depth and *max* depth iterations, you will get a result in the form of a
-set with three items:
-
-1. The visited vertex.
-2. The edge pointing to it.
-3. The complete path from *startVertex* to the visited vertex as an object with an
-   attribute *edges* and an attribute *vertices*, each a list of the corresponding
- elements. These lists are sorted, which means the first element in *vertices*
- is the *startVertex* and the last is the visited vertex, and the n-th element
- in *edges* connects the n-th element with the (n+1)-th element in *vertices*.
-
-Example execution
------------------
-
-Let's take a look at a simple example to explain how it works.
-This is the graph that we are going to traverse:
-
-![traversal graph](traversal_graph.png)
-
-We use the following parameters for our query:
-
-1. We start at the vertex **A**.
-2. We use a *min* depth of 1.
-3. We use a *max* depth of 2.
-4. We follow only in *OUTBOUND* direction of edges
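-
-Expressed in AQL, this setup corresponds to a traversal like the following sketch
-(assuming the graph is stored as a named graph `traversalGraph` with its vertices
-in a `circles` collection, as in the traversal examples):
-
-```
-FOR v, e, p IN 1..2 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
-  RETURN { vertex: v._key, edge: e._key, path: p }
-```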
-
-![traversal graph step 1](traversal_graph1.png)
-
-Now it walks to one of the direct neighbors of **A**, say **B** (note: ordering
-is not guaranteed!):
-
-![traversal graph step 2](traversal_graph2.png)
-
-The query will remember the state (red circle) and will emit the first result
-**A** → **B** (black box). This will also prevent the traverser from being trapped
-in cycles. Now again it will visit one of the direct neighbors of **B**, say **E**:
-
-![traversal graph step 3](traversal_graph3.png)
-
-We have limited the query with a *max* depth of *2*, so it will not pick any
-neighbor of **E**, as the path from **A** to **E** already requires *2* steps.
-Instead, we will go back one level to **B** and continue with any other direct
-neighbor there:
-
-![traversal graph step 4](traversal_graph4.png)
-
-Again after we produced this result we will step back to **B**.
-But there is no neighbor of **B** left that we have not yet visited.
-Hence we go another step back to **A** and continue with any other neighbor there.
-
-![traversal graph step 5](traversal_graph5.png)
-
-And identical to the iterations before we will visit **H**:
-
-![traversal graph step 6](traversal_graph6.png)
-
-And **J**:
-
-![traversal graph step 7](traversal_graph7.png)
-
-After these steps there is no further result left. So altogether this query
-has returned the following paths:
-
-1. **A** → **B**
-2. **A** → **B** → **E**
-3. **A** → **B** → **C**
-4. **A** → **G**
-5. **A** → **G** → **H**
-6. **A** → **G** → **J**
diff --git a/Documentation/Books/AQL/Graphs/train_map.png b/Documentation/Books/AQL/Graphs/train_map.png
deleted file mode 100644
index e4f1c6a0e96f..000000000000
Binary files a/Documentation/Books/AQL/Graphs/train_map.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Graphs/traversal_graph.png b/Documentation/Books/AQL/Graphs/traversal_graph.png
deleted file mode 100644
index 3d8325bc1519..000000000000
Binary files a/Documentation/Books/AQL/Graphs/traversal_graph.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Graphs/traversal_graph1.png b/Documentation/Books/AQL/Graphs/traversal_graph1.png
deleted file mode 100644
index 99f8d232551f..000000000000
Binary files a/Documentation/Books/AQL/Graphs/traversal_graph1.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Graphs/traversal_graph2.png b/Documentation/Books/AQL/Graphs/traversal_graph2.png
deleted file mode 100644
index 8bc6984293a5..000000000000
Binary files a/Documentation/Books/AQL/Graphs/traversal_graph2.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Graphs/traversal_graph3.png b/Documentation/Books/AQL/Graphs/traversal_graph3.png
deleted file mode 100644
index c71af0fdcc85..000000000000
Binary files a/Documentation/Books/AQL/Graphs/traversal_graph3.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Graphs/traversal_graph4.png b/Documentation/Books/AQL/Graphs/traversal_graph4.png
deleted file mode 100644
index b9a62df2b131..000000000000
Binary files a/Documentation/Books/AQL/Graphs/traversal_graph4.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Graphs/traversal_graph5.png b/Documentation/Books/AQL/Graphs/traversal_graph5.png
deleted file mode 100644
index 410c6f2e280f..000000000000
Binary files a/Documentation/Books/AQL/Graphs/traversal_graph5.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Graphs/traversal_graph6.png b/Documentation/Books/AQL/Graphs/traversal_graph6.png
deleted file mode 100644
index faa0ab98cb5c..000000000000
Binary files a/Documentation/Books/AQL/Graphs/traversal_graph6.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Graphs/traversal_graph7.png b/Documentation/Books/AQL/Graphs/traversal_graph7.png
deleted file mode 100644
index ca633dda1057..000000000000
Binary files a/Documentation/Books/AQL/Graphs/traversal_graph7.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Invocation/README.md b/Documentation/Books/AQL/Invocation/README.md
deleted file mode 100644
index 2328af293126..000000000000
--- a/Documentation/Books/AQL/Invocation/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-How to invoke AQL
-=================
-
-AQL queries can be executed using:
-
-- the web interface,
-- the `db` object (either in arangosh or in a Foxx service)
-- or the raw HTTP API.
-
-There are always calls to the server's API under the hood, but the web interface
-and the `db` object abstract away the low-level communication details and are
-thus easier to use.
-
-The ArangoDB Web Interface has a [specific tab for AQL query execution](../Invocation/WithWebInterface.md).
-
-You can run [AQL queries from the ArangoDB Shell](../Invocation/WithArangosh.md)
-with the [_query](WithArangosh.html#with-dbquery) and
-[_createStatement](WithArangosh.html#with-createstatement-arangostatement) methods
-of the [`db` object](../../Manual/Appendix/References/DBObject.html). This chapter
-also describes how to use bind parameters, statistics, counting and cursors with
-arangosh.
-
-If you are using Foxx, see [how to write database queries](../../Manual/Foxx/GettingStarted.html#writing-database-queries)
-for examples including tagged template strings.
-
-If you want to run AQL queries from your application via the HTTP REST API,
-see the full API description at [HTTP Interface for AQL Query Cursors](../../HTTP/AqlQueryCursor/index.html).
diff --git a/Documentation/Books/AQL/Invocation/WithArangosh.md b/Documentation/Books/AQL/Invocation/WithArangosh.md
deleted file mode 100644
index 38c8631a641c..000000000000
--- a/Documentation/Books/AQL/Invocation/WithArangosh.md
+++ /dev/null
@@ -1,375 +0,0 @@
-Executing queries from Arangosh
-===============================
-
-Within the ArangoDB shell, the *_query* and *_createStatement* methods of the
-*db* object can be used to execute AQL queries. This chapter also describes
-how to use bind parameters, counting, statistics and cursors.
-
-With db._query
---------------
-
-One can execute queries with the *_query* method of the *db* object.
-This will run the specified query in the context of the currently
-selected database and return the query results in a cursor. The results of the cursor
-can be printed using its *toArray* method:
-
- @startDocuBlockInline 01_workWithAQL_all
- @EXAMPLE_ARANGOSH_OUTPUT{01_workWithAQL_all}
- ~addIgnoreCollection("mycollection")
- db._create("mycollection")
- db.mycollection.save({ _key: "testKey", Hello : "World" })
- db._query('FOR my IN mycollection RETURN my._key').toArray()
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 01_workWithAQL_all
-
-### db._query Bind parameters
-
-To pass bind parameters into a query, they can be specified as second argument to the
-*_query* method:
-
- @startDocuBlockInline 02_workWithAQL_bindValues
- @EXAMPLE_ARANGOSH_OUTPUT{02_workWithAQL_bindValues}
- |db._query(
- | 'FOR c IN @@collection FILTER c._key == @key RETURN c._key', {
- | '@collection': 'mycollection',
- | 'key': 'testKey'
- }).toArray();
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 02_workWithAQL_bindValues
-
-### ES6 template strings
-
-It is also possible to use ES6 template strings for generating AQL queries. There is
-a template string generator function named *aql*; we call it once to demonstrate
-its result, and then use it directly inside a query:
-
-```js
-var key = 'testKey';
-aql`FOR c IN mycollection FILTER c._key == ${key} RETURN c._key`;
-{
- "query" : "FOR c IN mycollection FILTER c._key == @value0 RETURN c._key",
- "bindVars" : {
- "value0" : "testKey"
- }
-}
-```
-
- @startDocuBlockInline 02_workWithAQL_aqlQuery
- @EXAMPLE_ARANGOSH_OUTPUT{02_workWithAQL_aqlQuery}
- var key = 'testKey';
- |db._query(
- | aql`FOR c IN mycollection FILTER c._key == ${key} RETURN c._key`
- ).toArray();
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 02_workWithAQL_aqlQuery
-
-Arbitrary JavaScript expressions can be used in queries that are generated with the
-*aql* template string generator. Collection objects are handled automatically:
-
- @startDocuBlockInline 02_workWithAQL_aqlCollectionQuery
- @EXAMPLE_ARANGOSH_OUTPUT{02_workWithAQL_aqlCollectionQuery}
- var key = 'testKey';
- |db._query(aql`FOR doc IN ${ db.mycollection } RETURN doc`
- ).toArray();
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 02_workWithAQL_aqlCollectionQuery
-
-Note: data-modification AQL queries normally do not return a result (unless the AQL query
-contains an extra *RETURN* statement). When not using a *RETURN* statement in the query, the
-*toArray* method will return an empty array.
-
-### Statistics and extra Information
-
-It is always possible to retrieve statistics for a query with the *getExtra* method:
-
- @startDocuBlockInline 03_workWithAQL_getExtra
- @EXAMPLE_ARANGOSH_OUTPUT{03_workWithAQL_getExtra}
- |db._query(`FOR i IN 1..100
- | INSERT { _key: CONCAT('test', TO_STRING(i)) }
- | INTO mycollection`
- ).getExtra();
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 03_workWithAQL_getExtra
-
-The meaning of the statistics values is described in [Execution statistics](../ExecutionAndPerformance/QueryStatistics.md).
-You will also find warnings here; if you're designing queries on the shell, be sure to check them as well.
-
-### Setting a memory limit
-
-To set a memory limit for the query, pass *options* to the *_query* method.
-The memory limit specifies the maximum number of bytes that the query is
-allowed to use. When a single AQL query reaches the specified limit value,
-the query will be aborted with a *resource limit exceeded* exception. In a
-cluster, the memory accounting is done per shard, so the limit value is
-effectively a memory limit per query per shard.
-
- @startDocuBlockInline 02_workWithAQL_memoryLimit
- @EXAMPLE_ARANGOSH_OUTPUT{02_workWithAQL_memoryLimit}
- |db._query(
- | 'FOR i IN 1..100000 SORT i RETURN i', {}, {
- | memoryLimit: 100000
- }).toArray(); // xpError(ERROR_RESOURCE_LIMIT)
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 02_workWithAQL_memoryLimit
-
-If no memory limit is specified, then the server default value (controlled by the
-startup option *--query.memory-limit*) will be used for restricting the maximum amount
-of memory the query can use. A memory limit value of *0* means that the maximum
-amount of memory for the query is not restricted.
-
-### Setting options
-
-There are further options that can be passed in the *options* attribute of the *_query* method:
-
-- *failOnWarning*: when set to *true*, this will make the query throw an exception and
- abort in case a warning occurs. This option should be used in development to catch
- errors early. If set to *false*, warnings will not be propagated to exceptions and
- will be returned with the query results. There is also a server configuration option
- `--query.fail-on-warning` for setting the default value for *failOnWarning* so it does
- not need to be set on a per-query level.
-
-- *cache*: if set to *true*, this will put the query result into the query result cache
- if the query result is eligible for caching and the query cache is running in demand
- mode. If set to *false*, the query result will not be inserted into the query result
- cache. Note that query results will never be inserted into the query result cache if
- the query result cache is disabled, and that they will be automatically inserted into
- the query result cache when it is active in non-demand mode.
-
-- *profile*: if set to *true* or *1*, returns extra timing information for the query. The timing
-  information is accessible via the *getExtra* method of the query result. If set to *2*, the query
-  will include execution stats per query plan node in the sub-attribute *stats.nodes* of the
-  *extra* return attribute. Additionally, the query plan is returned in the sub-attribute *extra.plan*.
-
-- *maxWarningCount*: limits the number of warnings that are returned by the query if
- *failOnWarning* is not set to *true*. The default value is *10*.
-
-- *maxNumberOfPlans*: limits the number of query execution plans the optimizer will
- create at most. Reducing the number of query execution plans may speed up query plan
- creation and optimization for complex queries, but normally there is no need to adjust
- this value.
-
-- *stream*: Specify *true* and the query will be executed in a **streaming** fashion. The query result is
- not stored on the server, but calculated on the fly. *Beware*: long-running queries will
- need to hold the collection locks for as long as the query cursor exists. It is advisable
- to *only* use this option on short-running queries *or* without exclusive locks (write locks on MMFiles).
- When set to *false* the query will be executed right away in its entirety.
- In that case query results are either returned right away (if the result set is small enough),
- or stored on the arangod instance and accessible via the cursor API.
-
- Please note that the query options `cache`, `count` and `fullCount` will not work on streaming
- queries. Additionally query statistics, warnings and profiling data will only be available
- after the query is finished.
-  The default value is *false*.
-
-The following additional attributes can be passed to queries in the RocksDB storage engine:
-
-- *maxTransactionSize*: transaction size limit in bytes
-
-- *intermediateCommitSize*: maximum total size of operations after which an intermediate
- commit is performed automatically
-
-- *intermediateCommitCount*: maximum number of operations after which an intermediate
- commit is performed automatically
-
-In the ArangoDB Enterprise Edition there is an additional parameter:
-
-- *skipInaccessibleCollections*: AQL queries (especially graph traversals) will treat
-  collections to which a user has **no access** rights as if these collections were empty.
- Instead of returning a *forbidden access* error, your queries will execute normally.
- This is intended to help with certain use-cases: A graph contains several collections
- and different users execute AQL queries on that graph. You can now naturally limit the
- accessible results by changing the access rights of users on collections.
-
-With _createStatement (ArangoStatement)
----------------------------------------
-
-The *_query* method is a shorthand for creating an ArangoStatement object,
-executing it and iterating over the resulting cursor. If more control over the
-result set iteration is needed, it is recommended to first create an
-ArangoStatement object as follows:
-
- @startDocuBlockInline 04_workWithAQL_statements1
- @EXAMPLE_ARANGOSH_OUTPUT{04_workWithAQL_statements1}
- |stmt = db._createStatement( {
- "query": "FOR i IN [ 1, 2 ] RETURN i * 2" } );
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 04_workWithAQL_statements1
-
-To execute the query, use the *execute* method of the statement:
-
- @startDocuBlockInline 05_workWithAQL_statements2
- @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements2}
- ~var stmt = db._createStatement( { "query": "FOR i IN [ 1, 2 ] RETURN i * 2" } );
- c = stmt.execute();
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 05_workWithAQL_statements2
-
-### Cursors
-
-Once the query is executed, the query results are available in a cursor.
-The cursor can return all its results at once using the *toArray* method.
-This is a short-cut that you can use if you want to access the full result
-set without iterating over it yourself.
-
- @startDocuBlockInline 05_workWithAQL_statements3
- @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements3}
- ~var stmt = db._createStatement( { "query": "FOR i IN [ 1, 2 ] RETURN i * 2" } );
- ~var c = stmt.execute();
- c.toArray();
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 05_workWithAQL_statements3
-
-Cursors can also be used to iterate over the result set document-by-document.
-To do so, use the *hasNext* and *next* methods of the cursor:
-
- @startDocuBlockInline 05_workWithAQL_statements4
- @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements4}
- ~var stmt = db._createStatement( { "query": "FOR i IN [ 1, 2 ] RETURN i * 2" } );
- ~var c = stmt.execute();
- while (c.hasNext()) { require("@arangodb").print(c.next()); }
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 05_workWithAQL_statements4
-
-Please note that you can iterate over the results of a cursor only once, and that
-the cursor will be empty when you have fully iterated over it. To iterate over
-the results again, the query needs to be re-executed.
-
-Additionally, the iteration can be done in a forward-only fashion. There is no
-backwards iteration or random access to elements in a cursor.
-
-### ArangoStatement parameters binding
-
-To execute an AQL query using bind parameters, you need to create a statement first
-and then bind the parameters to it before execution:
-
- @startDocuBlockInline 05_workWithAQL_statements5
- @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements5}
- |var stmt = db._createStatement( {
- "query": "FOR i IN [ @one, @two ] RETURN i * 2" } );
- stmt.bind("one", 1);
- stmt.bind("two", 2);
- c = stmt.execute();
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 05_workWithAQL_statements5
-
-The cursor results can then be dumped or iterated over as usual, e.g.:
-
- @startDocuBlockInline 05_workWithAQL_statements6
- @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements6}
- ~var stmt = db._createStatement( { "query": "FOR i IN [ @one, @two ] RETURN i * 2" } );
- ~stmt.bind("one", 1);
- ~stmt.bind("two", 2);
- ~var c = stmt.execute();
- c.toArray();
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 05_workWithAQL_statements6
-
-or
-
- @startDocuBlockInline 05_workWithAQL_statements7
- @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements7}
- ~var stmt = db._createStatement( { "query": "FOR i IN [ @one, @two ] RETURN i * 2" } );
- ~stmt.bind("one", 1);
- ~stmt.bind("two", 2);
- ~var c = stmt.execute();
- while (c.hasNext()) { require("@arangodb").print(c.next()); }
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 05_workWithAQL_statements7
-
-Please note that bind parameters can also be passed into the *_createStatement* method directly,
-making it a bit more convenient:
-
- @startDocuBlockInline 05_workWithAQL_statements8
- @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements8}
- |stmt = db._createStatement( {
- | "query": "FOR i IN [ @one, @two ] RETURN i * 2",
- | "bindVars": {
- | "one": 1,
- | "two": 2
- | }
- } );
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 05_workWithAQL_statements8
-
-### Counting with a cursor
-
-Cursors also optionally provide the total number of results. By default, they do not.
-To make the server return the total number of results, you may set the *count* attribute to
-*true* when creating a statement:
-
- @startDocuBlockInline 05_workWithAQL_statements9
- @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements9}
- |stmt = db._createStatement( {
- | "query": "FOR i IN [ 1, 2, 3, 4 ] RETURN i",
- "count": true } );
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 05_workWithAQL_statements9
-
-After executing this query, you can use the *count* method of the cursor to get the
-total number of results from the result set:
-
- @startDocuBlockInline 05_workWithAQL_statements10
- @EXAMPLE_ARANGOSH_OUTPUT{05_workWithAQL_statements10}
- ~var stmt = db._createStatement( { "query": "FOR i IN [ 1, 2, 3, 4 ] RETURN i", "count": true } );
- var c = stmt.execute();
- c.count();
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 05_workWithAQL_statements10
-
-Please note that the *count* method returns nothing if you did not specify the *count*
-attribute when creating the query.
-
-This is intentional, so that the server may apply optimizations when executing the query and
-construct the result set incrementally. Incremental creation of the result set is not possible
-if all of the results need to be shipped to the client anyway. Therefore, the client
-has the choice to specify *count* and retrieve the total number of results for a query (and
-disable potential incremental result set creation on the server), or to not retrieve the total
-number of results and allow the server to apply optimizations.
-
-Please note that at the moment the server will always create the full result set for each query so
-specifying or omitting the *count* attribute currently does not have any impact on query execution.
-This may change in the future. Future versions of ArangoDB may create result sets incrementally
-on the server-side and may be able to apply optimizations if a result set is not fully fetched by
-a client.
-
-
-### Using cursors to obtain additional information on internal timings
-
-Cursors can also optionally provide statistics of the internal execution phases. By default, they do not.
-To find out how long parsing, optimization, instantiation and execution took,
-make the server return that information by setting the *profile* attribute to
-*true* when creating a statement:
-
- @startDocuBlockInline 06_workWithAQL_statements11
- @EXAMPLE_ARANGOSH_OUTPUT{06_workWithAQL_statements11}
- |stmt = db._createStatement( {
- | "query": "FOR i IN [ 1, 2, 3, 4 ] RETURN i",
- options: {"profile": true}} );
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 06_workWithAQL_statements11
-
-After executing this query, you can use the *getExtra()* method of the cursor to get the
-produced statistics:
-
- @startDocuBlockInline 06_workWithAQL_statements12
- @EXAMPLE_ARANGOSH_OUTPUT{06_workWithAQL_statements12}
- ~var stmt = db._createStatement( { "query": "FOR i IN [ 1, 2, 3, 4 ] RETURN i", options: {"profile": true}} );
- var c = stmt.execute();
- c.getExtra();
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 06_workWithAQL_statements12
-
-Query validation
-----------------
-
-The *_parse* method of the *db* object can be used to parse and validate a
-query syntactically, without actually executing it.
-
- @startDocuBlockInline 06_workWithAQL_statements13
- @EXAMPLE_ARANGOSH_OUTPUT{06_workWithAQL_statements13}
- db._parse( "FOR i IN [ 1, 2 ] RETURN i" );
- @END_EXAMPLE_ARANGOSH_OUTPUT
- @endDocuBlock 06_workWithAQL_statements13
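-
-If the query is syntactically invalid, *_parse* will throw an exception. As a minimal
-sketch (the query string below is deliberately invalid and only serves as an illustration):
-
-```js
-try {
-  db._parse("FOR i IN RETURN i"); // invalid: the source expression after IN is missing
-} catch (err) {
-  require("@arangodb").print(err.errorMessage);
-}
-```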
-
diff --git a/Documentation/Books/AQL/Invocation/WithWebInterface.md b/Documentation/Books/AQL/Invocation/WithWebInterface.md
deleted file mode 100644
index c764c3697019..000000000000
--- a/Documentation/Books/AQL/Invocation/WithWebInterface.md
+++ /dev/null
@@ -1,49 +0,0 @@
-AQL with ArangoDB Web Interface
-===============================
-
-
-In the ArangoDB Web Interface, the AQL Editor tab allows you to execute ad-hoc AQL
-queries.
-
-Type in a query in the main box and execute it by pressing the *Execute* button.
-The query result will be shown in another tab. The editor provides a few example
-queries that can be used as templates.
-
-It also provides a feature to explain a query and inspect its execution plan
-(with the *Explain* button).
-
-Bind parameters can be defined in the right-hand side pane. The format is the
-same as used for bind parameters in the HTTP REST API and in (JavaScript)
-application code.
-
-Here is an example:
-
-```js
-FOR doc IN @@collection
- FILTER CONTAINS(LOWER(doc.author), @search, false)
- RETURN { "name": doc.name, "descr": doc.description, "author": doc.author }
-```
-
-Bind parameters (table view mode):
-
-| Key | Value |
-|-------------|--------|
-| @collection | _apps |
-| search | arango |
-
-Bind parameters (JSON view mode):
-
-```json
-{
- "@collection": "_apps",
- "search": "arango"
-}
-```
-
-How bind parameters work is described in [AQL Fundamentals](../Fundamentals/BindParameters.md).
-
-Queries can also be saved in the AQL editor along with their bind parameter values
-for later reuse. This data is stored in the user profile in the current database
-(in the *_users* system collection).
-
-Also see the detailed description of the [Web Interface](../../Manual/Programs/WebInterface/index.html).
diff --git a/Documentation/Books/AQL/Operations/Collect.md b/Documentation/Books/AQL/Operations/Collect.md
deleted file mode 100644
index 682427a7c6e4..000000000000
--- a/Documentation/Books/AQL/Operations/Collect.md
+++ /dev/null
@@ -1,338 +0,0 @@
-COLLECT
-=======
-
-The *COLLECT* keyword can be used to group an array by one or multiple group
-criteria.
-
-The *COLLECT* statement will eliminate all local variables in the current
-scope. After *COLLECT* only the variables introduced by *COLLECT* itself are
-available.
-
-The general syntaxes for *COLLECT* are:
-
-```
-COLLECT variableName = expression options
-COLLECT variableName = expression INTO groupsVariable options
-COLLECT variableName = expression INTO groupsVariable = projectionExpression options
-COLLECT variableName = expression INTO groupsVariable KEEP keepVariable options
-COLLECT variableName = expression WITH COUNT INTO countVariable options
-COLLECT variableName = expression AGGREGATE variableName = aggregateExpression options
-COLLECT AGGREGATE variableName = aggregateExpression options
-COLLECT WITH COUNT INTO countVariable options
-```
-
-`options` is optional in all variants.
-
-Grouping syntaxes
------------------
-
-The first syntax form of *COLLECT* only groups the result by the defined group
-criteria specified in *expression*. In order to further process the results
-produced by *COLLECT*, a new variable (specified by *variableName*) is introduced.
-This variable contains the group value.
-
-Here's an example query that finds the distinct values in *u.city* and makes
-them available in variable *city*:
-
-```
-FOR u IN users
- COLLECT city = u.city
- RETURN {
- "city" : city
- }
-```
-
-The second form does the same as the first form, but additionally introduces a
-variable (specified by *groupsVariable*) that contains all elements that fell into the
-group. This works as follows: The *groupsVariable* variable is an array containing
-as many elements as there are in the group. Each member of that array is
-a JSON object in which the value of every variable that is defined in the
-AQL query is bound to the corresponding attribute. Note that this considers
-all variables that are defined before the *COLLECT* statement, but not those on
-the top level (outside of any *FOR*), unless the *COLLECT* statement is itself
-on the top level, in which case all variables are taken. Furthermore note
-that it is possible that the optimizer moves *LET* statements out of *FOR*
-statements to improve performance.
-
-```
-FOR u IN users
- COLLECT city = u.city INTO groups
- RETURN {
- "city" : city,
- "usersInCity" : groups
- }
-```
-
-In the above example, the array *users* will be grouped by the attribute
-*city*. The result is a new array of documents, with one element per distinct
-*u.city* value. The elements from the original array (here: *users*) per city are
-made available in the variable *groups*. This is due to the *INTO* clause.
-
-*COLLECT* also allows specifying multiple group criteria. Individual group
-criteria can be separated by commas:
-
-```
-FOR u IN users
- COLLECT country = u.country, city = u.city INTO groups
- RETURN {
- "country" : country,
- "city" : city,
- "usersInCity" : groups
- }
-```
-
-In the above example, the array *users* is grouped by country first and then
-by city, and for each distinct combination of country and city, the users
-will be returned.
-
-
-Discarding obsolete variables
------------------------------
-
-The third form of *COLLECT* allows rewriting the contents of the *groupsVariable*
-using an arbitrary *projectionExpression*:
-
-```
-FOR u IN users
- COLLECT country = u.country, city = u.city INTO groups = u.name
- RETURN {
- "country" : country,
- "city" : city,
- "userNames" : groups
- }
-```
-
-In the above example, the *projectionExpression* is just *u.name*. Therefore,
-only this attribute is copied into the *groupsVariable* for each document.
-This is probably much more efficient than copying all variables from the scope into
-the *groupsVariable*, as would happen without a *projectionExpression*.
-
-The expression following *INTO* can also be used for arbitrary computations:
-
-```
-FOR u IN users
- COLLECT country = u.country, city = u.city INTO groups = {
- "name" : u.name,
- "isActive" : u.status == "active"
- }
- RETURN {
- "country" : country,
- "city" : city,
- "usersInCity" : groups
- }
-```
-
-*COLLECT* also provides an optional *KEEP* clause that can be used to control
-which variables will be copied into the variable created by `INTO`. If no
-*KEEP* clause is specified, all variables from the scope will be copied as
-sub-attributes into the *groupsVariable*.
-This is safe but can have a negative impact on performance if there
-are many variables in scope or the variables contain massive amounts of data.
-
-The following example limits the variables that are copied into the *groupsVariable*
-to just *name*. The variables *u* and *someCalculation* also present in the scope
-will not be copied into *groupsVariable* because they are not listed in the *KEEP* clause:
-
-```
-FOR u IN users
- LET name = u.name
- LET someCalculation = u.value1 + u.value2
- COLLECT city = u.city INTO groups KEEP name
- RETURN {
- "city" : city,
- "userNames" : groups[*].name
- }
-```
-
-*KEEP* is only valid in combination with *INTO*. Only valid variable names can
-be used in the *KEEP* clause. *KEEP* supports the specification of multiple
-variable names.
-
-
-Group length calculation
-------------------------
-
-*COLLECT* also provides a special *WITH COUNT* clause that can be used to
-determine the number of group members efficiently.
-
-The simplest form just returns the number of items that made it into the
-*COLLECT*:
-
-```
-FOR u IN users
- COLLECT WITH COUNT INTO length
- RETURN length
-```
-
-The above is equivalent to, but less efficient than:
-
-```
-RETURN LENGTH(users)
-```
-
-The *WITH COUNT* clause can also be used to efficiently count the number
-of items in each group:
-
-```
-FOR u IN users
- COLLECT age = u.age WITH COUNT INTO length
- RETURN {
- "age" : age,
- "count" : length
- }
-```
-
-Note: the *WITH COUNT* clause can only be used together with an *INTO* clause.
-
-
-Aggregation
------------
-
-A `COLLECT` statement can be used to perform aggregation of data per group. To
-only determine group lengths, the `WITH COUNT INTO` variant of `COLLECT` can be
-used as described before.
-
-For other aggregations, it is possible to run aggregate functions on the `COLLECT`
-results:
-
-```
-FOR u IN users
- COLLECT ageGroup = FLOOR(u.age / 5) * 5 INTO g
- RETURN {
- "ageGroup" : ageGroup,
- "minAge" : MIN(g[*].u.age),
- "maxAge" : MAX(g[*].u.age)
- }
-```
-
-The above however requires storing all group values during the collect operation for
-all groups, which can be inefficient.
-
-The special `AGGREGATE` variant of `COLLECT` allows building the aggregate values
-incrementally during the collect operation, and is therefore often more efficient.
-
-With the `AGGREGATE` variant the above query becomes:
-
-```
-FOR u IN users
- COLLECT ageGroup = FLOOR(u.age / 5) * 5
- AGGREGATE minAge = MIN(u.age), maxAge = MAX(u.age)
- RETURN {
- ageGroup,
- minAge,
- maxAge
- }
-```
-
-The `AGGREGATE` keyword can only be used after the `COLLECT` keyword. If used, it
-must directly follow the declaration of the grouping keys. If no grouping keys
-are used, it must follow the `COLLECT` keyword directly:
-
-
-```
-FOR u IN users
- COLLECT AGGREGATE minAge = MIN(u.age), maxAge = MAX(u.age)
- RETURN {
- minAge,
- maxAge
- }
-```
-
-Only specific expressions are allowed on the right-hand side of each `AGGREGATE`
-assignment:
-
-- on the top level, an aggregate expression must be a call to one of the supported
- aggregation functions `LENGTH`, `MIN`, `MAX`, `SUM`, `AVERAGE`, `STDDEV_POPULATION`,
- `STDDEV_SAMPLE`, `VARIANCE_POPULATION`, `VARIANCE_SAMPLE`, `UNIQUE`, `SORTED_UNIQUE`
- or `COUNT_DISTINCT`. The following aliases are allowed too: `COUNT` (for `LENGTH`),
- `AVG` (for `AVERAGE`), `STDDEV` (for `STDDEV_POPULATION`), `VARIANCE` (for `VARIANCE_POPULATION`),
- `COUNT_UNIQUE` (for `COUNT_DISTINCT`).
-
-- an aggregate expression must not refer to variables introduced by the `COLLECT` itself
-
-
-COLLECT variants
-----------------
-
-Since ArangoDB 2.6, there are two variants of *COLLECT* that the optimizer can
-choose from: the *sorted* variant and the *hash* variant. The *hash* variant only becomes a
-candidate for *COLLECT* statements that do not use an *INTO* clause.
-
-The optimizer will always generate a plan that employs the *sorted* method. The *sorted* method
-requires its input to be sorted by the group criteria specified in the *COLLECT* clause.
-To ensure correctness of the result, the AQL optimizer will automatically insert a *SORT*
-statement into the query in front of the *COLLECT* statement. The optimizer may be able to
-optimize away that *SORT* statement later if a sorted index is present on the group criteria.
-
-In case a *COLLECT* statement qualifies for using the *hash* variant, the optimizer will create an extra
-plan for it at the beginning of the planning phase. In this plan, no extra *SORT* statement will be
-added in front of the *COLLECT*. This is because the *hash* variant of *COLLECT* does not require
-sorted input. Instead, a *SORT* statement will be added after the *COLLECT* to sort its output.
-This *SORT* statement may be optimized away again in later stages.
-If the sort order of the *COLLECT* is irrelevant to the user, adding the extra instruction *SORT null*
-after the *COLLECT* will allow the optimizer to remove the sorts altogether:
-
-```
-FOR u IN users
- COLLECT age = u.age
- SORT null /* note: will be optimized away */
- RETURN age
-```
-
-Which *COLLECT* variant is used by the optimizer depends on the optimizer's cost estimations. The
-created plans with the different *COLLECT* variants will be shipped through the regular optimization
-pipeline. In the end, the optimizer will pick the plan with the lowest estimated total cost as usual.
-
-In general, the *sorted* variant of *COLLECT* should be preferred in cases when there is a sorted index
-present on the group criteria. In this case the optimizer can eliminate the *SORT* statement in front
-of the *COLLECT*, so that no *SORT* will be left.
-
-If there is no sorted index available on the group criteria, the up-front sort required by the *sorted*
-variant can be expensive. In this case it is likely that the optimizer will prefer the *hash* variant
-of *COLLECT*, which does not require its input to be sorted.
-
-Which variant of *COLLECT* was actually used can be figured out by looking into the execution plan of
-a query, specifically the *AggregateNode* and its *aggregationOptions* attribute.
-
-
-Setting COLLECT options
------------------------
-
-*options* can be used in a *COLLECT* statement to inform the optimizer about the preferred *COLLECT*
-method. When the following is appended to a *COLLECT* statement, the optimizer will always use
-the *sorted* variant of *COLLECT* and not even create a plan using the *hash* variant:
-
-```
-OPTIONS { method: "sorted" }
-```
-
-It is also possible to specify *hash* as the preferred method. In this case the optimizer will create
-a plan using the *hash* method only if the COLLECT statement qualifies (not all COLLECT statements
-can use the *hash* method). If the COLLECT statement qualifies, there will be only one plan
-that uses the *hash* method. If it does not qualify, the optimizer will use the *sorted* method.
-
-If no method is specified, then the optimizer will create a plan that uses the *sorted* method, and
-an additional plan using the *hash* method if the COLLECT statement qualifies for it.
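-
-As a sketch of a complete query using this option (the collection and attribute names
-are placeholders):
-
-```
-FOR u IN users
-  COLLECT age = u.age OPTIONS { method: "sorted" }
-  RETURN age
-```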
-
-
-COLLECT vs. RETURN DISTINCT
----------------------------
-
-In order to make a result set unique, one can either use *COLLECT* or *RETURN DISTINCT*. Behind the
-scenes, both variants will work by creating an *AggregateNode*. For both variants, the optimizer
-may try the sorted and the hashed variant of *COLLECT*. The difference is therefore mainly syntactical,
-with *RETURN DISTINCT* saving a bit of typing when compared to an equivalent *COLLECT*:
-
-```
-FOR u IN users
- RETURN DISTINCT u.age
-```
-
-```
-FOR u IN users
- COLLECT age = u.age
- RETURN age
-```
-
-However, *COLLECT* is vastly more flexible than *RETURN DISTINCT*. Additionally, the order of results is
-undefined for a *RETURN DISTINCT*, whereas for a *COLLECT* the results will be sorted.
diff --git a/Documentation/Books/AQL/Operations/Filter.md b/Documentation/Books/AQL/Operations/Filter.md
deleted file mode 100644
index 597bcea1c724..000000000000
--- a/Documentation/Books/AQL/Operations/Filter.md
+++ /dev/null
@@ -1,111 +0,0 @@
-FILTER
-======
-
-The *FILTER* statement can be used to restrict the results to elements that
-match an arbitrary logical condition.
-
-General syntax
---------------
-
-```
-FILTER condition
-```
-
-*condition* must be a condition that evaluates to either *false* or *true*. If
-the condition result is false, the current element is skipped, so it will not be
-processed further and not be part of the result. If the condition is true, the
-current element is not skipped and can be further processed.
-See [Operators](../Operators.md) for a list of comparison operators, logical
-operators etc. that you can use in conditions.
-
-```
-FOR u IN users
- FILTER u.active == true && u.age < 39
- RETURN u
-```
-
-It is allowed to specify multiple *FILTER* statements in a query, even in
-the same block. If multiple *FILTER* statements are used, their results will be
-combined with a logical AND, meaning all filter conditions must be true to
-include an element.
-
-```
-FOR u IN users
- FILTER u.active == true
- FILTER u.age < 39
- RETURN u
-```
-
-In the above example, all array elements of *users* that have an attribute
-*active* with value *true* and that have an attribute *age* with a value less
-than *39* (including *null* ones) will be included in the result. All other
-elements of *users* will be skipped and not be included in the result produced
-by *RETURN*. You may refer to the chapter [Accessing Data from Collections](../Fundamentals/DocumentData.md)
-for a description of the impact of non-existent or null attributes.
-
-Order of operations
--------------------
-
-Note that the positions of *FILTER* statements can influence the result of a query.
-There are 16 active users in the [test data](../Examples/README.md#example-data)
-for instance:
-
-```js
-FOR u IN users
- FILTER u.active == true
- RETURN u
-```
-
-We can limit the result set to 5 users at most:
-
-```js
-FOR u IN users
- FILTER u.active == true
- LIMIT 5
- RETURN u
-```
-
-This may return the user documents of Jim, Diego, Anthony, Michael and Chloe for
-instance. Which ones are returned is undefined, since there is no *SORT* statement
-to ensure a particular order. If we add a second *FILTER* statement to only return
-women...
-
-```js
-FOR u IN users
- FILTER u.active == true
- LIMIT 5
- FILTER u.gender == "f"
- RETURN u
-```
-
-... it might just return the Chloe document, because the *LIMIT* is applied before
-the second *FILTER*. No more than 5 documents arrive at the second *FILTER* block,
-and not all of them fulfill the gender criterion, even though there are more than
-5 active female users in the collection. A more deterministic result can be achieved
-by adding a *SORT* block:
-
-```js
-FOR u IN users
- FILTER u.active == true
- SORT u.age ASC
- LIMIT 5
- FILTER u.gender == "f"
- RETURN u
-```
-
-This will return the users Mariah and Mary. If sorted by age in *DESC* order,
-then the Sophia, Emma and Madison documents are returned. A *FILTER* after a
-*LIMIT* is not very common however, and you probably want such a query instead:
-
-```js
-FOR u IN users
- FILTER u.active == true AND u.gender == "f"
- SORT u.age ASC
- LIMIT 5
- RETURN u
-```
-
-Because the placement of *FILTER* blocks is significant, this single keyword can
-assume the roles of two SQL keywords, *WHERE* as well as *HAVING*.
-AQL's *FILTER* thus works with *COLLECT* aggregates the same way as with any other
-intermediate result, document attribute etc.
diff --git a/Documentation/Books/AQL/Operations/For.md b/Documentation/Books/AQL/Operations/For.md
deleted file mode 100644
index 7f739225f5e1..000000000000
--- a/Documentation/Books/AQL/Operations/For.md
+++ /dev/null
@@ -1,110 +0,0 @@
-FOR
-===
-
-
-The *FOR* keyword can be used to iterate over all elements of an array.
-The general syntax is:
-
-```js
-FOR variableName IN expression
-```
-
-There is also a special variant for graph traversals:
-
-```js
-FOR vertexVariableName, edgeVariableName, pathVariableName IN traversalExpression
-```
-
-For this special case see [the graph traversals chapter](../Graphs/Traversals.md).
-
-For views, there is a special (optional) `SEARCH` keyword:
-
-```js
-FOR variableName IN viewName SEARCH searchExpression
-```
-
-Details can be found in [the views chapter](../Views/README.md).
-
-
-For all other cases read on:
-
-Each array element returned by *expression* is visited exactly once. It is
-required that *expression* returns an array in all cases. The empty array is
-allowed, too. The current array element is made available for further processing
-in the variable specified by *variableName*.
-
-```js
-FOR u IN users
- RETURN u
-```
-
-This will iterate over all elements from the array *users* (note: this array
-consists of all documents from the collection named "users" in this case) and
-make the current array element available in variable *u*. *u* is not modified in
-this example but simply pushed into the result using the *RETURN* keyword.
-
-Note: When iterating over collection-based arrays as shown here, the order of
-documents is undefined unless an explicit sort order is defined using a *SORT*
-statement.
-
-The variable introduced by *FOR* is available until the scope the *FOR* is
-placed in is closed.
-
-Another example that uses a statically declared array of values to iterate over:
-
-```js
-FOR year IN [ 2011, 2012, 2013 ]
- RETURN { "year" : year, "isLeapYear" : year % 4 == 0 && (year % 100 != 0 || year % 400 == 0) }
-```
-
-Nesting of multiple *FOR* statements is allowed, too. When *FOR* statements are
-nested, a cross product of the array elements returned by the individual *FOR*
-statements will be created.
-
-```js
-FOR u IN users
- FOR l IN locations
- RETURN { "user" : u, "location" : l }
-```
-
-In this example, there are two array iterations: an outer iteration over the array
-*users* plus an inner iteration over the array *locations*. The inner array is
-traversed as many times as there are elements in the outer array. For each
-iteration, the current values of *users* and *locations* are made available for
-further processing in the variables *u* and *l*.
-
-## Options
-
-For collections and views, the `FOR` construct supports an optional `OPTIONS`
-suffix to modify behavior. The general syntax is:
-
-```js
-FOR variableName IN expression OPTIONS {option: value, ...}
-```
-
-### Index hints
-
-For collections, index hints are provided through this inline options mechanism.
-Hints can be specified in two different formats.
-
-The first format option is the simplest, just a single index name. This should
-be sufficient for many cases. Whenever there is a choice to potentially use an
-index for this `FOR` loop, the optimizer will first check if the specified index
-can be used. If so, it will use it, regardless of whether it would normally use
-a different index. If it cannot use that index, then it will fall back to its
-normal logic to select another index. If the optional `forceIndexHint: true` is
-specified, then it will not fall back, and instead generate an error.
-
-```js
-OPTIONS {indexHint: 'byName'[, forceIndexHint: <bool>]}
-```
-
-The second is an array of index names, in order of preference. When specified
-this way, the optimizer will behave much in the same way as above, but will
-check the feasibility of each of the specified indices, in the order they are
-given, falling back to its normal logic or failing only if none of the specified
-indices are feasible.
-
-```js
-OPTIONS {indexHint: ['byName', 'byColor'][, forceIndexHint: <bool>]}
-```
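-
-As a sketch of a complete loop using the first format (the collection, attribute and
-index names below are placeholders):
-
-```js
-FOR doc IN myCollection OPTIONS { indexHint: 'byName', forceIndexHint: true }
-  FILTER doc.name == 'some name'
-  RETURN doc
-```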
diff --git a/Documentation/Books/AQL/Operations/Insert.md b/Documentation/Books/AQL/Operations/Insert.md
deleted file mode 100644
index 7719945ab542..000000000000
--- a/Documentation/Books/AQL/Operations/Insert.md
+++ /dev/null
@@ -1,143 +0,0 @@
-INSERT
-======
-
-The *INSERT* keyword can be used to insert new documents into a collection. On a
-single server, an insert operation is executed transactionally in an all-or-nothing
-fashion.
-
-If the RocksDB engine is used and intermediate commits are enabled, a query may
-execute intermediate transaction commits in case the running transaction (AQL
-query) hits the specified size thresholds. In this case, the query's operations
-carried out so far will be committed and not rolled back in case of a later abort/rollback.
-That behavior can be controlled by adjusting the intermediate commit settings for
-the RocksDB engine.
-
-For sharded collections, the entire query and/or insert operation may not be transactional,
-especially if it involves different shards and/or database servers.
-
-Each *INSERT* operation is restricted to a single collection, and the
-[collection name](../../Manual/Appendix/Glossary.html#collection-name) must not be dynamic.
-Only a single *INSERT* statement per collection is allowed per AQL query, and
-it cannot be followed by read or write operations that access the same collection, by
-traversal operations, or AQL functions that can read documents.
-
-The syntax for an insert operation is:
-
-```
-INSERT document INTO collection [ OPTIONS options ]
-```
-
-**Note**: The *IN* keyword is allowed in place of *INTO* and has the same meaning.
-
-*collection* must contain the name of the collection into which the documents should
-be inserted. *document* is the document to be inserted, and it may or may not contain
-a *_key* attribute. If no *_key* attribute is provided, ArangoDB will auto-generate
-a value for *_key*. Inserting a document will also auto-generate a document
-revision number for the document.
-
-```js
-FOR i IN 1..100
- INSERT { value: i } INTO numbers
-```
-
-An insert operation can also be performed without a *FOR* loop to insert a
-single document:
-
-```js
-INSERT { value: 1 } INTO numbers
-```
-
-When inserting into an [edge collection](../../Manual/Appendix/Glossary.html#edge-collection),
-it is mandatory to specify the attributes *_from* and *_to* in the document:
-
-```js
-FOR u IN users
- FOR p IN products
- FILTER u._key == p.recommendedBy
- INSERT { _from: u._id, _to: p._id } INTO recommendations
-```
-
-Setting query options
----------------------
-
-The *OPTIONS* keyword followed by an object with query options can optionally
-be provided in an *INSERT* operation.
-
-It can be used to suppress query errors that may occur when violating unique
-key constraints:
-
-```js
-FOR i IN 1..1000
- INSERT {
- _key: CONCAT('test', i),
- name: "test",
- foobar: true
- } INTO users OPTIONS { ignoreErrors: true }
-```
-
-To make sure data are durable when an insert query returns, there is the *waitForSync*
-query option:
-
-```js
-FOR i IN 1..1000
- INSERT {
- _key: CONCAT('test', i),
- name: "test",
- foobar: true
- } INTO users OPTIONS { waitForSync: true }
-```
-
-If you want to replace existing documents with documents having the same key
-there is the *overwrite* query option. This will let you safely replace the
-documents instead of raising a "unique constraint violated" error:
-
-```js
-FOR i IN 1..1000
- INSERT {
- _key: CONCAT('test', i),
- name: "test",
- foobar: true
- } INTO users OPTIONS { overwrite: true }
-```
-
-In contrast to the MMFiles engine, the RocksDB engine does not require collection-level
-locks. Different write operations on the same collection do not block each other, as
-long as there are no _write-write conflicts_ on the same documents. From an application
-development perspective it can be desirable to have exclusive write access to collections,
-to simplify development. Note that writes do not block reads in RocksDB.
-Exclusive access can also speed up modification queries, because we avoid conflict checks.
-
-Use the *exclusive* option to achieve this effect on a per query basis:
-
-```js
-FOR doc IN collection
- INSERT { myval: doc.val + 1 } INTO users
- OPTIONS { exclusive: true }
-```
-
-Returning the inserted documents
---------------------------------
-
-The inserted documents can also be returned by the query. In this case, the `INSERT`
-statement can be followed by a `RETURN` statement (intermediate `LET` statements are allowed, too).
-To refer to the inserted documents, the `INSERT` statement introduces a pseudo-value
-named `NEW`.
-
-The documents contained in `NEW` will contain all attributes, even those auto-generated by
-the database (e.g. `_id`, `_key`, `_rev`).
-
-
-```js
-INSERT document INTO collection RETURN NEW
-```
-
-Following is an example using a variable named `inserted` to return the inserted
-documents. For each inserted document, the document key is returned:
-
-```js
-FOR i IN 1..100
- INSERT { value: i }
- INTO users
- LET inserted = NEW
- RETURN inserted._key
-```
diff --git a/Documentation/Books/AQL/Operations/Let.md b/Documentation/Books/AQL/Operations/Let.md
deleted file mode 100644
index 567ca9b85245..000000000000
--- a/Documentation/Books/AQL/Operations/Let.md
+++ /dev/null
@@ -1,61 +0,0 @@
-LET
-===
-
-The *LET* statement can be used to assign an arbitrary value to a variable.
-The variable is then introduced in the scope the *LET* statement is placed in.
-
-The general syntax is:
-
-```
-LET variableName = expression
-```
-
-Variables are immutable in AQL, which means they can not be re-assigned:
-
-```js
-LET a = [1, 2, 3] // initial assignment
-
-a = PUSH(a, 4) // syntax error, unexpected identifier
-LET a = PUSH(a, 4) // parsing error, variable 'a' is assigned multiple times
-LET b = PUSH(a, 4) // allowed, result: [1, 2, 3, 4]
-```
-
-*LET* statements are mostly used to declare complex computations and to avoid
-repeated computations of the same value in multiple parts of a query.
-
-```
-FOR u IN users
- LET numRecommendations = LENGTH(u.recommendations)
- RETURN {
- "user" : u,
- "numRecommendations" : numRecommendations,
- "isPowerUser" : numRecommendations >= 10
- }
-```
-
-In the above example, the computation of the number of recommendations is
-factored out using a *LET* statement, thus avoiding computing the value twice in
-the *RETURN* statement.
-
-Another use case for *LET* is to declare a complex computation in a subquery,
-making the whole query more readable.
-
-```
-FOR u IN users
- LET friends = (
- FOR f IN friends
- FILTER u.id == f.userId
- RETURN f
- )
- LET memberships = (
- FOR m IN memberships
- FILTER u.id == m.userId
- RETURN m
- )
- RETURN {
- "user" : u,
- "friends" : friends,
- "numFriends" : LENGTH(friends),
- "memberShips" : memberships
- }
-```
diff --git a/Documentation/Books/AQL/Operations/Limit.md b/Documentation/Books/AQL/Operations/Limit.md
deleted file mode 100644
index aa461bcbf829..000000000000
--- a/Documentation/Books/AQL/Operations/Limit.md
+++ /dev/null
@@ -1,53 +0,0 @@
-LIMIT
-=====
-
-The *LIMIT* statement allows slicing the result array using an
-offset and a count. It reduces the number of elements in the result to at most
-the specified number. The two general forms of *LIMIT* are:
-
-```js
-LIMIT count
-LIMIT offset, count
-```
-
-The first form allows specifying only the *count* value whereas the second form
-allows specifying both *offset* and *count*. The first form is identical to using
-the second form with an *offset* value of *0*.
-
-```js
-FOR u IN users
- LIMIT 5
- RETURN u
-```
-
-The above query returns the first five documents of the *users* collection.
-It could also be written as `LIMIT 0, 5` for the same result.
-Which documents it actually returns is rather arbitrary, however, because no explicit
-sort order is specified. Therefore, a limit should usually be
-accompanied by a `SORT` operation.
-
-The *offset* value specifies how many elements from the result shall be
-skipped. It must be 0 or greater. The *count* value specifies how many
-elements should be at most included in the result.
-
-```js
-FOR u IN users
- SORT u.firstName, u.lastName, u.id DESC
- LIMIT 2, 5
- RETURN u
-```
-
-In the above example, the documents of *users* are sorted, the first two results
-are skipped, and the next five user documents are returned.
-
-Note that variables, expressions and subqueries can not be used for *offset* and
-*count*. The values for *offset* and *count* must be known at query compile time,
-which means that you can only use number literals, bind parameters or expressions
-that can be resolved at query compile time.
-
-The position of a *LIMIT* relative to other operations in a query is significant.
-*LIMIT* operations before *FILTER*s in particular can change the result
-significantly, because the operations are executed in the order in which they
-are written in the query. See [FILTER](Filter.md#order-of-operations) for a
-detailed example.
-
diff --git a/Documentation/Books/AQL/Operations/README.md b/Documentation/Books/AQL/Operations/README.md
deleted file mode 100644
index 4a36190db129..000000000000
--- a/Documentation/Books/AQL/Operations/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-High-level operations
-=====================
-
-The following high-level operations are described hereafter:
-
-* [FOR](For.md): Iterate over all elements of an array.
-* [RETURN](Return.md): Produce the result of a query.
-* [FILTER](Filter.md): Restrict the results to elements that match arbitrary logical conditions.
-* [SORT](Sort.md): Force a sort of the array of already produced intermediate results.
-* [LIMIT](Limit.md): Reduce the number of elements in the result to at most the specified number, optionally skip elements (pagination).
-* [LET](Let.md): Assign an arbitrary value to a variable.
-* [COLLECT](Collect.md): Group an array by one or multiple group criteria. Can also count and aggregate.
-* [REMOVE](Remove.md): Remove documents from a collection.
-* [UPDATE](Update.md): Partially update documents in a collection.
-* [REPLACE](Replace.md): Completely replace documents in a collection.
-* [INSERT](Insert.md): Insert new documents into a collection.
-* [UPSERT](Upsert.md): Update/replace an existing document, or create it in the case it does not exist.
-* [WITH](With.md): Specify collections used in a query (at query begin only).
diff --git a/Documentation/Books/AQL/Operations/Remove.md b/Documentation/Books/AQL/Operations/Remove.md
deleted file mode 100644
index c67e89f5e6d3..000000000000
--- a/Documentation/Books/AQL/Operations/Remove.md
+++ /dev/null
@@ -1,158 +0,0 @@
-
-REMOVE
-======
-
-The *REMOVE* keyword can be used to remove documents from a collection. On a
-single server, the document removal is executed transactionally in an
-all-or-nothing fashion.
-
-If the RocksDB engine is used and intermediate commits are enabled, a query may
-execute intermediate transaction commits in case the running transaction (AQL
-query) hits the specified size thresholds. In this case, the query's operations
-carried out so far will be committed and not rolled back in case of a later abort/rollback.
-That behavior can be controlled by adjusting the intermediate commit settings for
-the RocksDB engine.
-
-For sharded collections, the entire query and/or remove operation may not be transactional,
-especially if it involves different shards and/or database servers.
-
-Each *REMOVE* operation is restricted to a single collection, and the
-[collection name](../../Manual/Appendix/Glossary.html#collection-name) must not be dynamic.
-Only a single *REMOVE* statement per collection is allowed per AQL query, and
-it cannot be followed by read or write operations that access the same collection, by
-traversal operations, or AQL functions that can read documents.
-
-The syntax for a remove operation is:
-
-```
-REMOVE keyExpression IN collection options
-```
-
-*collection* must contain the name of the collection to remove the documents
-from. *keyExpression* must be an expression that contains the document identification.
-This can either be a string (which must then contain the
-[document key](../../Manual/Appendix/Glossary.html#document-key)) or a
-document, which must contain a *_key* attribute.
-
-The following queries are thus equivalent:
-
-```
-FOR u IN users
- REMOVE { _key: u._key } IN users
-
-FOR u IN users
- REMOVE u._key IN users
-
-FOR u IN users
- REMOVE u IN users
-```
-
-**Note**: A remove operation can remove arbitrary documents, and the documents
-do not need to be identical to the ones produced by a preceding *FOR* statement:
-
-```
-FOR i IN 1..1000
- REMOVE { _key: CONCAT('test', i) } IN users
-
-FOR u IN users
- FILTER u.active == false
- REMOVE { _key: u._key } IN backup
-```
-
-A single document can be removed as well, using a document key string or a
-document with `_key` attribute:
-
-```
-REMOVE 'john' IN users
-```
-
-```
-LET doc = DOCUMENT('users/john')
-REMOVE doc IN users
-```
-
-The restriction of a single remove operation per query and collection
-applies. The following query causes an *access after data-modification*
-error because of the third remove operation:
-
-```
-REMOVE 'john' IN users
-REMOVE 'john' IN backups // OK, different collection
-REMOVE 'mary' IN users // Error, users collection again
-```
-
-Setting query options
----------------------
-
-*options* can be used to suppress query errors that may occur when trying to
-remove non-existing documents. For example, the following query will fail if one
-of the to-be-deleted documents does not exist:
-
-```
-FOR i IN 1..1000
- REMOVE { _key: CONCAT('test', i) } IN users
-```
-
-By specifying the *ignoreErrors* query option, these errors can be suppressed so
-the query completes:
-
-```
-FOR i IN 1..1000
- REMOVE { _key: CONCAT('test', i) } IN users OPTIONS { ignoreErrors: true }
-```
-
-To make sure data has been written to disk when a query returns, there is the *waitForSync*
-query option:
-
-```
-FOR i IN 1..1000
- REMOVE { _key: CONCAT('test', i) } IN users OPTIONS { waitForSync: true }
-```
-
-In order to not accidentally remove documents that have been updated since you last fetched
-them, you can use the option *ignoreRevs* to either let ArangoDB compare the `_rev` values and
-only succeed if they still match, or let ArangoDB ignore them (default):
-
-```
-FOR i IN 1..1000
- REMOVE { _key: CONCAT('test', i), _rev: "1287623" } IN users OPTIONS { ignoreRevs: false }
-```
-
-In contrast to the MMFiles engine, the RocksDB engine does not require collection-level
-locks. Different write operations on the same collection do not block each other, as
-long as there are no _write-write conflicts_ on the same documents. From an application
-development perspective it can be desirable to have exclusive write access to collections,
-to simplify development. Note that writes do not block reads in RocksDB.
-Exclusive access can also speed up modification queries, because we avoid conflict checks.
-
-Use the *exclusive* option to achieve this effect on a per query basis:
-
-```js
-FOR doc IN collection
-  REMOVE doc._key IN collection
-  OPTIONS { exclusive: true }
-```
-
-
-Returning the removed documents
--------------------------------
-
-The removed documents can also be returned by the query. In this case, the `REMOVE`
-statement must be followed by a `RETURN` statement (intermediate `LET` statements
-are allowed, too). `REMOVE` introduces the pseudo-value `OLD` to refer to the removed
-documents:
-
-```
-REMOVE keyExpression IN collection options RETURN OLD
-```
-
-Following is an example using a variable named `removed` for capturing the removed
-documents. For each removed document, the document key will be returned.
-
-```
-FOR u IN users
- REMOVE u IN users
- LET removed = OLD
- RETURN removed._key
-```
diff --git a/Documentation/Books/AQL/Operations/Replace.md b/Documentation/Books/AQL/Operations/Replace.md
deleted file mode 100644
index e782ab48aa94..000000000000
--- a/Documentation/Books/AQL/Operations/Replace.md
+++ /dev/null
@@ -1,169 +0,0 @@
-REPLACE
-=======
-
-The *REPLACE* keyword can be used to completely replace documents in a collection. On a
-single server, the replace operation is executed transactionally in an all-or-nothing
-fashion.
-
-If the RocksDB engine is used and intermediate commits are enabled, a query may
-execute intermediate transaction commits in case the running transaction (AQL
-query) hits the specified size thresholds. In this case, the query's operations
-carried out so far will be committed and not rolled back in case of a later abort/rollback.
-That behavior can be controlled by adjusting the intermediate commit settings for
-the RocksDB engine.
-
-For sharded collections, the entire query and/or replace operation may not be transactional,
-especially if it involves different shards and/or database servers.
-
-Each *REPLACE* operation is restricted to a single collection, and the
-[collection name](../../Manual/Appendix/Glossary.html#collection-name) must not be dynamic.
-Only a single *REPLACE* statement per collection is allowed per AQL query, and
-it cannot be followed by read or write operations that access the same collection, by
-traversal operations, or AQL functions that can read documents.
-The system attributes *_id*, *_key* and *_rev* cannot be replaced, *_from* and *_to* can.
-
-The two syntaxes for a replace operation are:
-
-```
-REPLACE document IN collection options
-REPLACE keyExpression WITH document IN collection options
-```
-
-*collection* must contain the name of the collection in which the documents should
-be replaced. *document* is the replacement document. When using the first syntax, *document*
-must also contain the *_key* attribute to identify the document to be replaced.
-
-```
-FOR u IN users
- REPLACE { _key: u._key, name: CONCAT(u.firstName, u.lastName), status: u.status } IN users
-```
-
-The following query is invalid because it does not contain a *_key* attribute and
-thus it is not possible to determine the documents to be replaced:
-
-```
-FOR u IN users
-  REPLACE { name: CONCAT(u.firstName, u.lastName), status: u.status } IN users
-```
-
-When using the second syntax, *keyExpression* provides the document identification.
-This can either be a string (which must then contain the document key) or a
-document, which must contain a *_key* attribute.
-
-The following queries are equivalent:
-
-```
-FOR u IN users
- REPLACE { _key: u._key, name: CONCAT(u.firstName, u.lastName) } IN users
-
-FOR u IN users
- REPLACE u._key WITH { name: CONCAT(u.firstName, u.lastName) } IN users
-
-FOR u IN users
- REPLACE { _key: u._key } WITH { name: CONCAT(u.firstName, u.lastName) } IN users
-
-FOR u IN users
- REPLACE u WITH { name: CONCAT(u.firstName, u.lastName) } IN users
-```
-
-A replace will fully replace an existing document, but it will not modify the values
-of internal attributes (such as *_id*, *_key*, *_from* and *_to*). Replacing a document
-will modify a document's revision number with a server-generated value.
-
-A replace operation may update arbitrary documents which do not need to be identical
-to the ones produced by a preceding *FOR* statement:
-
-```
-FOR i IN 1..1000
- REPLACE CONCAT('test', i) WITH { foobar: true } IN users
-
-FOR u IN users
- FILTER u.active == false
- REPLACE u WITH { status: 'inactive', name: u.name } IN backup
-```
-
-Setting query options
----------------------
-
-*options* can be used to suppress query errors that may occur when trying to
-replace non-existing documents or when violating unique key constraints:
-
-```
-FOR i IN 1..1000
- REPLACE { _key: CONCAT('test', i) } WITH { foobar: true } IN users OPTIONS { ignoreErrors: true }
-```
-
-To make sure data are durable when a replace query returns, there is the *waitForSync*
-query option:
-
-```
-FOR i IN 1..1000
- REPLACE { _key: CONCAT('test', i) } WITH { foobar: true } IN users OPTIONS { waitForSync: true }
-```
-
-In order to not accidentally overwrite documents that have been updated since you last fetched
-them, you can use the option *ignoreRevs* to either let ArangoDB compare the `_rev` value and only
-succeed if they still match, or let ArangoDB ignore them (default):
-
-```
-FOR i IN 1..1000
- REPLACE { _key: CONCAT('test', i), _rev: "1287623" } WITH { foobar: true } IN users OPTIONS { ignoreRevs: false }
-```
-
-
-In contrast to the MMFiles engine, the RocksDB engine does not require collection-level
-locks. Different write operations on the same collection do not block each other, as
-long as there are no _write-write conflicts_ on the same documents. From an application
-development perspective it can be desirable to have exclusive write access to collections,
-to simplify development. Note that writes do not block reads in RocksDB.
-Exclusive access can also speed up modification queries, because we avoid conflict checks.
-
-Use the *exclusive* option to achieve this effect on a per query basis:
-
-```js
-FOR doc IN collection
- REPLACE doc._key
- WITH { replaced: true } IN collection
- OPTIONS { exclusive: true }
-```
-
-Returning the modified documents
---------------------------------
-
-The modified documents can also be returned by the query. In this case, the `REPLACE`
-statement must be followed by a `RETURN` statement (intermediate `LET` statements are
-allowed, too). The `OLD` pseudo-value can be used to refer to document revisions before
-the replace, and `NEW` refers to document revisions after the replace.
-
-Both `OLD` and `NEW` will contain all document attributes, even those not specified
-in the replace expression.
-
-
-```
-REPLACE document IN collection options RETURN OLD
-REPLACE document IN collection options RETURN NEW
-REPLACE keyExpression WITH document IN collection options RETURN OLD
-REPLACE keyExpression WITH document IN collection options RETURN NEW
-```
-
-Following is an example using a variable named `previous` to return the original
-documents before modification. For each replaced document, the document key will be
-returned:
-
-```
-FOR u IN users
- REPLACE u WITH { value: "test" }
- IN users
- LET previous = OLD
- RETURN previous._key
-```
-
-The following query uses the `NEW` pseudo-value to return the replaced
-documents (without some of their system attributes):
-
-```
-FOR u IN users
- REPLACE u WITH { value: "test" } IN users
- LET replaced = NEW
- RETURN UNSET(replaced, '_key', '_id', '_rev')
-```
diff --git a/Documentation/Books/AQL/Operations/Return.md b/Documentation/Books/AQL/Operations/Return.md
deleted file mode 100644
index 8b2dd311ff3a..000000000000
--- a/Documentation/Books/AQL/Operations/Return.md
+++ /dev/null
@@ -1,202 +0,0 @@
-RETURN
-======
-
-The *RETURN* statement can be used to produce the result of a query.
-It is mandatory to specify a *RETURN* statement at the end of each block in a
-data-selection query, otherwise the query result would be undefined. Using
-*RETURN* on the main level in data-modification queries is optional.
-
-The general syntax for *RETURN* is:
-
-```
-RETURN expression
-```
-
-The *expression* returned by *RETURN* is produced for each iteration in the block the
-*RETURN* statement is placed in. That means the result of a *RETURN* statement
-is **always an array**. This includes an empty array if no documents matched the
-query, and a single return value being returned as an array with one element.
-
-To return all elements from the currently iterated array without modification,
-the following simple form can be used:
-
-```
-FOR variableName IN expression
- RETURN variableName
-```
-
-As *RETURN* allows specifying an expression, arbitrary computations can be
-performed to calculate the result elements. Any of the variables valid in the
-scope the *RETURN* is placed in can be used for the computations.
-
-To iterate over all documents of a collection called *users* and return the
-full documents, you can write:
-
-```js
-FOR u IN users
- RETURN u
-```
-
-In each iteration of the for-loop, a document of the *users* collection is
-assigned to a variable *u* and returned unmodified in this example. To return
-only one attribute of each document, you could use a different return expression:
-
-```js
-FOR u IN users
- RETURN u.name
-```
-
-Or to return multiple attributes, an object can be constructed like this:
-
-```js
-FOR u IN users
- RETURN { name: u.name, age: u.age }
-```
-
-Note: *RETURN* will close the current scope and eliminate all local variables in it.
-This is important to remember when working with [subqueries](../Examples/CombiningQueries.md).
-
-[Dynamic attribute names](../Fundamentals/DataTypes.md#objects--documents) are
-supported as well:
-
-```js
-FOR u IN users
- RETURN { [ u._id ]: u.age }
-```
-
-The document *_id* of every user is used as an expression to compute the
-attribute key in this example:
-
-```json
-[
- {
- "users/9883": 32
- },
- {
- "users/9915": 27
- },
- {
- "users/10074": 69
- }
-]
-```
-
-The result contains one object per user with a single key/value pair each.
-This is usually not desired. For a single object, that maps user IDs to ages,
-the individual results need to be merged and returned with another `RETURN`:
-
-```js
-RETURN MERGE(
- FOR u IN users
- RETURN { [ u._id ]: u.age }
-)
-```
-
-```json
-[
- {
- "users/10074": 69,
- "users/9883": 32,
- "users/9915": 27
- }
-]
-```
-
-Keep in mind that if the key expression evaluates to the same value multiple
-times, only one of the key/value pairs with the duplicate name will survive
-[MERGE()](../Functions/Document.md#merge). To avoid this, you can go without
-dynamic attribute names, use static names instead and return all document
-properties as attribute values:
-
-```js
-FOR u IN users
- RETURN { name: u.name, age: u.age }
-```
-
-```json
-[
- {
- "name": "John Smith",
- "age": 32
- },
- {
- "name": "James Hendrix",
- "age": 69
- },
- {
- "name": "Katie Foster",
- "age": 27
- }
-]
-```
-
-RETURN DISTINCT
----------------
-
-Since ArangoDB 2.7, *RETURN* can optionally be followed by the *DISTINCT* keyword.
-The *DISTINCT* keyword will ensure uniqueness of the values returned by the
-*RETURN* statement:
-
-```
-FOR variableName IN expression
- RETURN DISTINCT expression
-```
-
-If the *DISTINCT* is applied on an expression that itself is an array or a subquery,
-the *DISTINCT* will not make the values in each array or subquery result unique, but instead
-ensure that the result contains only distinct arrays or subquery results. To make
-the result of an array or a subquery unique, simply apply the *DISTINCT* for the
-array or the subquery.
-
-For example, the following query will apply *DISTINCT* on its subquery results,
-but not inside the subquery:
-
-```
-FOR what IN 1..2
- RETURN DISTINCT (
- FOR i IN [ 1, 2, 3, 4, 1, 3 ]
- RETURN i
- )
-```
-
-Here we'll have a *FOR* loop with two iterations that each execute a subquery. The
-*DISTINCT* here is applied on the two subquery results. Both subqueries return the
-same result value (that is [ 1, 2, 3, 4, 1, 3 ]), so after *DISTINCT* there will
-only be one occurrence of the value [ 1, 2, 3, 4, 1, 3 ] left:
-
-```
-[
- [ 1, 2, 3, 4, 1, 3 ]
-]
-```
-
-If the goal is to apply the *DISTINCT* inside the subquery, it needs to be moved
-there:
-
-```
-FOR what IN 1..2
- LET sub = (
- FOR i IN [ 1, 2, 3, 4, 1, 3 ]
- RETURN DISTINCT i
- )
- RETURN sub
-```
-
-In the above case, the *DISTINCT* will make the subquery results unique, so that
-each subquery will return a unique array of values ([ 1, 2, 3, 4 ]). As the subquery
-is executed twice and there is no *DISTINCT* on the top-level, that array will be
-returned twice:
-
-```
-[
- [ 1, 2, 3, 4 ],
- [ 1, 2, 3, 4 ]
-]
-```
-
-Note: the order of results was undefined for *RETURN DISTINCT* prior to ArangoDB
-3.3. Starting with ArangoDB 3.3, *RETURN DISTINCT* will not change the order of the
-results it is applied on.
-
-Note: *RETURN DISTINCT* is not allowed on the top-level of a query if there is no *FOR*
-loop preceding it.
diff --git a/Documentation/Books/AQL/Operations/Sort.md b/Documentation/Books/AQL/Operations/Sort.md
deleted file mode 100644
index 622990c0f8c3..000000000000
--- a/Documentation/Books/AQL/Operations/Sort.md
+++ /dev/null
@@ -1,62 +0,0 @@
-
-SORT
-====
-
-The *SORT* statement will force a sort of the array of already produced
-intermediate results in the current block. *SORT* allows specifying one or
-multiple sort criteria and directions. The general syntax is:
-
-```
-SORT expression direction
-```
-
-Example query that sorts by lastName (in ascending order), then firstName
-(in ascending order), then by id (in descending order):
-
-```
-FOR u IN users
- SORT u.lastName, u.firstName, u.id DESC
- RETURN u
-```
-
-Specifying the *direction* is optional. The default (implicit) direction for a
-sort expression is the ascending order. To explicitly specify the sort direction,
-the keywords *ASC* (ascending) and *DESC* can be used. Multiple sort criteria can be
-separated using commas. In this case, the direction is specified for each
-expression separately. For example:
-
-```
-SORT doc.lastName, doc.firstName
-```
-
-will first sort documents by lastName in ascending order and then by
-firstName in ascending order.
-
-```
-SORT doc.lastName DESC, doc.firstName
-```
-
-will first sort documents by lastName in descending order and then by
-firstName in ascending order.
-
-```
-SORT doc.lastName, doc.firstName DESC
-```
-
-will first sort documents by lastName in ascending order and then by
-firstName in descending order.
-
-
-Note: when iterating over collection-based arrays, the order of documents is
-always undefined unless an explicit sort order is defined using *SORT*.
-
-
-Note that constant *SORT* expressions can be used to indicate that no particular
-sort order is desired. Constant *SORT* expressions will be optimized away by the AQL
-optimizer during optimization, but specifying them explicitly may enable further
-optimizations if the optimizer does not need to take into account any particular
-sort order. This is especially the case after a *COLLECT* statement, which is
-supposed to produce a sorted result. Specifying an extra *SORT null* after the
-*COLLECT* statement allows the AQL optimizer to remove the post-sorting of the
-collect results altogether.
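-
-For example, a query along these lines (a minimal sketch; it assumes a *users*
-collection with a numeric *age* attribute) groups users by age decade and uses
-*SORT null* to tell the optimizer that the order of the groups does not matter:
-
-```
-FOR u IN users
-  COLLECT ageGroup = FLOOR(u.age / 10) * 10 WITH COUNT INTO numUsers
-  SORT null /* any order is fine, allows the optimizer to drop the post-sort */
-  RETURN { ageGroup: ageGroup, numUsers: numUsers }
-```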
-
diff --git a/Documentation/Books/AQL/Operations/Update.md b/Documentation/Books/AQL/Operations/Update.md
deleted file mode 100644
index 41cd753e33fd..000000000000
--- a/Documentation/Books/AQL/Operations/Update.md
+++ /dev/null
@@ -1,298 +0,0 @@
-UPDATE
-======
-
-The *UPDATE* keyword can be used to partially update documents in a collection. On a
-single server, updates are executed transactionally in an all-or-nothing fashion.
-
-If the RocksDB engine is used and intermediate commits are enabled, a query may
-execute intermediate transaction commits in case the running transaction (AQL
-query) hits the specified size thresholds. In this case, the query's operations
-carried out so far will be committed and not rolled back in case of a later abort/rollback.
-That behavior can be controlled by adjusting the intermediate commit settings for
-the RocksDB engine.
-
-For sharded collections, the entire query and/or update operation may not be transactional,
-especially if it involves different shards and/or database servers.
-
-Each *UPDATE* operation is restricted to a single collection, and the
-[collection name](../../Manual/Appendix/Glossary.html#collection-name) must not be dynamic.
-Only a single *UPDATE* statement per collection is allowed per AQL query, and
-it cannot be followed by read or write operations that access the same collection, by
-traversal operations, or AQL functions that can read documents.
-The system attributes *_id*, *_key* and *_rev* cannot be updated, *_from* and *_to* can.
-
-The two syntaxes for an update operation are:
-
-```
-UPDATE document IN collection options
-UPDATE keyExpression WITH document IN collection options
-```
-
-*collection* must contain the name of the collection in which the documents should
-be updated. *document* must be a document that contains the attributes and values
-to be updated. When using the first syntax, *document* must also contain the *_key*
-attribute to identify the document to be updated.
-
-```js
-FOR u IN users
- UPDATE { _key: u._key, name: CONCAT(u.firstName, " ", u.lastName) } IN users
-```
-
-The following query is invalid because it does not contain a *_key* attribute and
-thus it is not possible to determine the documents to be updated:
-
-```js
-FOR u IN users
- UPDATE { name: CONCAT(u.firstName, " ", u.lastName) } IN users
-```
-
-When using the second syntax, *keyExpression* provides the document identification.
-This can either be a string (which must then contain the document key) or a
-document, which must contain a *_key* attribute.
-
-Neither an object with an `_id` attribute but no `_key` attribute, nor a document
-ID passed as a string like `"users/john"`, will work. However, you can use
-`DOCUMENT(id)` to fetch the document via its ID and `PARSE_IDENTIFIER(id).key`
-to get the document key as a string.
-
-The following queries are equivalent:
-
-```js
-FOR u IN users
- UPDATE u._key WITH { name: CONCAT(u.firstName, " ", u.lastName) } IN users
-
-FOR u IN users
- UPDATE { _key: u._key } WITH { name: CONCAT(u.firstName, " ", u.lastName) } IN users
-
-FOR u IN users
- UPDATE u WITH { name: CONCAT(u.firstName, " ", u.lastName) } IN users
-```
-
-An update operation may update arbitrary documents which do not need to be identical
-to the ones produced by a preceding *FOR* statement:
-
-```js
-FOR i IN 1..1000
- UPDATE CONCAT('test', i) WITH { foobar: true } IN users
-
-FOR u IN users
- FILTER u.active == false
- UPDATE u WITH { status: 'inactive' } IN backup
-```
-
-Using the current value of a document attribute
------------------------------------------------
-
-The pseudo-variable `OLD` is not supported inside of `WITH` clauses (it is
-available after `UPDATE`). To access the current attribute value, you can
-usually refer to a document via the variable of the `FOR` loop, which is used
-to iterate over a collection:
-
-```js
-FOR doc IN users
- UPDATE doc WITH {
- fullName: CONCAT(doc.firstName, " ", doc.lastName)
- } IN users
-```
-
-If there is no loop because only a single document is updated, then there
-might not be a variable like `doc` above, which would let you refer to the
-document that is being updated:
-
-```js
-UPDATE "john" WITH { ... } IN users
-```
-
-```js
-LET key = PARSE_IDENTIFIER("users/john").key
-UPDATE key WITH { ... } IN users
-```
-
-To access the current value in this situation, the document has to be retrieved
-and stored in a variable first:
-
-```js
-LET doc = DOCUMENT("users/john")
-UPDATE doc WITH {
- fullName: CONCAT(doc.firstName, " ", doc.lastName)
-} IN users
-```
-
-An existing attribute can be modified based on its current value this way,
-to increment a counter for instance:
-
-```js
-UPDATE doc WITH {
- karma: doc.karma + 1
-} IN users
-```
-
-If the attribute `karma` doesn't exist yet, `doc.karma` is evaluated to *null*.
-The expression `null + 1` results in the new attribute `karma` being set to *1*.
-If the attribute does exist, then it is increased by *1*.
-
-Arrays can be mutated too of course:
-
-```js
-UPDATE doc WITH {
- hobbies: PUSH(doc.hobbies, "swimming")
-} IN users
-```
-
-If the attribute `hobbies` doesn't exist yet, it is conveniently initialized
-as `[ "swimming" ]` and otherwise extended.
-
-Setting query options
----------------------
-
-*options* can be used to suppress query errors that may occur when trying to
-update non-existing documents or violating unique key constraints:
-
-```js
-FOR i IN 1..1000
- UPDATE {
- _key: CONCAT('test', i)
- } WITH {
- foobar: true
- } IN users OPTIONS { ignoreErrors: true }
-```
-
-An update operation will only update the attributes specified in *document* and
-leave other attributes untouched. Internal attributes (such as *_id*, *_key*, *_rev*,
-*_from* and *_to*) cannot be updated and are ignored when specified in *document*.
-Updating a document will modify the document's revision number with a server-generated value.
-
-When updating an attribute with a null value, ArangoDB will not remove the attribute
-from the document but store a null value for it. To get rid of attributes in an update
-operation, set them to null and provide the *keepNull* option:
-
-```js
-FOR u IN users
- UPDATE u WITH {
- foobar: true,
- notNeeded: null
- } IN users OPTIONS { keepNull: false }
-```
-
-The above query will remove the *notNeeded* attribute from the documents and update
-the *foobar* attribute normally.
-
-There is also the option *mergeObjects* that controls whether object contents will be
-merged if an object attribute is present in both the *UPDATE* query and in the
-to-be-updated document.
-
-The following query will set the updated document's *name* attribute to the exact
-same value that is specified in the query. This is due to the *mergeObjects* option
-being set to *false*:
-
-```js
-FOR u IN users
- UPDATE u WITH {
- name: { first: "foo", middle: "b.", last: "baz" }
- } IN users OPTIONS { mergeObjects: false }
-```
-
-In contrast, the following query will merge the contents of the *name* attribute in the
-original document with the value specified in the query:
-
-```js
-FOR u IN users
- UPDATE u WITH {
- name: { first: "foo", middle: "b.", last: "baz" }
- } IN users OPTIONS { mergeObjects: true }
-```
-
-Attributes in *name* that are present in the to-be-updated document but not in the
-query will now be preserved. Attributes that are present in both will be overwritten
-with the values specified in the query.
-
-Note: the default value for *mergeObjects* is *true*, so there is no need to specify it
-explicitly.
-
-To make sure data are durable when an update query returns, there is the *waitForSync*
-query option:
-
-```js
-FOR u IN users
- UPDATE u WITH {
- foobar: true
- } IN users OPTIONS { waitForSync: true }
-```
-
-In order to not accidentally overwrite documents that have been updated since you last fetched
-them, you can use the option *ignoreRevs* to either let ArangoDB compare the `_rev` value and
-only succeed if they still match, or let ArangoDB ignore them (default):
-
-```js
-FOR i IN 1..1000
- UPDATE { _key: CONCAT('test', i), _rev: "1287623" }
- WITH { foobar: true } IN users
- OPTIONS { ignoreRevs: false }
-```
-
-In contrast to the MMFiles engine, the RocksDB engine does not require collection-level
-locks. Different write operations on the same collection do not block each other, as
-long as there are no _write-write conflicts_ on the same documents. From an application
-development perspective, it can be desirable to have exclusive write access to collections
-in order to simplify development. Note that writes do not block reads in RocksDB.
-Exclusive access can also speed up modification queries, because conflict checks are avoided.
-
-Use the *exclusive* option to achieve this effect on a per query basis:
-
-```js
-FOR doc IN collection
- UPDATE doc
- WITH { updated: true } IN collection
- OPTIONS { exclusive: true }
-```
-
-
-Returning the modified documents
---------------------------------
-
-The modified documents can also be returned by the query. In this case, the `UPDATE`
-statement needs to be followed by a `RETURN` statement (intermediate `LET` statements
-are allowed, too). These statements can refer to the pseudo-values `OLD` and `NEW`.
-The `OLD` pseudo-value refers to the document revisions before the update, and `NEW`
-refers to document revisions after the update.
-
-Both `OLD` and `NEW` will contain all document attributes, even those not specified
-in the update expression.
-
-```
-UPDATE document IN collection options RETURN OLD
-UPDATE document IN collection options RETURN NEW
-UPDATE keyExpression WITH document IN collection options RETURN OLD
-UPDATE keyExpression WITH document IN collection options RETURN NEW
-```
-
-Following is an example using a variable named `previous` to capture the original
-documents before modification. For each modified document, the document key is returned.
-
-```js
-FOR u IN users
- UPDATE u WITH { value: "test" }
- IN users
- LET previous = OLD
- RETURN previous._key
-```
-
-The following query uses the `NEW` pseudo-value to return the updated documents,
-without some of the system attributes:
-
-```js
-FOR u IN users
- UPDATE u WITH { value: "test" }
- IN users
- LET updated = NEW
- RETURN UNSET(updated, "_key", "_id", "_rev")
-```
-
-It is also possible to return both `OLD` and `NEW`:
-
-```js
-FOR u IN users
- UPDATE u WITH { value: "test" }
- IN users
- RETURN { before: OLD, after: NEW }
-```
diff --git a/Documentation/Books/AQL/Operations/Upsert.md b/Documentation/Books/AQL/Operations/Upsert.md
deleted file mode 100644
index b64262eace80..000000000000
--- a/Documentation/Books/AQL/Operations/Upsert.md
+++ /dev/null
@@ -1,141 +0,0 @@
-UPSERT
-======
-
-The *UPSERT* keyword can be used for checking whether certain documents exist,
-and to update/replace them in case they exist, or create them in case they do not exist.
-On a single server, upserts are executed transactionally in an all-or-nothing fashion.
-
-If the RocksDB engine is used and intermediate commits are enabled, a query may
-execute intermediate transaction commits in case the running transaction (AQL
-query) hits the specified size thresholds. In this case, the query's operations
-carried out so far will be committed and not rolled back in case of a later abort/rollback.
-That behavior can be controlled by adjusting the intermediate commit settings for
-the RocksDB engine.
-
-For sharded collections, the entire query and/or upsert operation may not be transactional,
-especially if it involves different shards and/or database servers.
-
-Each *UPSERT* operation is restricted to a single collection, and the
-[collection name](../../Manual/Appendix/Glossary.html#collection-name) must not be dynamic.
-Only a single *UPSERT* statement per collection is allowed per AQL query, and
-it cannot be followed by read or write operations that access the same collection, by
-traversal operations, or AQL functions that can read documents.
-
-The syntax for an upsert operation is:
-
-```
-UPSERT searchExpression INSERT insertExpression UPDATE updateExpression IN collection options
-UPSERT searchExpression INSERT insertExpression REPLACE updateExpression IN collection options
-```
-
-When using the *UPDATE* variant of the upsert operation, the found document will be
-partially updated, meaning only the attributes specified in *updateExpression* will be
-updated or added. When using the *REPLACE* variant of upsert, existing documents will
-be replaced with the contents of *updateExpression*.
-
-Updating a document will modify the document's revision number with a server-generated value.
-The system attributes *_id*, *_key* and *_rev* cannot be updated, *_from* and *_to* can.
-
-The *searchExpression* contains the document to be looked for. It must be an object
-literal without dynamic attribute names. In case no such document can be found in
-*collection*, a new document will be inserted into the collection as specified in the
-*insertExpression*.
-
-In case at least one document in *collection* matches the *searchExpression*, it will
-be updated using the *updateExpression*. When more than one document in the collection
-matches the *searchExpression*, it is undefined which of the matching documents will
-be updated. It is therefore often sensible to make sure by other means (such as unique
-indexes, application logic etc.) that at most one document matches *searchExpression*.
-
-The following query will look in the *users* collection for a document with a specific
-*name* attribute value. If the document exists, its *logins* attribute will be increased
-by one. If it does not exist, a new document will be inserted, consisting of the
-attributes *name*, *logins*, and *dateCreated*:
-
-```
-UPSERT { name: 'superuser' }
-INSERT { name: 'superuser', logins: 1, dateCreated: DATE_NOW() }
-UPDATE { logins: OLD.logins + 1 } IN users
-```
-
-Note that in the *UPDATE* case it is possible to refer to the previous version of the
-document using the *OLD* pseudo-value.
-
-
-Setting query options
----------------------
-
-As in several above examples, the *ignoreErrors* option can be used to suppress query
-errors that may occur when trying to violate unique key constraints.
-
-When updating or replacing an attribute with a null value, ArangoDB will not remove the
-attribute from the document but store a null value for it. To get rid of attributes in
-an upsert operation, set them to null and provide the *keepNull* option.
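-
-For example, a sketch along the lines of the query above (the *obsoleteAttribute*
-name is made up for illustration):
-
-```
-UPSERT { name: 'superuser' }
-INSERT { name: 'superuser', logins: 1, dateCreated: DATE_NOW() }
-UPDATE { logins: OLD.logins + 1, obsoleteAttribute: null } /* made-up attribute */
-IN users OPTIONS { keepNull: false }
-```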
-
-There is also the option *mergeObjects* that controls whether object contents will be
-merged if an object attribute is present in both the *UPDATE* query and in the
-to-be-updated document.
-
-Note: the default value for *mergeObjects* is *true*, so there is no need to specify it
-explicitly.
-
-To make sure data are durable when an update query returns, there is the *waitForSync*
-query option.
-
-In order to not accidentally update documents that have been written and updated since
-you last fetched them, you can use the option *ignoreRevs* to either let ArangoDB compare
-the `_rev` value and only succeed if they still match, or let ArangoDB ignore them (default):
-
-```
-FOR i IN 1..1000
- UPSERT { _key: CONCAT('test', i)}
- INSERT {foobar: false}
- UPDATE {_rev: "1287623", foobar: true }
- IN users OPTIONS { ignoreRevs: false }
-```
-
-*NOTE*: You need to add the `_rev` value to the *updateExpression*; it will not be used within
-the *searchExpression*. Even worse, if you use an outdated `_rev` in the *searchExpression*,
-*UPSERT* will trigger the INSERT path instead of the UPDATE path, because it does not find a document
-exactly matching the *searchExpression*.
-
-In contrast to the MMFiles engine, the RocksDB engine does not require collection-level
-locks. Different write operations on the same collection do not block each other, as
-long as there are no _write-write conflicts_ on the same documents. From an application
-development perspective, it can be desirable to have exclusive write access to collections
-in order to simplify development. Note that writes do not block reads in RocksDB.
-Exclusive access can also speed up modification queries, because conflict checks are avoided.
-
-Use the *exclusive* option to achieve this effect on a per query basis:
-
-```js
-FOR i IN 1..1000
- UPSERT { _key: CONCAT('test', i) }
- INSERT { foobar: false }
- UPDATE { foobar: true }
- IN users OPTIONS { exclusive: true }
-```
-
-Returning documents
--------------------
-
-`UPSERT` statements can optionally return data. To do so, they need to be followed
-by a `RETURN` statement (intermediate `LET` statements are allowed, too). These statements
-can optionally perform calculations and refer to the pseudo-values `OLD` and `NEW`.
-In case the upsert performed an insert operation, `OLD` will have a value of *null*.
-In case the upsert performed an update or replace operation, `OLD` will contain the
-previous version of the document, before update/replace.
-
-`NEW` will always be populated. It will contain the inserted document in case the
-upsert performed an insert, or the updated/replaced document in case it performed an
-update/replace.
-
-This can also be used to check whether the upsert has performed an insert or an update
-internally:
-
-```
-UPSERT { name: 'superuser' }
-INSERT { name: 'superuser', logins: 1, dateCreated: DATE_NOW() }
-UPDATE { logins: OLD.logins + 1 } IN users
-RETURN { doc: NEW, type: OLD ? 'update' : 'insert' }
-```
diff --git a/Documentation/Books/AQL/Operations/With.md b/Documentation/Books/AQL/Operations/With.md
deleted file mode 100644
index 82d96dc54560..000000000000
--- a/Documentation/Books/AQL/Operations/With.md
+++ /dev/null
@@ -1,42 +0,0 @@
-
-WITH
-====
-
-An AQL query can optionally start with a *WITH* statement and the list of
-collections used by the query. All collections specified in *WITH* will be
-read-locked at query start, in addition to the other collections the query
-uses and that are detected by the AQL query parser.
-
-Specifying further collections in *WITH* can be useful for queries that
-dynamically access collections (e.g. via traversals or via dynamic
-document access functions such as `DOCUMENT()`). Such collections may be
-invisible to the AQL query parser at query compile time, and thus will not
-be read-locked automatically at query start. In this case, the AQL execution
-engine will lazily lock these collections whenever they are used, which can
-lead to deadlocks with other queries. In case such a deadlock is detected, the
-query will automatically be aborted and changes will be rolled back. In this
-case the client application can try sending the query again.
-However, if client applications specify the list of used collections for all
-their queries using *WITH*, then no deadlocks will happen and no queries will
-be aborted due to deadlock situations.
-
-From ArangoDB 3.1 onwards `WITH` is required for traversals in a
-clustered environment in order to avoid deadlocks.
-
-Note that for queries that access only a single collection or that have all
-collection names specified somewhere else in the query string, there is no
-need to use *WITH*. *WITH* is only useful when the AQL query parser cannot
-automatically figure out which collections are going to be used by the query,
-i.e. for queries that dynamically access collections via traversals, shortest
-path operations or the *DOCUMENT()* function.
-
-```
-WITH managers, usersHaveManagers
-FOR v, e, p IN OUTBOUND 'users/1' GRAPH 'userGraph'
- RETURN { v, e, p }
-```
-
-Note that *WITH* is also a keyword that is used in other contexts,
-for example in *UPDATE* statements. If *WITH* is used to specify the extra
-list of collections, then it must be placed at the very start of the query
-string.
diff --git a/Documentation/Books/AQL/Operators.md b/Documentation/Books/AQL/Operators.md
deleted file mode 100644
index 045e8c7dfbd0..000000000000
--- a/Documentation/Books/AQL/Operators.md
+++ /dev/null
@@ -1,346 +0,0 @@
-Operators
-=========
-
-AQL supports a number of operators that can be used in expressions. There are
-comparison, logical, and arithmetic operators, as well as the ternary operator.
-
-Comparison operators
---------------------
-
-Comparison (or relational) operators compare two operands. They can be used with
-any input data types, and will return a boolean result value.
-
-The following comparison operators are supported:
-
-- *==* equality
-- *!=* inequality
-- *<* less than
-- *<=* less or equal
-- *>* greater than
-- *>=* greater or equal
-- *IN* test if a value is contained in an array
-- *NOT IN* test if a value is not contained in an array
-- *LIKE* tests if a string value matches a pattern
-- *=~* tests if a string value matches a regular expression
-- *!~* tests if a string value does not match a regular expression
-
-If the comparison can be evaluated, each of the comparison operators returns a
-boolean value: *true* if the comparison evaluates to true, and *false*
-otherwise.
-
-The comparison operators accept any data types for the first and second operands.
-However, *IN* and *NOT IN* will only return a meaningful result if their right-hand
-operand is an array, and *LIKE* will only execute if both operands are string values.
-The comparison operators will not perform any implicit type casts if the compared
-operands have different or non-sensible types.
-
-Some examples for comparison operations in AQL:
-
-```
-0 == null // false
-1 > 0 // true
-true != null // true
-45 <= "yikes!" // true
-65 != "65" // true
-65 == 65 // true
-1.23 > 1.32 // false
-1.5 IN [ 2, 3, 1.5 ] // true
-"foo" IN null // false
-42 NOT IN [ 17, 40, 50 ] // true
-"abc" == "abc" // true
-"abc" == "ABC" // false
-"foo" LIKE "f%" // true
-"foo" =~ "^f[o].$" // true
-"foo" !~ "[a-z]+bar$" // true
-```
-
-The *LIKE* operator checks whether its left operand matches the pattern specified
-in its right operand. The pattern can consist of regular characters and wildcards.
-The supported wildcards are *_* to match a single arbitrary character, and *%* to
-match any number of arbitrary characters. Literal *%* and *_* need to be escaped
-with a backslash. Backslashes need to be escaped themselves, which effectively
-means that two reverse solidus characters need to precede a literal percent sign
-or underscore. In arangosh, additional escaping is required, making it four
-backslashes in total preceding the to-be-escaped character.
-
-```
-"abc" LIKE "a%" // true
-"abc" LIKE "_bc" // true
-"a_b_foo" LIKE "a\\_b\\_foo" // true
-```
-
-The pattern matching performed by the *LIKE* operator is case-sensitive.
-
-The regular expression operators *=~* and *!~* expect their left-hand operands to
-be strings, and their right-hand operands to be strings containing valid regular
-expressions as specified in the documentation for the AQL function
-[REGEX_TEST()](Functions/String.md#regextest).
-
-Array comparison operators
---------------------------
-
-The comparison operators also exist as an *array variant*. In the array
-variant, the operator is prefixed with one of the keywords *ALL*, *ANY*
-or *NONE*. Using one of these keywords changes the operator behavior to
-execute the comparison operation for all, any, or none of its left-hand
-argument values. It is therefore expected that the left-hand argument
-of an array operator is an array.
-
-Examples:
-
-```
-[ 1, 2, 3 ] ALL IN [ 2, 3, 4 ] // false
-[ 1, 2, 3 ] ALL IN [ 1, 2, 3 ] // true
-[ 1, 2, 3 ] NONE IN [ 3 ] // false
-[ 1, 2, 3 ] NONE IN [ 23, 42 ] // true
-[ 1, 2, 3 ] ANY IN [ 4, 5, 6 ] // false
-[ 1, 2, 3 ] ANY IN [ 1, 42 ] // true
-[ 1, 2, 3 ] ANY == 2 // true
-[ 1, 2, 3 ] ANY == 4 // false
-[ 1, 2, 3 ] ANY > 0 // true
-[ 1, 2, 3 ] ANY <= 1 // true
-[ 1, 2, 3 ] NONE < 99 // false
-[ 1, 2, 3 ] NONE > 10 // true
-[ 1, 2, 3 ] ALL > 2 // false
-[ 1, 2, 3 ] ALL > 0 // true
-[ 1, 2, 3 ] ALL >= 3 // false
-["foo", "bar"] ALL != "moo" // true
-["foo", "bar"] NONE == "bar" // false
-["foo", "bar"] ANY == "foo" // true
-```
-
-Note that these operators are not optimized yet. Indexes will not be utilized.
-
-Logical operators
------------------
-
-The following logical operators are supported in AQL:
-
-- *&&* logical and operator
-- *||* logical or operator
-- *!* logical not/negation operator
-
-AQL also supports the following alternative forms for the logical operators:
-
-- *AND* logical and operator
-- *OR* logical or operator
-- *NOT* logical not/negation operator
-
-The alternative forms are aliases and functionally equivalent to the regular
-operators.
-
-The two-operand logical operators in AQL will be executed with short-circuit
-evaluation (except if one of the operands is or includes a subquery; in this
-case, the subquery will be pulled out and evaluated before the logical operator).
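-
-For example (a trivial sketch; *RAND()* merely stands in for any expression that
-is costly to evaluate):
-
-```js
-false && RAND() > 0.5   // false, the right-hand side is never evaluated
-true  || RAND() > 0.5   // true, the right-hand side is never evaluated
-```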
-
-The result of the logical operators in AQL is defined as follows:
-
-- `lhs && rhs` will return `lhs` if it is `false` or would be `false` when converted
- into a boolean. If `lhs` is `true` or would be `true` when converted to a boolean,
- `rhs` will be returned.
-- `lhs || rhs` will return `lhs` if it is `true` or would be `true` when converted
- into a boolean. If `lhs` is `false` or would be `false` when converted to a boolean,
- `rhs` will be returned.
-- `! value` will return the negated value of `value` converted into a boolean
-
-Some examples for logical operations in AQL:
-
-```js
-u.age > 15 && u.address.city != ""
-true || false
-NOT u.isInvalid
-1 || ! 0
-```
-
-Passing non-boolean values to a logical operator is allowed. Any non-boolean operands
-will be implicitly cast to boolean by the operator, without making the query abort.
-
-The *conversion to a boolean value* works as follows:
-- `null` will be converted to `false`
-- boolean values remain unchanged
-- all numbers unequal to zero are `true`, zero is `false`
-- an empty string is `false`, all other strings are `true`
-- arrays (`[ ]`) and objects / documents (`{ }`) are `true`, regardless of their contents
-
-The result of *logical and* and *logical or* operations can have any data
-type and is not necessarily a boolean value.
-
-For example, the following logical operations will return boolean values:
-
-```js
-25 > 1 && 42 != 7 // true
-22 IN [ 23, 42 ] || 23 NOT IN [ 22, 7 ] // true
-25 != 25 // false
-```
-
-whereas the following logical operations will not return boolean values:
-
-```js
-1 || 7 // 1
-null || "foo" // "foo"
-null && true // null
-true && 23 // 23
-```
-
-Arithmetic operators
---------------------
-
-Arithmetic operators perform an arithmetic operation on two numeric
-operands. The result of an arithmetic operation is again a numeric value.
-
-AQL supports the following arithmetic operators:
-
-- *+* addition
-- *-* subtraction
-- \* multiplication
-- */* division
-- *%* modulus
-
-Unary plus and unary minus are supported as well:
-
-```js
-LET x = -5
-LET y = 1
-RETURN [-x, +y]
-// [5, 1]
-```
-
-For exponentiation, there is a [numeric function](Functions/Numeric.md#pow) *POW()*.
-The syntax `base ** exp` is not supported.
-
-For string concatenation, you must use the [string function](Functions/String.md#concat)
-*CONCAT()*. Combining two strings with a plus operator (`"foo" + "bar"`) will not work!
-Also see [Common Errors](CommonErrors.md).
-
-Some example arithmetic operations:
-
-```
-1 + 1
-33 - 99
-12.4 * 4.5
-13.0 / 0.1
-23 % 7
--15
-+9.99
-```
-
-The arithmetic operators accept operands of any type. Passing non-numeric values to an
-arithmetic operator will cast the operands to numbers using the type casting rules
-applied by the [TO_NUMBER()](Functions/TypeCast.md#tonumber) function:
-
-- `null` will be converted to `0`
-- `false` will be converted to `0`, `true` will be converted to `1`
-- a valid numeric value remains unchanged, but NaN and Infinity will be converted to `0`
-- string values are converted to a number if they contain a valid string representation
- of a number. Any whitespace at the start or the end of the string is ignored. Strings
- with any other contents are converted to the number `0`
-- an empty array is converted to `0`, an array with one member is converted to the numeric
- representation of its sole member. Arrays with more members are converted to the number
- `0`.
-- objects / documents are converted to the number `0`.
-
-An arithmetic operation that produces an invalid value, such as `1 / 0` (division by zero)
-will also produce a result value of `null`. The query is not aborted, but you may see a
-warning.
-
-Here are a few examples:
-
-```
-1 + "a" // 1
-1 + "99" // 100
-1 + null // 1
-null + 1 // 1
-3 + [ ] // 3
-24 + [ 2 ] // 26
-24 + [ 2, 4 ] // 0
-25 - null // 25
-17 - true // 16
-23 * { } // 0
-5 * [ 7 ] // 35
-24 / "12" // 2
-1 / 0 // 0
-```
-
-Ternary operator
-----------------
-
-AQL also supports a ternary operator that can be used for conditional
-evaluation. The ternary operator expects a boolean condition as its first
-operand, and it returns the result of the second operand if the condition
-evaluates to true, and the third operand otherwise.
-
-*Examples*
-
-```js
-u.age > 15 || u.active == true ? u.userId : null
-```
-
-There is also a shortcut variant of the ternary operator with just two
-operands. This variant can be used when the expression for the boolean
-condition and the return value should be the same:
-
-*Examples*
-
-```js
-u.value ? : 'value is null, 0 or not present'
-```
-
-
-Range operator
---------------
-
-AQL supports expressing simple numeric ranges with the *..* operator.
-This operator can be used to easily iterate over a sequence of numeric
-values.
-
-The *..* operator will produce an array of the integer values in the
-defined range, with both bounding values included.
-
-*Examples*
-
-```
-2010..2013
-```
-
-will produce the following result:
-
-```json
-[ 2010, 2011, 2012, 2013 ]
-```
-
-Using the range operator is equivalent to writing an array with the integer
-values in the range specified by the bounds of the range. If the bounds of
-the range operator are non-integers, they will be converted to integer
-values first.
-
-There is also a [RANGE() function](Functions/Numeric.md#range).
-
-Array operators
----------------
-
-AQL provides array operators [\*] for
-[array variable expansion](Advanced/ArrayOperators.md#array-expansion) and
-[\*\*] for [array contraction](Advanced/ArrayOperators.md#array-contraction).
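-
-A minimal sketch of both operators, using a made-up literal array so that no
-collection is needed (see the linked chapter for details):
-
-```js
-LET users = [
-  { name: "John", friends: [ "Tina", "Helga" ] },
-  { name: "Anna", friends: [ "Bob" ] }
-]
-RETURN {
-  names: users[*].name,           // expansion:   [ "John", "Anna" ]
-  friends: users[*].friends[**]   // contraction: [ "Tina", "Helga", "Bob" ]
-}
-```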
-
-Operator precedence
--------------------
-
-The operator precedence in AQL is similar to that of other familiar languages (lowest precedence first):
-
-- *? :* ternary operator
-- *||* logical or
-- *&&* logical and
-- *==*, *!=* equality and inequality
-- *IN* in operator
-- *<*, *<=*, *>=*, *>* less than, less equal,
- greater equal, greater than
-- *+*, *-* addition, subtraction
-- \* , */*, *%* multiplication, division, modulus
-- *!*, *+*, *-* logical negation, unary plus, unary minus
-- [\*] expansion
-- *()* function call
-- *.* member access
-- *[]* indexed value access
-
-The parentheses *(* and *)* can be used to enforce a different operator
-evaluation order.
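-
-For example (a trivial sketch):
-
-```js
-RETURN [ 2 + 3 * 4, (2 + 3) * 4 ]   // [ 14, 20 ]
-```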
diff --git a/Documentation/Books/AQL/README.md b/Documentation/Books/AQL/README.md
deleted file mode 100644
index 61886a0af3b8..000000000000
--- a/Documentation/Books/AQL/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-Introduction
-============
-
-The ArangoDB query language (AQL) can be used to retrieve and modify data that
-are stored in ArangoDB.
-
-{% hint 'info' %}
-Want to learn AQL for the first time? Be sure to check out the
-[**Tutorial**](Tutorial/README.md) before you head off to the
-in-depth documentation!
-{% endhint %}
-
-The general workflow when executing a query is as follows:
-
-- A client application ships an AQL query to the ArangoDB server. The query text
- contains everything ArangoDB needs to compile the result set
-- ArangoDB will parse the query, execute it and compile the results. If the
- query is invalid or cannot be executed, the server will return an error that
- the client can process and react to. If the query can be executed
- successfully, the server will return the query results (if any) to the client
-
-AQL is mainly a declarative language, meaning that a query expresses what result
-should be achieved but not how it should be achieved. AQL aims to be
-human-readable and therefore uses keywords from the English language. Another
-design goal of AQL was client independence, meaning that the language and syntax
-are the same for all clients, no matter what programming language the clients
-may use. Further design goals of AQL were the support of complex query patterns
-and the different data models ArangoDB offers.
-
-In its purpose, AQL is similar to the Structured Query Language (SQL). AQL supports
-reading and modifying collection data, but it doesn't support data-definition
-operations such as creating and dropping databases, collections and indexes.
-It is a pure data manipulation language (DML), not a data definition language
-(DDL) or a data control language (DCL).
-
-The syntax of AQL queries is different from SQL, even if some keywords overlap.
-Nevertheless, AQL should be easy to understand for anyone with an SQL background.
-
-For some example queries, please refer to the chapters
-[Data Queries](DataQueries.md),
-[Usual query patterns](Examples/README.md)
-and [Tutorial](Tutorial/README.md).
diff --git a/Documentation/Books/AQL/SUMMARY.md b/Documentation/Books/AQL/SUMMARY.md
deleted file mode 100644
index 1ecdf5593e70..000000000000
--- a/Documentation/Books/AQL/SUMMARY.md
+++ /dev/null
@@ -1,79 +0,0 @@
-
-
-# Summary
-* [Introduction](README.md)
-* [Tutorial](Tutorial/README.md)
- * [Basic CRUD](Tutorial/CRUD.md)
- * [Matching documents](Tutorial/Filter.md)
- * [Sorting and limiting](Tutorial/SortLimit.md)
- * [Joining together](Tutorial/Join.md)
- * [Graph traversal](Tutorial/Traversal.md)
- * [Geospatial queries](Tutorial/Geospatial.md)
-* [How to invoke AQL](Invocation/README.md)
- * [with Arangosh](Invocation/WithArangosh.md)
- * [with the Web Interface](Invocation/WithWebInterface.md)
-* [AQL Fundamentals](Fundamentals/README.md)
- * [AQL Syntax](Fundamentals/Syntax.md)
- * [Data types](Fundamentals/DataTypes.md)
- * [Bind Parameters](Fundamentals/BindParameters.md)
- * [Type and value order](Fundamentals/TypeValueOrder.md)
- * [Accessing data from collections](Fundamentals/DocumentData.md)
- * [Query Results](Fundamentals/QueryResults.md)
- * [Query Errors](Fundamentals/QueryErrors.md)
-* [Operators](Operators.md)
-* [Data Queries](DataQueries.md)
-* [High level Operations](Operations/README.md)
- * [FOR](Operations/For.md)
- * [RETURN](Operations/Return.md)
- * [FILTER](Operations/Filter.md)
- * [SORT](Operations/Sort.md)
- * [LIMIT](Operations/Limit.md)
- * [LET](Operations/Let.md)
- * [COLLECT](Operations/Collect.md)
- * [REMOVE](Operations/Remove.md)
- * [UPDATE](Operations/Update.md)
- * [REPLACE](Operations/Replace.md)
- * [INSERT](Operations/Insert.md)
- * [UPSERT](Operations/Upsert.md)
- * [WITH](Operations/With.md)
-* [Functions](Functions/README.md)
- * [Array](Functions/Array.md)
- * [Date](Functions/Date.md)
- * [Document / Object](Functions/Document.md)
- * [Fulltext](Functions/Fulltext.md)
- * [Geo](Functions/Geo.md)
- * [Miscellaneous](Functions/Miscellaneous.md)
- * [Numeric](Functions/Numeric.md)
- * [String](Functions/String.md)
- * [Type check & cast](Functions/TypeCast.md)
-* [Graphs](Graphs/README.md)
- * [Traversals explained](Graphs/TraversalsExplained.md)
- * [Traversals](Graphs/Traversals.md)
- * [Shortest Path](Graphs/ShortestPath.md)
- * [k Shortest Paths](Graphs/KShortestPaths.md)
-* [ArangoSearch Views](Views/README.md)
- * [Usage](Views/ArangoSearch/README.md)
-* [Advanced Features](Advanced/README.md)
- * [Array Operators](Advanced/ArrayOperators.md)
-* [Usual Query Patterns](Examples/README.md)
- * [Counting](Examples/Counting.md)
- * [Data-modification queries](Examples/DataModificationQueries.md)
- * [Subqueries](Examples/CombiningQueries.md)
- * [Projections and filters](Examples/ProjectionsAndFilters.md)
- * [Joins](Examples/Join.md)
- * [Grouping](Examples/Grouping.md)
- * [Traversals](Examples/CombiningGraphTraversals.md)
- * [Remove vertex](Examples/RemoveVertex.md)
- * [Multiple path search](Examples/MultiplePaths.md)
- * [Queries without collections](Examples/QueriesNoCollections.md)
-* [User Functions](Extending/README.md)
- * [Conventions](Extending/Conventions.md)
- * [Registering Functions](Extending/Functions.md)
-* [Execution and Performance](ExecutionAndPerformance/README.md)
- * [Query statistics](ExecutionAndPerformance/QueryStatistics.md)
- * [Parsing queries](ExecutionAndPerformance/ParsingQueries.md)
- * [Explaining queries](ExecutionAndPerformance/ExplainingQueries.md)
- * [Query Profiling](ExecutionAndPerformance/QueryProfiler.md)
- * [Query Optimization](ExecutionAndPerformance/Optimizer.md)
- * [Caching query results](ExecutionAndPerformance/QueryCache.md)
-* [Common Errors](CommonErrors.md)
diff --git a/Documentation/Books/AQL/Tutorial/CRUD.md b/Documentation/Books/AQL/Tutorial/CRUD.md
deleted file mode 100644
index 6cbea324b2d9..000000000000
--- a/Documentation/Books/AQL/Tutorial/CRUD.md
+++ /dev/null
@@ -1,343 +0,0 @@
-CRUD
-====
-
-- [**C**reate documents](#create-documents)
-- [**R**ead documents](#read-documents)
-- [**U**pdate documents](#update-documents)
-- [**D**elete documents](#delete-documents)
-
-Create documents
-----------------
-
-Before we can insert documents with AQL, we need a place to put them in: a
-collection. Collections can be managed via the web interface, arangosh or a
-driver. It is not possible to do so with AQL however.
-
-
-![Add Collection](Collection_Add.png)
-
-![Create Characters collection](Characters_Collection_Creation.png)
-
-Click on *COLLECTIONS* in the web interface, then *Add Collection* and type
-`Characters` as name. Confirm with *Save*. The new collection should appear
-in the list.
-
-Next, click on *QUERIES*. To create the first document for the collection with AQL,
-use the following AQL query, which you can paste into the query textbox and
-run by clicking *Execute*:
-
-![Insert query in query editor](Query_Insert.png)
-
-```js
-INSERT {
- "name": "Ned",
- "surname": "Stark",
- "alive": true,
- "age": 41,
- "traits": ["A","H","C","N","P"]
-} INTO Characters
-```
-
-The syntax is `INSERT document INTO collectionName`. The document is an object
-like you may know it from JavaScript or JSON, which is comprised of attribute
-key and value pairs. The quotes around the attribute keys are optional in AQL.
-Keys are always character sequences (strings), whereas attribute values can
-have [different types](../Fundamentals/DataTypes.md):
-
-- null
-- boolean (true, false)
-- number (integer and floating point)
-- string
-- array
-- object
-
-Name and surname of the character document we inserted are both string values.
-The alive state uses a boolean. Age is a numeric value. The traits are an array
-of strings. The entire document is an object.
-
-Let's add a bunch of other characters in a single query:
-
-```js
-LET data = [
- { "name": "Robert", "surname": "Baratheon", "alive": false, "traits": ["A","H","C"] },
- { "name": "Jaime", "surname": "Lannister", "alive": true, "age": 36, "traits": ["A","F","B"] },
- { "name": "Catelyn", "surname": "Stark", "alive": false, "age": 40, "traits": ["D","H","C"] },
- { "name": "Cersei", "surname": "Lannister", "alive": true, "age": 36, "traits": ["H","E","F"] },
- { "name": "Daenerys", "surname": "Targaryen", "alive": true, "age": 16, "traits": ["D","H","C"] },
- { "name": "Jorah", "surname": "Mormont", "alive": false, "traits": ["A","B","C","F"] },
- { "name": "Petyr", "surname": "Baelish", "alive": false, "traits": ["E","G","F"] },
- { "name": "Viserys", "surname": "Targaryen", "alive": false, "traits": ["O","L","N"] },
- { "name": "Jon", "surname": "Snow", "alive": true, "age": 16, "traits": ["A","B","C","F"] },
- { "name": "Sansa", "surname": "Stark", "alive": true, "age": 13, "traits": ["D","I","J"] },
- { "name": "Arya", "surname": "Stark", "alive": true, "age": 11, "traits": ["C","K","L"] },
- { "name": "Robb", "surname": "Stark", "alive": false, "traits": ["A","B","C","K"] },
- { "name": "Theon", "surname": "Greyjoy", "alive": true, "age": 16, "traits": ["E","R","K"] },
- { "name": "Bran", "surname": "Stark", "alive": true, "age": 10, "traits": ["L","J"] },
- { "name": "Joffrey", "surname": "Baratheon", "alive": false, "age": 19, "traits": ["I","L","O"] },
- { "name": "Sandor", "surname": "Clegane", "alive": true, "traits": ["A","P","K","F"] },
- { "name": "Tyrion", "surname": "Lannister", "alive": true, "age": 32, "traits": ["F","K","M","N"] },
- { "name": "Khal", "surname": "Drogo", "alive": false, "traits": ["A","C","O","P"] },
- { "name": "Tywin", "surname": "Lannister", "alive": false, "traits": ["O","M","H","F"] },
- { "name": "Davos", "surname": "Seaworth", "alive": true, "age": 49, "traits": ["C","K","P","F"] },
- { "name": "Samwell", "surname": "Tarly", "alive": true, "age": 17, "traits": ["C","L","I"] },
- { "name": "Stannis", "surname": "Baratheon", "alive": false, "traits": ["H","O","P","M"] },
- { "name": "Melisandre", "alive": true, "traits": ["G","E","H"] },
- { "name": "Margaery", "surname": "Tyrell", "alive": false, "traits": ["M","D","B"] },
- { "name": "Jeor", "surname": "Mormont", "alive": false, "traits": ["C","H","M","P"] },
- { "name": "Bronn", "alive": true, "traits": ["K","E","C"] },
- { "name": "Varys", "alive": true, "traits": ["M","F","N","E"] },
- { "name": "Shae", "alive": false, "traits": ["M","D","G"] },
- { "name": "Talisa", "surname": "Maegyr", "alive": false, "traits": ["D","C","B"] },
- { "name": "Gendry", "alive": false, "traits": ["K","C","A"] },
- { "name": "Ygritte", "alive": false, "traits": ["A","P","K"] },
- { "name": "Tormund", "surname": "Giantsbane", "alive": true, "traits": ["C","P","A","I"] },
- { "name": "Gilly", "alive": true, "traits": ["L","J"] },
- { "name": "Brienne", "surname": "Tarth", "alive": true, "age": 32, "traits": ["P","C","A","K"] },
- { "name": "Ramsay", "surname": "Bolton", "alive": true, "traits": ["E","O","G","A"] },
- { "name": "Ellaria", "surname": "Sand", "alive": true, "traits": ["P","O","A","E"] },
- { "name": "Daario", "surname": "Naharis", "alive": true, "traits": ["K","P","A"] },
- { "name": "Missandei", "alive": true, "traits": ["D","L","C","M"] },
- { "name": "Tommen", "surname": "Baratheon", "alive": true, "traits": ["I","L","B"] },
- { "name": "Jaqen", "surname": "H'ghar", "alive": true, "traits": ["H","F","K"] },
- { "name": "Roose", "surname": "Bolton", "alive": true, "traits": ["H","E","F","A"] },
- { "name": "The High Sparrow", "alive": true, "traits": ["H","M","F","O"] }
-]
-
-FOR d IN data
- INSERT d INTO Characters
-```
-
-The `LET` keyword defines a variable named *data* with an array of objects as
-its value. The general form is `LET variableName = valueExpression`, the
-expression here being a literal array definition like `[ {...}, {...}, ... ]`.
-
-`FOR variableName IN expression` is used to iterate over each element of the
-*data* array. In each loop, one element is assigned to the variable *d*.
-This variable is then used in the `INSERT` statement instead of a literal
-object definition. What it does is basically:
-
-```js
-INSERT {
- "name": "Robert",
- "surname": "Baratheon",
- "alive": false,
- "traits": ["A","H","C"]
-} INTO Characters
-
-INSERT {
- "name": "Jaime",
- "surname": "Lannister",
- "alive": true,
- "age": 36,
- "traits": ["A","F","B"]
-} INTO Characters
-
-...
-```
-
-Note: AQL does not permit multiple `INSERT` operations that target the same
-collection in a single query.
-However, an `INSERT` is allowed as the body of a `FOR` loop, inserting multiple
-documents like we did with the above query.
-
-Read documents
---------------
-
-There are a couple of documents in the *Characters* collection by now. We can
-retrieve them all using a `FOR` loop again. This time however, we use it to
-go through all documents in the collection instead of an array:
-
-```js
-FOR c IN Characters
- RETURN c
-```
-
-The syntax of the loop is `FOR variableName IN collectionName`. For each
-document in the collection, *c* is assigned a document, which is then returned
-as per the loop body. The query returns all characters we previously stored.
-
-Among them should be *Ned Stark*, similar to this example:
-
-```json
- {
- "_key": "2861650",
- "_id": "Characters/2861650",
- "_rev": "_V1bzsXa---",
- "name": "Ned",
- "surname": "Stark",
- "alive": true,
- "age": 41,
- "traits": ["A","H","C","N","P"]
- },
-```
-
-The document features the five attributes we stored, plus three more added by
-the database system. Each document needs a unique `_key`, which identifies it
-within a collection. The `_id` is a computed property, a concatenation of the
-collection name, a forward slash `/` and the document key. It uniquely identifies
-a document within a database. `_rev` is a revision ID managed by the system.
-
-Document keys can be provided by the user upon document creation, or a unique
-value is assigned automatically. The key cannot be changed later. All three system
-attributes starting with an underscore `_` are read-only.
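-
-For example, a key could be supplied explicitly upon insertion like this
-(a sketch only; the key value is made up and this document is not part of the
-tutorial data set):
-
-```js
-INSERT {
-  "_key": "NedStark",   // hypothetical user-defined key
-  "name": "Ned",
-  "surname": "Stark",
-  "alive": true
-} INTO Characters
-```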
-
-We can use either the document key or the document ID to retrieve a specific
-document with the help of an AQL function `DOCUMENT()`:
-
-```js
-RETURN DOCUMENT("Characters", "2861650")
-// --- or ---
-RETURN DOCUMENT("Characters/2861650")
-```
-
-```json
-[
- {
- "_key": "2861650",
- "_id": "Characters/2861650",
- "_rev": "_V1bzsXa---",
- "name": "Ned",
- "surname": "Stark",
- "alive": true,
- "age": 41,
- "traits": ["A","H","C","N","P"]
- }
-]
-```
-
-Note: Document keys will be different for you. Change the queries accordingly.
-Here, `"2861650"` is the key for the *Ned Stark* document, and `"2861653"` for
-*Catelyn Stark*.
-
-The `DOCUMENT()` function also allows fetching multiple documents at once:
-
-```js
-RETURN DOCUMENT("Characters", ["2861650", "2861653"])
-// --- or ---
-RETURN DOCUMENT(["Characters/2861650", "Characters/2861653"])
-```
-
-```json
-[
- [
- {
- "_key": "2861650",
- "_id": "Characters/2861650",
- "_rev": "_V1bzsXa---",
- "name": "Ned",
- "surname": "Stark",
- "alive": true,
- "age": 41,
- "traits": ["A","H","C","N","P"]
- },
- {
- "_key": "2861653",
- "_id": "Characters/2861653",
- "_rev": "_V1bzsXa--B",
- "name": "Catelyn",
- "surname": "Stark",
- "alive": false,
- "age": 40,
- "traits": ["D","H","C"]
- }
- ]
-]
-```
-
-See the [`DOCUMENT()` function](../Functions/Miscellaneous.md#document)
-documentation for more details.
-
-Update documents
-----------------
-
-According to our *Ned Stark* document, he is alive. When we learn that he
-died, we need to change the `alive` attribute. Let us modify the existing document:
-
-```js
-UPDATE "2861650" WITH { alive: false } IN Characters
-```
-
-The syntax is `UPDATE documentKey WITH object IN collectionName`. It updates the
-specified document with the attributes listed (or adds them if they don't exist),
-but leaves the rest untouched. To replace the entire document content, you may
-use `REPLACE` instead of `UPDATE`:
-
-```js
-REPLACE "2861650" WITH {
- name: "Ned",
- surname: "Stark",
- alive: false,
- age: 41,
- traits: ["A","H","C","N","P"]
-} IN Characters
-```
-
-This also works in a loop, for instance to add a new attribute to all documents:
-
-```js
-FOR c IN Characters
- UPDATE c WITH { season: 1 } IN Characters
-```
-
-A variable is used instead of a literal document key to update each document.
-The query adds an attribute `season` to the documents' top-level. You can
-inspect the result by re-running the query that returns all documents in the
-collection:
-
-```js
-FOR c IN Characters
- RETURN c
-```
-
-```json
-[
- [
- {
- "_key": "2861650",
- "_id": "Characters/2861650",
- "_rev": "_V1bzsXa---",
- "name": "Ned",
- "surname": "Stark",
- "alive": false,
- "age": 41,
- "traits": ["A","H","C","N","P"],
- "season": 1
- },
- {
- "_key": "2861653",
- "_id": "Characters/2861653",
- "_rev": "_V1bzsXa--B",
- "name": "Catelyn",
- "surname": "Stark",
- "alive": false,
- "age": 40,
- "traits": ["D","H","C"],
- "season": 1
- },
- {
- ...
- }
- ]
-]
-```
-
-Delete documents
-----------------
-
-To fully remove documents from a collection, there is the `REMOVE` operation.
-It works similarly to the other modification operations, yet without a `WITH` clause:
-
-```js
-REMOVE "2861650" IN Characters
-```
-
-It can also be used in a loop body to effectively truncate a collection:
-
-```js
-FOR c IN Characters
- REMOVE c IN Characters
-```
-
-Note: re-run the [insert queries](#create-documents) at the top with all
-character documents before you continue with the next chapter, to have data
-to work with again.
diff --git a/Documentation/Books/AQL/Tutorial/Characters_Collection_Creation.png b/Documentation/Books/AQL/Tutorial/Characters_Collection_Creation.png
deleted file mode 100644
index 9bf38ff69dbc..000000000000
Binary files a/Documentation/Books/AQL/Tutorial/Characters_Collection_Creation.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Tutorial/Characters_Table.png b/Documentation/Books/AQL/Tutorial/Characters_Table.png
deleted file mode 100644
index d5f9ddf29124..000000000000
Binary files a/Documentation/Books/AQL/Tutorial/Characters_Table.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Tutorial/ChildOf_Collection_Creation.png b/Documentation/Books/AQL/Tutorial/ChildOf_Collection_Creation.png
deleted file mode 100644
index c1c240a5d038..000000000000
Binary files a/Documentation/Books/AQL/Tutorial/ChildOf_Collection_Creation.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Tutorial/ChildOf_Graph.png b/Documentation/Books/AQL/Tutorial/ChildOf_Graph.png
deleted file mode 100644
index b1efb45f317b..000000000000
Binary files a/Documentation/Books/AQL/Tutorial/ChildOf_Graph.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Tutorial/Collection_Add.png b/Documentation/Books/AQL/Tutorial/Collection_Add.png
deleted file mode 100644
index db818c803cb9..000000000000
Binary files a/Documentation/Books/AQL/Tutorial/Collection_Add.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Tutorial/Comparison_DataModels.png b/Documentation/Books/AQL/Tutorial/Comparison_DataModels.png
deleted file mode 100644
index bb0d4cec89d8..000000000000
Binary files a/Documentation/Books/AQL/Tutorial/Comparison_DataModels.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Tutorial/Filter.md b/Documentation/Books/AQL/Tutorial/Filter.md
deleted file mode 100644
index b16472d5f069..000000000000
--- a/Documentation/Books/AQL/Tutorial/Filter.md
+++ /dev/null
@@ -1,139 +0,0 @@
-Matching documents
-==================
-
-So far, we either looked up a single document, or returned the entire character
-collection. For the lookup, we used the `DOCUMENT()` function, which means we
-can only find documents by their key or ID.
-
-To find documents that fulfill certain criteria more complex than key equality,
-there is the `FILTER` operation in AQL, which enables us to formulate arbitrary
-conditions for documents to match.
-
-Equality condition
-------------------
-
-```js
-FOR c IN Characters
- FILTER c.name == "Ned"
- RETURN c
-```
-
-The filter condition reads like: "the attribute *name* of a character document
-must be equal to the string *Ned*". If the condition applies, the character
-document gets returned. This works likewise with any other attribute:
-
-```js
-FOR c IN Characters
- FILTER c.surname == "Stark"
- RETURN c
-```
-
-Range conditions
-----------------
-
-Strict equality is one possible condition we can state. There are plenty of
-other conditions we can formulate however. For example, we could ask for all
-young characters:
-
-```js
-FOR c IN Characters
- FILTER c.age >= 13
- RETURN c.name
-```
-
-```json
-[
- "Joffrey",
- "Tyrion",
- "Samwell",
- "Ned",
- "Catelyn",
- "Cersei",
- "Jon",
- "Sansa",
- "Brienne",
- "Theon",
- "Davos",
- "Jaime",
- "Daenerys"
-]
-```
-
-The operator `>=` stands for *greater-or-equal*, so every character of age 13
-or older is returned (only their name in the example). We can return names
-and age of all characters younger than 13 by changing the operator to
-*less-than* and using the object syntax to define a subset of attributes to
-return:
-
-```js
-FOR c IN Characters
- FILTER c.age < 13
- RETURN { name: c.name, age: c.age }
-```
-
-```json
-[
- { "name": "Tommen", "age": null },
- { "name": "Arya", "age": 11 },
- { "name": "Roose", "age": null },
- ...
-]
-```
-
-You may notice that it returns name and age of 30 characters, most with an
-age of `null`. The reason for this is that `null` is the fallback value if
-an attribute is requested by the query but no such attribute exists in the
-document, and `null` compares lower than numbers (see
-[Type and value order](../Fundamentals/TypeValueOrder.md)). Hence, it
-accidentally fulfills the age criterion `c.age < 13` (`null < 13`).
-
-Multiple conditions
--------------------
-
-To prevent documents without an age attribute from passing the filter, we can
-add a second criterion:
-
-```js
-FOR c IN Characters
- FILTER c.age < 13
- FILTER c.age != null
- RETURN { name: c.name, age: c.age }
-```
-
-```json
-[
- { "name": "Arya", "age": 11 },
- { "name": "Bran", "age": 10 }
-]
-```
-
-This could equally be written with a boolean `AND` operator as:
-
-```js
-FOR c IN Characters
- FILTER c.age < 13 AND c.age != null
- RETURN { name: c.name, age: c.age }
-```
-
-And the second condition could as well be `c.age > null`.
-
-Alternative conditions
-----------------------
-
-If you want documents to fulfill one or another condition, possibly for
-different attributes as well, use `OR`:
-
-```js
-FOR c IN Characters
- FILTER c.name == "Jon" OR c.name == "Joffrey"
- RETURN { name: c.name, surname: c.surname }
-```
-
-```json
-[
- { "name": "Joffrey", "surname": "Baratheon" },
- { "name": "Jon", "surname": "Snow" }
-]
-```
-
-See more details about [Filter operations](../Operations/Filter.md).
diff --git a/Documentation/Books/AQL/Tutorial/Geospatial.md b/Documentation/Books/AQL/Tutorial/Geospatial.md
deleted file mode 100644
index 421c73f2adcb..000000000000
--- a/Documentation/Books/AQL/Tutorial/Geospatial.md
+++ /dev/null
@@ -1,182 +0,0 @@
-Geospatial queries
-==================
-
-Geospatial coordinates consisting of a latitude and longitude value
-can be stored either as two separate attributes, or as a single
-attribute in the form of an array with both numeric values.
-ArangoDB can [index such coordinates](../../Manual/Indexing/Geo.html)
-for fast geospatial queries.
-
-Locations data
---------------
-
-Let us insert some filming locations into a new collection *Locations*,
-which you need to create first. Then run the AQL query below:
-
-![Create Locations collection](Locations_Collection_Creation.png)
-
-```js
-LET places = [
- { "name": "Dragonstone", "coordinate": [ 55.167801, -6.815096 ] },
- { "name": "King's Landing", "coordinate": [ 42.639752, 18.110189 ] },
- { "name": "The Red Keep", "coordinate": [ 35.896447, 14.446442 ] },
- { "name": "Yunkai", "coordinate": [ 31.046642, -7.129532 ] },
- { "name": "Astapor", "coordinate": [ 31.50974, -9.774249 ] },
- { "name": "Winterfell", "coordinate": [ 54.368321, -5.581312 ] },
- { "name": "Vaes Dothrak", "coordinate": [ 54.16776, -6.096125 ] },
- { "name": "Beyond the wall", "coordinate": [ 64.265473, -21.094093 ] }
-]
-
-FOR place IN places
- INSERT place INTO Locations
-```
-
-Visualization of the coordinates on a map with their labels:
-
-![Locations on map](Locations_Map.png)
-
-Geospatial index
-----------------
-
-To query based on coordinates, a [geo index](../../Manual/Indexing/Geo.html)
-is required. It determines which fields contain the latitude and longitude
-values.
-
-- Go to *COLLECTIONS*
-- Click on the *Locations* collection
-- Switch to the *Indexes* tab at top
-- Click the green button with a plus on the right-hand side
-- Change the type to *Geo Index*
-- Enter `coordinate` into the *Fields* field
-- Click *Create* to confirm
-
-![Create geospatial index on coordinate attribute](Locations_GeoIndex_Creation.png)
-
-![Indexes of Locations collection](Locations_Indexes.png)
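-
-Alternatively, the same index could be created from arangosh. This is only a
-sketch, assuming the *Locations* collection already exists:
-
-```js
-// create a geo index on the "coordinate" attribute ([latitude, longitude] array)
-db.Locations.ensureIndex({ type: "geo", fields: [ "coordinate" ] });
-```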
-
-Find nearby locations
----------------------
-
-A `FOR` loop is used again, but this time to iterate over the results of a
-function call to `NEAR()` to find the *n* closest coordinates to a reference
-point, and return the documents with the nearby locations. The default for
-*n* is 100, which means 100 documents are returned at most, the closest
-matches first.
-
-In the example below, the limit is set to 3. The origin (the reference point) is
-a coordinate somewhere downtown in Dublin, Ireland:
-
-```js
-FOR loc IN NEAR(Locations, 53.35, -6.26, 3)
- RETURN {
- name: loc.name,
- latitude: loc.coordinate[0],
- longitude: loc.coordinate[1]
- }
-```
-
-```json
-[
- {
- "name": "Vaes Dothrak",
- "latitude": 54.16776,
- "longitude": -6.096125
- },
- {
- "name": "Winterfell",
- "latitude": 54.368321,
- "longitude": -5.581312
- },
- {
- "name": "Dragonstone",
- "latitude": 55.167801,
- "longitude": -6.815096
- }
-]
-```
-
-The query returns the location name, as well as the coordinate. The coordinate
-is returned as two separate attributes. You may use a simpler `RETURN loc`
-instead if you want.
-
-Find locations within radius
-----------------------------
-
-`NEAR()` can be swapped out with `WITHIN()`, to search for locations within a
-given radius from a reference point. The syntax is the same as for `NEAR()`,
-except for the fourth parameter, which specifies the radius instead of a limit.
-The unit for the radius is meters. The example uses a radius of 200,000
-meters (200 kilometers):
-
-```js
-FOR loc IN WITHIN(Locations, 53.35, -6.26, 200 * 1000)
- RETURN {
- name: loc.name,
- latitude: loc.coordinate[0],
- longitude: loc.coordinate[1]
- }
-```
-
-```json
-[
- {
- "name": "Vaes Dothrak",
- "latitude": 54.16776,
- "longitude": -6.096125
- },
- {
- "name": "Winterfell",
- "latitude": 54.368321,
- "longitude": -5.581312
- }
-]
-```
-
-Return the distance
--------------------
-
-Both `NEAR()` and `WITHIN()` can return the distance to the reference point
-by adding an optional fifth parameter. It has to be a string, which will be
-used as the attribute name for an additional attribute holding the distance in meters:
-
-```js
-FOR loc IN NEAR(Locations, 53.35, -6.26, 3, "distance")
- RETURN {
- name: loc.name,
- latitude: loc.coordinate[0],
- longitude: loc.coordinate[1],
- distance: loc.distance / 1000
- }
-```
-
-```json
-[
- {
- "name": "Vaes Dothrak",
- "latitude": 54.16776,
- "longitude": -6.096125,
- "distance": 91.56658640314431
- },
- {
- "name": "Winterfell",
- "latitude": 54.368321,
- "longitude": -5.581312,
- "distance": 121.66399816395028
- },
- {
- "name": "Dragonstone",
- "latitude": 55.167801,
- "longitude": -6.815096,
- "distance": 205.31879386198324
- }
-]
-```
-
-The extra attribute, here called *distance*, is returned as part of the *loc*
-variable, as if it were part of the location document. The value is divided
-by 1000 in the example query to convert the unit to kilometers, simply to
-make it more readable.
-
-
diff --git a/Documentation/Books/AQL/Tutorial/Join.md b/Documentation/Books/AQL/Tutorial/Join.md
deleted file mode 100644
index 7bc71f39cfa1..000000000000
--- a/Documentation/Books/AQL/Tutorial/Join.md
+++ /dev/null
@@ -1,322 +0,0 @@
-Joining together
-================
-
-References to other documents
------------------------------
-
-The character data we imported has an attribute *traits* for each character,
-which is an array of strings. It does not store character features directly
-however:
-
-```json
-{
- "name": "Ned",
- "surname": "Stark",
- "alive": false,
- "age": 41,
- "traits": ["A","H","C","N","P"]
-}
-```
-
-It is rather a list of letters without an apparent meaning. The idea here is
-that *traits* stores document keys of another collection, which
-we can use to resolve the letters to labels such as "strong". The benefit of
-using another collection for the actual traits is that we can easily query
-for all existing traits later on and store labels in multiple languages, for
-instance, in a central place. If we embedded the traits directly...
-
-```json
-{
- "name": "Ned",
- "surname": "Stark",
- "alive": false,
- "age": 41,
- "traits": [
- {
- "de": "stark",
- "en": "strong"
- },
- {
- "de": "einflussreich",
- "en": "powerful"
- },
- {
- "de": "loyal",
- "en": "loyal"
- },
- {
- "de": "rational",
- "en": "rational"
- },
- {
- "de": "mutig",
- "en": "brave"
- }
- ]
-}
-```
-
-... it becomes really hard to maintain traits. If you were to rename or
-translate one of them, you would need to find all other character documents
-with the same trait and perform the changes there too. If we only refer to a
-trait in another collection, it is as easy as updating a single document.
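-
-As a sketch, once the *Traits* collection exists (it is created in the next
-section), such a central change is a single `UPDATE` operation. The new label
-used here is purely illustrative:
-
-```js
-UPDATE "A" WITH { en: "mighty" } IN Traits
-```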
-
-
-
-![Data model comparison](Comparison_DataModels.png)
-
-Importing traits
-----------------
-
-Below you find the traits data. Follow the pattern shown in
-[Create documents](CRUD.md#create-documents) to import it:
-
-- Create a document collection *Traits*
-- Assign the data to a variable in AQL, `LET data = [ ... ]`
-- Use a `FOR` loop to iterate over each array element of the data
-- `INSERT` the element `INTO Traits`
-
-![Create Traits collection](Traits_Collection_Creation.png)
-
-```json
-[
- { "_key": "A", "en": "strong", "de": "stark" },
- { "_key": "B", "en": "polite", "de": "freundlich" },
- { "_key": "C", "en": "loyal", "de": "loyal" },
- { "_key": "D", "en": "beautiful", "de": "schön" },
- { "_key": "E", "en": "sneaky", "de": "hinterlistig" },
- { "_key": "F", "en": "experienced", "de": "erfahren" },
- { "_key": "G", "en": "corrupt", "de": "korrupt" },
- { "_key": "H", "en": "powerful", "de": "einflussreich" },
- { "_key": "I", "en": "naive", "de": "naiv" },
- { "_key": "J", "en": "unmarried", "de": "unverheiratet" },
- { "_key": "K", "en": "skillful", "de": "geschickt" },
- { "_key": "L", "en": "young", "de": "jung" },
- { "_key": "M", "en": "smart", "de": "klug" },
- { "_key": "N", "en": "rational", "de": "rational" },
- { "_key": "O", "en": "ruthless", "de": "skrupellos" },
- { "_key": "P", "en": "brave", "de": "mutig" },
- { "_key": "Q", "en": "mighty", "de": "mächtig" },
- { "_key": "R", "en": "weak", "de": "schwach" }
-]
-```
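-
-Putting the pattern together, the import query could look like the following
-sketch. Run it after creating the *Traits* collection; the `data` array is
-shortened here and stands for the full list above:
-
-```js
-LET data = [
-  { "_key": "A", "en": "strong", "de": "stark" },
-  /* ... the remaining trait documents from the list above ... */
-  { "_key": "R", "en": "weak", "de": "schwach" }
-]
-
-FOR t IN data
-  INSERT t INTO Traits
-```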
-
-Resolving traits
-----------------
-
-Let's start simple by returning only the traits attribute of each character:
-
-```js
-FOR c IN Characters
- RETURN c.traits
-```
-
-```json
-[
- { "traits": ["A","H","C","N","P"] },
- { "traits": ["D","H","C"] },
- ...
-]
-```
-
-
-Also see the [Fundamentals of Objects / Documents](../Fundamentals/DataTypes.md#objects--documents)
-about attribute access.
-
-We can use the *traits* array together with the `DOCUMENT()` function to use
-the elements as document keys and look them up in the *Traits* collection:
-
-```js
-FOR c IN Characters
- RETURN DOCUMENT("Traits", c.traits)
-```
-
-```json
-[
- [
- {
- "_key": "A",
- "_id": "Traits/A",
- "_rev": "_V5oRUS2---",
- "en": "strong",
- "de": "stark"
- },
- {
- "_key": "H",
- "_id": "Traits/H",
- "_rev": "_V5oRUS6--E",
- "en": "powerful",
- "de": "einflussreich"
- },
- {
- "_key": "C",
- "_id": "Traits/C",
- "_rev": "_V5oRUS6--_",
- "en": "loyal",
- "de": "loyal"
- },
- {
- "_key": "N",
- "_id": "Traits/N",
- "_rev": "_V5oRUT---D",
- "en": "rational",
- "de": "rational"
- },
- {
- "_key": "P",
- "_id": "Traits/P",
- "_rev": "_V5oRUTC---",
- "en": "brave",
- "de": "mutig"
- }
- ],
- [
- {
- "_key": "D",
- "_id": "Traits/D",
- "_rev": "_V5oRUS6--A",
- "en": "beautiful",
- "de": "schön"
- },
- {
- "_key": "H",
- "_id": "Traits/H",
- "_rev": "_V5oRUS6--E",
- "en": "powerful",
- "de": "einflussreich"
- },
- {
- "_key": "C",
- "_id": "Traits/C",
- "_rev": "_V5oRUS6--_",
- "en": "loyal",
- "de": "loyal"
- }
- ],
- ...
-]
-```
-
-The [DOCUMENT() function](../Functions/Miscellaneous.md#document) can be used
-to look up a single or multiple documents via document identifiers. In our
-example, we pass the collection name from which we want to fetch documents
-as first argument (`"Traits"`) and an array of document keys (`_key` attribute)
-as second argument. In return we get an array of the full trait documents
-for each character.
-
-This is a bit too much information, so let's only return English labels using
-the [array expansion](../Advanced/ArrayOperators.md#array-expansion) notation:
-
-```js
-FOR c IN Characters
- RETURN DOCUMENT("Traits", c.traits)[*].en
-```
-
-```json
-[
- [
- "strong",
- "powerful",
- "loyal",
- "rational",
- "brave"
- ],
- [
- "beautiful",
- "powerful",
- "loyal"
- ],
- ...
-]
-```
-
-Merging characters and traits
------------------------------
-
-Great, we resolved the letters to meaningful traits! But we also need to know
-to which character they belong. Thus, we need to merge both the character
-document and the data from the trait documents:
-
-```js
-FOR c IN Characters
- RETURN MERGE(c, { traits: DOCUMENT("Traits", c.traits)[*].en } )
-```
-
-```json
-[
- {
- "_id": "Characters/2861650",
- "_key": "2861650",
- "_rev": "_V1bzsXa---",
- "age": 41,
- "alive": false,
- "name": "Ned",
- "surname": "Stark",
- "traits": [
- "strong",
- "powerful",
- "loyal",
- "rational",
- "brave"
- ]
- },
- {
- "_id": "Characters/2861653",
- "_key": "2861653",
- "_rev": "_V1bzsXa--B",
- "age": 40,
- "alive": false,
- "name": "Catelyn",
- "surname": "Stark",
- "traits": [
- "beautiful",
- "powerful",
- "loyal"
- ]
- },
- ...
-]
-```
-
-The `MERGE()` function merges objects together. Because we used an object
-`{ traits: ... }` with the same attribute name *traits* as the original
-character attribute, the latter got overwritten by the merge operation.
-
-Join another way
-----------------
-
-The `DOCUMENT()` function utilizes primary indices to look up documents quickly.
-It is limited to finding documents via their identifiers, however. For a use case
-like in our example it is sufficient to accomplish a simple join.
-
-There is another, more flexible syntax for joins: nested `FOR` loops over
-multiple collections, with a `FILTER` condition to match up attributes.
-In case of the traits key array, there needs to be a third loop to iterate
-over the keys:
-
-```js
-FOR c IN Characters
- RETURN MERGE(c, {
- traits: (
- FOR key IN c.traits
- FOR t IN Traits
- FILTER t._key == key
- RETURN t.en
- )
- })
-```
-
-For each character, it loops over its *traits* attribute (e.g. `["D","H","C"]`)
-and for each document reference in this array, it loops over the *Traits*
-collection. There is a condition to match the document key with the key
-reference. The inner `FOR` loop and the `FILTER` get transformed to a primary
-index lookup in this case instead of building up a Cartesian product only to
-filter away everything but a single match: Document keys within a collection
-are unique, thus there can only be one match.
-
-Each written-out, English trait is returned and all the traits are then merged
-with the character document. The result is identical to the query using
-`DOCUMENT()`. However, this approach with a nested `FOR` loop and a `FILTER`
-is not limited to primary keys. You can do this with any other attribute as well.
-For an efficient lookup, make sure you add a hash index for this attribute.
-If its values are unique, then also set the index option to unique.
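-
-As a sketch of what that could look like in arangosh, assuming you want to join
-on the English label *en* and that these labels are unique:
-
-```js
-// create a hash index on the attribute used in the join condition
-db.Traits.ensureIndex({ type: "hash", fields: [ "en" ], unique: true });
-```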
diff --git a/Documentation/Books/AQL/Tutorial/Locations_Collection_Creation.png b/Documentation/Books/AQL/Tutorial/Locations_Collection_Creation.png
deleted file mode 100644
index a85082d26c8f..000000000000
Binary files a/Documentation/Books/AQL/Tutorial/Locations_Collection_Creation.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Tutorial/Locations_GeoIndex_Creation.png b/Documentation/Books/AQL/Tutorial/Locations_GeoIndex_Creation.png
deleted file mode 100644
index ec886021763f..000000000000
Binary files a/Documentation/Books/AQL/Tutorial/Locations_GeoIndex_Creation.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Tutorial/Locations_Indexes.png b/Documentation/Books/AQL/Tutorial/Locations_Indexes.png
deleted file mode 100644
index 1b15fdc177b7..000000000000
Binary files a/Documentation/Books/AQL/Tutorial/Locations_Indexes.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Tutorial/Locations_Map.png b/Documentation/Books/AQL/Tutorial/Locations_Map.png
deleted file mode 100644
index f292bd6ab162..000000000000
Binary files a/Documentation/Books/AQL/Tutorial/Locations_Map.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Tutorial/Locations_Table.png b/Documentation/Books/AQL/Tutorial/Locations_Table.png
deleted file mode 100644
index bfa70d2c447b..000000000000
Binary files a/Documentation/Books/AQL/Tutorial/Locations_Table.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Tutorial/Query_Insert.png b/Documentation/Books/AQL/Tutorial/Query_Insert.png
deleted file mode 100644
index 9966e0989f8e..000000000000
Binary files a/Documentation/Books/AQL/Tutorial/Query_Insert.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Tutorial/README.md b/Documentation/Books/AQL/Tutorial/README.md
deleted file mode 100644
index 25745e92f055..000000000000
--- a/Documentation/Books/AQL/Tutorial/README.md
+++ /dev/null
@@ -1,55 +0,0 @@
-AQL tutorial
-============
-
-This is an introduction to ArangoDB's query language AQL, built around a small
-dataset of characters from the novel and fantasy drama television series
-Game of Thrones (as of season 1). It includes character traits in two languages,
-some family relations, and last but not least a small set of filming locations,
-which makes for an interesting mix of data to work with.
-
-There is no need to import the data before you start. It is provided as part
-of the AQL queries in this tutorial. You can interact with ArangoDB using its
-[web interface](../../Manual/GettingStarted/WebInterface.html) to manage
-collections and execute the queries.
-
-Chapters
---------
-
-- [Basic CRUD](CRUD.md)
-- [Matching documents](Filter.md)
-- [Sorting and limiting](SortLimit.md)
-- [Joining together](Join.md)
-- [Graph traversal](Traversal.md)
-- [Geospatial queries](Geospatial.md)
-
-
-
-Dataset
--------
-
-### Characters
-
-The dataset features 43 characters with their name, surname, age, alive status
-and trait references. The surname and age properties are not always present.
-The column *traits (resolved)* is not part of the actual data used in this
-tutorial, but included for your convenience.
-
-![Characters table](Characters_Table.png)
-
-### Traits
-
-There are 18 unique traits. Each trait has a random letter as document key.
-The trait labels come in English and German.
-
-![Traits table](Traits_Table.png)
-
-### Locations
-
-This small collection of 8 filming locations comes with two attributes, a
-*name* and a *coordinate*. The coordinates are modeled as number arrays,
-comprised of a latitude and a longitude value each.
-
-![Locations table](Locations_Table.png)
diff --git a/Documentation/Books/AQL/Tutorial/SortLimit.md b/Documentation/Books/AQL/Tutorial/SortLimit.md
deleted file mode 100644
index d9b28f172372..000000000000
--- a/Documentation/Books/AQL/Tutorial/SortLimit.md
+++ /dev/null
@@ -1,184 +0,0 @@
-Sorting and limiting
-====================
-
-Cap the result count
---------------------
-
-It may not always be necessary to return all documents that a `FOR` loop
-would normally return. In those cases, we can limit the number of documents
-with a `LIMIT` operation:
-
-```js
-FOR c IN Characters
- LIMIT 5
- RETURN c.name
-```
-
-```json
-[
- "Joffrey",
- "Tommen",
- "Tyrion",
- "Roose",
- "Tywin"
-]
-```
-
-`LIMIT` is followed by a number for the maximum document count. There is a
-second syntax, however, which allows you to skip a certain number of records
-and return the next *n* documents:
-
-```js
-FOR c IN Characters
- LIMIT 2, 5
- RETURN c.name
-```
-
-```json
-[
- "Tyrion",
- "Roose",
- "Tywin",
- "Samwell",
- "Melisandre"
-]
-```
-
-See how the second query skipped the first two names and returned the next
-five (both results feature Tyrion, Roose and Tywin).
-
-Sort by name
-------------
-
-The order in which matching records were returned by the queries shown so far
-was basically random. To return them in a defined order, we can add a
-`SORT` operation. It can have a big impact on the result if combined with
-a `LIMIT`, because the result becomes predictable if you sort first.
-
-```js
-FOR c IN Characters
- SORT c.name
- LIMIT 10
- RETURN c.name
-```
-
-```json
-[
- "Arya",
- "Bran",
- "Brienne",
- "Bronn",
- "Catelyn",
- "Cersei",
- "Daario",
- "Daenerys",
- "Davos",
- "Ellaria"
-]
-```
-
-See how it sorted by name, then returned the first ten names in alphabetical
-order. We can reverse the sort order with `DESC` for descending:
-
-```js
-FOR c IN Characters
- SORT c.name DESC
- LIMIT 10
- RETURN c.name
-```
-
-```json
-[
- "Ygritte",
- "Viserys",
- "Varys",
- "Tywin",
- "Tyrion",
- "Tormund",
- "Tommen",
- "Theon",
- "The High Sparrow",
- "Talisa"
-]
-```
-
-The first sort was ascending, which is the default order. Because it is the
-default, it is not required to explicitly ask for `ASC` order.
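-
-For illustration, here is the first sorting query again with the default order
-spelled out explicitly; it should return the same result:
-
-```js
-FOR c IN Characters
-  SORT c.name ASC
-  LIMIT 10
-  RETURN c.name
-```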
-
-Sort by multiple attributes
----------------------------
-
-Assume we want to sort by surname. Many of the characters share a surname.
-The result order among characters with the same surname is undefined. We can
-first sort by surname, then by name, to determine the order:
-
-```js
-FOR c IN Characters
- FILTER c.surname
- SORT c.surname, c.name
- LIMIT 10
- RETURN {
- surname: c.surname,
- name: c.name
- }
-```
-
-```json
-[
- { "surname": "Baelish", "name": "Petyr" },
- { "surname": "Baratheon", "name": "Joffrey" },
- { "surname": "Baratheon", "name": "Robert" },
- { "surname": "Baratheon", "name": "Stannis" },
- { "surname": "Baratheon", "name": "Tommen" },
- { "surname": "Bolton", "name": "Ramsay" },
- { "surname": "Bolton", "name": "Roose" },
- { "surname": "Clegane", "name": "Sandor" },
- { "surname": "Drogo", "name": "Khal" },
- { "surname": "Giantsbane", "name": "Tormund" }
-]
-```
-
-Overall, the documents are sorted by last name. If the *surname* is the same
-for two characters, the *name* values are compared and the result sorted.
-
-Note that a filter is applied before sorting, to only let documents through
-that actually have a surname value (many don't, and those would cause
-`null` values in the result).
-
-Sort by age
------------
-
-The order can also be determined by a numeric value, such as the age:
-
-```js
-FOR c IN Characters
- FILTER c.age
- SORT c.age
- LIMIT 10
- RETURN {
- name: c.name,
- age: c.age
- }
-```
-
-```json
-[
- { "name": "Bran", "age": 10 },
- { "name": "Arya", "age": 11 },
- { "name": "Sansa", "age": 13 },
- { "name": "Jon", "age": 16 },
- { "name": "Theon", "age": 16 },
- { "name": "Daenerys", "age": 16 },
- { "name": "Samwell", "age": 17 },
- { "name": "Joffrey", "age": 19 },
- { "name": "Tyrion", "age": 32 },
- { "name": "Brienne", "age": 32 }
-]
-```
-
-A filter is applied to skip documents without an age attribute. The remaining
-documents are sorted by age in ascending order, and the name and age of the
-ten youngest characters are returned.
-
-See the [SORT operation](../Operations/Sort.md) and
-[LIMIT operation](../Operations/Limit.md) documentation for more details.
diff --git a/Documentation/Books/AQL/Tutorial/Traits_Collection_Creation.png b/Documentation/Books/AQL/Tutorial/Traits_Collection_Creation.png
deleted file mode 100644
index 899ce51ccfd0..000000000000
Binary files a/Documentation/Books/AQL/Tutorial/Traits_Collection_Creation.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Tutorial/Traits_Table.png b/Documentation/Books/AQL/Tutorial/Traits_Table.png
deleted file mode 100644
index d3869349edbc..000000000000
Binary files a/Documentation/Books/AQL/Tutorial/Traits_Table.png and /dev/null differ
diff --git a/Documentation/Books/AQL/Tutorial/Traversal.md b/Documentation/Books/AQL/Tutorial/Traversal.md
deleted file mode 100644
index ce54a920f5dd..000000000000
--- a/Documentation/Books/AQL/Tutorial/Traversal.md
+++ /dev/null
@@ -1,307 +0,0 @@
-Traversal
-=========
-
-Relations such as those between parents and children can be modeled as a graph.
-In ArangoDB, two documents (a parent and a child character document) can be
-linked by an edge document. Edge documents are stored in edge collections and
-have two additional attributes: `_from` and `_to`. They reference any two
-documents by their document IDs (`_id`).
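-
-For example, an edge document connecting a child character to a parent
-character could look like this sketch (the document IDs are illustrative and
-will differ in your installation):
-
-```json
-{
-  "_from": "Characters/2901776",
-  "_to": "Characters/2861650"
-}
-```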
-
-ChildOf relations
------------------
-
-Our characters have the following relations between parents and children
-(first names only for a better overview):
-
-```
- Robb -> Ned
- Sansa -> Ned
- Arya -> Ned
- Bran -> Ned
- Jon -> Ned
- Robb -> Catelyn
- Sansa -> Catelyn
- Arya -> Catelyn
- Bran -> Catelyn
- Jaime -> Tywin
- Cersei -> Tywin
- Tyrion -> Tywin
- Joffrey -> Jaime
- Joffrey -> Cersei
-```
-
-Visualized as graph:
-
-![ChildOf graph visualization](ChildOf_Graph.png)
-
-Creating the edges
-------------------
-
-To create the required edge documents to store these relations in the database,
-we can run a query that combines joining and filtering to match up the right
-character documents, then use their `_id` attribute to insert an edge into an
-edge collection *ChildOf*.
-
-First off, create a new collection with the name *ChildOf* and make sure you
-change the collection type to **Edge**.
-
-![Create ChildOf edge collection](ChildOf_Collection_Creation.png)
-
-Then run the following query:
-
-```js
-LET data = [
- {
- "parent": { "name": "Ned", "surname": "Stark" },
- "child": { "name": "Robb", "surname": "Stark" }
- }, {
- "parent": { "name": "Ned", "surname": "Stark" },
- "child": { "name": "Sansa", "surname": "Stark" }
- }, {
- "parent": { "name": "Ned", "surname": "Stark" },
- "child": { "name": "Arya", "surname": "Stark" }
- }, {
- "parent": { "name": "Ned", "surname": "Stark" },
- "child": { "name": "Bran", "surname": "Stark" }
- }, {
- "parent": { "name": "Catelyn", "surname": "Stark" },
- "child": { "name": "Robb", "surname": "Stark" }
- }, {
- "parent": { "name": "Catelyn", "surname": "Stark" },
- "child": { "name": "Sansa", "surname": "Stark" }
- }, {
- "parent": { "name": "Catelyn", "surname": "Stark" },
- "child": { "name": "Arya", "surname": "Stark" }
- }, {
- "parent": { "name": "Catelyn", "surname": "Stark" },
- "child": { "name": "Bran", "surname": "Stark" }
- }, {
- "parent": { "name": "Ned", "surname": "Stark" },
- "child": { "name": "Jon", "surname": "Snow" }
- }, {
- "parent": { "name": "Tywin", "surname": "Lannister" },
- "child": { "name": "Jaime", "surname": "Lannister" }
- }, {
- "parent": { "name": "Tywin", "surname": "Lannister" },
- "child": { "name": "Cersei", "surname": "Lannister" }
- }, {
- "parent": { "name": "Tywin", "surname": "Lannister" },
- "child": { "name": "Tyrion", "surname": "Lannister" }
- }, {
- "parent": { "name": "Cersei", "surname": "Lannister" },
- "child": { "name": "Joffrey", "surname": "Baratheon" }
- }, {
- "parent": { "name": "Jaime", "surname": "Lannister" },
- "child": { "name": "Joffrey", "surname": "Baratheon" }
- }
-]
-
-FOR rel in data
- LET parentId = FIRST(
- FOR c IN Characters
- FILTER c.name == rel.parent.name
- FILTER c.surname == rel.parent.surname
- LIMIT 1
- RETURN c._id
- )
- LET childId = FIRST(
- FOR c IN Characters
- FILTER c.name == rel.child.name
- FILTER c.surname == rel.child.surname
- LIMIT 1
- RETURN c._id
- )
- FILTER parentId != null AND childId != null
- INSERT { _from: childId, _to: parentId } INTO ChildOf
- RETURN NEW
-```
-
-The character documents don't have user-defined keys. If they had, we could
-create the edges more easily, like this:
-
-```js
-INSERT { _from: "Characters/robb", _to: "Characters/ned" } INTO ChildOf
-```
-
-However, creating the edges programmatically based on character names is a
-good exercise. Breakdown of the query:
-
-- Assign the relations in form of an array of objects with a *parent* and
- a *child* attribute each, both with sub-attributes *name* and *surname*,
- to a variable `data`
-- For each element in this array, assign a relation to a variable `rel` and
- execute the subsequent instructions
-- Assign the result of an expression to a variable `parentId`
- - Take the first element of a sub-query result (sub-queries are enclosed
- by parentheses, but here they are also a function call)
- - For each document in the Characters collection, assign the document
- to a variable `c`
- - Apply two filter conditions: the name in the character document must
- equal the parent name in `rel`, and the surname must also equal the
-    surname given in the relations data
- - Stop after the first match for efficiency
- - Return the ID of the character document (the result of the sub-query
- is an array with one element, `FIRST()` takes this element and assigns
- it to the `parentId` variable)
-- Assign the result of an expression to a variable `childId`
- - A sub-query is used to find the child character document and the ID is
- returned, in the same way as the parent document ID (see above)
-- If either or both of the sub-queries were unable to find a match, skip the
- current relation, because two IDs for both ends of an edge are required to
- create one (this is only a precaution)
-- Insert a new edge document into the ChildOf collection, with the edge going
- from `childId` to `parentId` and no other attributes
-- Return the new edge document (optional)
-
-Traverse to the parents
------------------------
-
-Now that edges link character documents (vertices), we have a graph we can
-query to find out who the parents are of another character – or in
-graph terms, we want to start at a vertex and follow the edges to other
-vertices in an [AQL graph traversal](../Graphs/Traversals.md):
-
-```js
-FOR v IN 1..1 OUTBOUND "Characters/2901776" ChildOf
- RETURN v.name
-```
-
-This `FOR` loop doesn't iterate over a collection or an array, it walks the
-graph and iterates over the connected vertices it finds, with the vertex
-document assigned to a variable (here: `v`). It can also emit the edges it
-walked as well as the full path from start to end to
-[another two variables](../Graphs/Traversals.md#syntax).
-
-In the above query, the traversal is restricted to a minimum and maximum traversal
-depth of 1 (how many steps to take from the start vertex), and to only follow
-edges in `OUTBOUND` direction. Our edges point from child to parent, and the
-parent is one step away from the child, thus the query gives us the parents of the
-child we start at. `"Characters/2901776"` is that start vertex. Note that the
-document ID will be different for you, so please adjust it to the actual ID of
-e.g. the Bran Stark document in your installation:
-
-```js
-FOR c IN Characters
- FILTER c.name == "Bran"
- RETURN c._id
-```
-
-```json
-[ "Characters/" ]
-```
-
-You may also combine this query with the traversal directly, to easily change
-the start vertex by adjusting the filter condition(s):
-
-```js
-FOR c IN Characters
- FILTER c.name == "Bran"
- FOR v IN 1..1 OUTBOUND c ChildOf
- RETURN v.name
-```
-
-The start vertex is followed by `ChildOf`, which is our edge collection. The
-example query returns only the name of each parent to keep the result short:
-
-```json
-[
- "Ned",
- "Catelyn"
-]
-```
-
-The same result will be returned for Robb, Arya and Sansa as starting point.
-For Jon Snow, it will only be Ned.
-
-Traverse to the children
-------------------------
-
-We can also walk from a parent in reverse edge direction (`INBOUND` that is)
-to the children:
-
-```js
-FOR c IN Characters
- FILTER c.name == "Ned"
- FOR v IN 1..1 INBOUND c ChildOf
- RETURN v.name
-```
-
-```json
-[
- "Robb",
- "Sansa",
- "Jon",
- "Arya",
- "Bran"
-]
-```
-
-Traverse to the grandchildren
------------------------------
-
-For the Lannister family, we have relations that span from parent to
-grandchild. Let's change the traversal depth to return grandchildren,
-which means to go exactly two steps:
-
-```js
-FOR c IN Characters
- FILTER c.name == "Tywin"
- FOR v IN 2..2 INBOUND c ChildOf
- RETURN v.name
-```
-
-```json
-[
- "Joffrey",
- "Joffrey"
-]
-```
-
-It might be a bit unexpected that Joffrey is returned twice. However, if you
-look at the graph visualization, you can see that multiple paths lead from
-Joffrey (bottom right) to Tywin:
-
-![ChildOf graph visualization](ChildOf_Graph.png)
-
-```
-Tywin <- Jaime <- Joffrey
-Tywin <- Cersei <- Joffrey
-```
-
-As a quick fix, change the last line of the query to `RETURN DISTINCT v.name`
-to return each value only once. Keep in mind, though, that there are
-[traversal options](../Graphs/Traversals.md#syntax) to suppress duplicate
-vertices early on.
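-
-A sketch of that approach, assuming the `bfs` and `uniqueVertices` traversal
-options are available in your version:
-
-```js
-FOR c IN Characters
-  FILTER c.name == "Tywin"
-  FOR v IN 2..2 INBOUND c ChildOf
-    OPTIONS { bfs: true, uniqueVertices: "global" }
-    RETURN v.name
-```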
-
-Also check out the
-[ArangoDB Graph Course](https://www.arangodb.com/arangodb-graph-course)
-which covers the basics, but also explains different traversal options
-and advanced graph queries.
-
-Traverse with variable depth
-----------------------------
-
-To return the parents and grandparents of Joffrey, we can walk edges in
-`OUTBOUND` direction and adjust the traversal depth to go at least 1 step,
-and 2 at most:
-
-```js
-FOR c IN Characters
- FILTER c.name == "Joffrey"
- FOR v IN 1..2 OUTBOUND c ChildOf
- RETURN DISTINCT v.name
-```
-
-```json
-[
- "Cersei",
- "Tywin",
- "Jaime"
-]
-```
-
-If we had deeper family trees, it would only be a matter of changing the depth
-values to query for great-grandchildren and similar relations.
-
-
-
\ No newline at end of file
diff --git a/Documentation/Books/AQL/Views/ArangoSearch/README.md b/Documentation/Books/AQL/Views/ArangoSearch/README.md
deleted file mode 100644
index 416692900afd..000000000000
--- a/Documentation/Books/AQL/Views/ArangoSearch/README.md
+++ /dev/null
@@ -1,653 +0,0 @@
-ArangoSearch Views in AQL
-=========================
-
-Views of type `arangosearch` are an integration layer meant to seamlessly
-integrate with and natively expose the full power of the
-[IResearch library](https://github.com/iresearch-toolkit/iresearch)
-to the ArangoDB user.
-
-They provide the capability to:
-
-- evaluate documents located in different collections together
-- search documents based on AQL boolean expressions and functions
-- sort the result set based on how closely each document matched the search
-
-Overview and Significance
--------------------------
-
-Looking up documents in an ArangoSearch View is done via the `FOR` keyword:
-
-```js
-FOR doc IN someView
- ...
-```
-
-`FOR` operations over ArangoSearch Views have an additional, optional `SEARCH`
-keyword:
-
-```js
-FOR doc IN someView
- SEARCH searchExpression
-```
-
-ArangoSearch views cannot be used as edge collections in traversals:
-
-```js
-FOR v IN 1..3 ANY startVertex someView /* invalid! */
-```
-
-### SEARCH
-
-`SEARCH` expressions look a lot like `FILTER` operations, but have some notable
-differences.
-
-First of all, filters and functions in `SEARCH`, when applied to documents
-_emitted from an ArangoSearch View_, work _only_ on attributes linked in the
-view.
-
-For example, given a collection `myCol` with the following documents:
-
-```js
-[
- { someAttr: 'One', anotherAttr: 'One' },
- { someAttr: 'Two', anotherAttr: 'Two' }
-]
-```
-
-and a view `myView`, where `someAttr` is indexed as follows:
-
-```js
-{
- "type": "arangosearch",
- "links": {
- "myCol": {
- "fields": {
- "someAttr": {}
- }
- }
- }
-}
-```
-
-Then, a search on `someAttr` yields the following result:
-
-```js
-FOR doc IN myView
- SEARCH doc.someAttr == 'One'
- RETURN doc
-```
-
-```js
-[ { someAttr: 'One', anotherAttr: 'One' } ]
-```
-
-While a search on `anotherAttr` yields an empty result:
-
-```js
-FOR doc IN myView
- SEARCH doc.anotherAttr == 'One'
- RETURN doc
-```
-
-```js
-[]
-```
-
-- This only applies to the expression after the `SEARCH` keyword.
-- This only applies to tests regarding documents emitted from a view. Other
- tests are not affected.
-- In order to search all attributes of a linked source, the special
- `includeAllFields` [link property](../../../Manual/Views/ArangoSearch/DetailedOverview.html#link-properties)
- was designed.
-
-### SORT
-
-The document search via the `SEARCH` keyword and the sorting via the
-ArangoSearch functions, namely `BM25()` and `TFIDF()`, are closely intertwined.
-The query given in the `SEARCH` expression is not only used to filter documents,
-but is also used by the sorting functions to decide which document matches
-the query best. Other documents in the view also affect this decision.
-
-Therefore the ArangoSearch sorting functions can work _only_ on documents
-emitted from a view, as both the corresponding `SEARCH` expression and the view
-itself are consulted in order to sort the results.
-
-The `BOOST()` function, described below, can be used to fine-tune the resulting
-ranking by weighing sub-expressions in `SEARCH` differently.
-
-### Arrays and trackListPositions
-
-Unless [**trackListPositions**](../../../Manual/Views/ArangoSearch/DetailedOverview.html#link-properties)
-is set to `true`, which it is not by default, arrays behave differently. Namely,
-they behave like a disjunctive superposition of their values. This is best
-shown with an example.
-
-With `trackListPositions: false`, which is the default, and given a document
-`doc` containing
-
-```js
-{ attr: [ 'valueX', 'valueY', 'valueZ' ] }
-```
-
-in a `SEARCH` clause, the expression
-
-```js
-doc.attr == 'valueX'
-```
-
-will be true, as will be
-
-```js
-doc.attr == 'valueY'
-```
-
-and `== valueZ`. With `trackListPositions: true`,
-
-```js
-doc.attr[0] == 'valueX'
-```
-
-would work as usual.
-
-### Comparing analyzed fields
-
-As described in [value analysis](#arangosearch-value-analysis), when a field is
-processed by a specific analyzer, comparison tests are done per word. For
-example, given the field `text` is analyzed with `"text_en"` and contains the
-string `"a quick brown fox jumps over the lazy dog"`, the following expression
-will be true:
-
-```js
-ANALYZER(d.text == 'fox', "text_en")
-```
-
-Note also, that the words analyzed in the text are stemmed, so this is also
-true:
-
-```js
-ANALYZER(d.text == 'jump', "text_en")
-```
-
-So a comparison will actually test if a word is contained in the text. With
-`trackListPositions: false`, this means for arrays that the word may be
-contained in any element of the array. For example, given
-
-```js
-d.text = [ "a quick", "brown fox", "jumps over the", "lazy dog"]
-```
-
-the following will be true:
-
-```js
-ANALYZER(d.text == 'jump', "text_en")
-```
-
-ArangoSearch value analysis
----------------------------
-
-Value 'analysis' is a concept meant to break up a given value into
-a set of sub-values, internally tied together by metadata which influences both
-the search and sort stages to provide the most appropriate match for the
-specified conditions, similar to queries to web search engines.
-
-In plain terms this means a user can for example:
-
-- request documents where the 'body' attribute best matches 'a quick brown fox'
-- request documents where the 'dna' attribute best matches a DNA sub sequence
-- request documents where the 'name' attribute best matches gender
-- etc. (via custom analyzers)
-
-To a limited degree the concept of 'analysis' is even available in
-non-ArangoSearch AQL, e.g. the TOKENS(...) function will utilize the power of
-IResearch to break up a value into an AQL array that can be used anywhere in the
-AQL query.
-
-In plain terms this means a user can match a document attribute when its
-value matches at least one entry from a set,
-e.g. to match docs with 'word == quick' OR 'word == brown' OR 'word == fox'
-
- FOR doc IN someCollection
- FILTER doc.word IN TOKENS('a quick brown fox', 'text_en')
- RETURN doc
-
-ArangoSearch filters
---------------------
-
-The basic ArangoSearch functionality can be accessed via the `SEARCH` statement
-with common AQL filters and operators, e.g.:
-
-- `AND`
-- `OR`
-- `NOT`
-- `==`
-- `<=`
-- `>=`
-- `<`
-- `>`
-- `!=`
-- `IN`
-
-However, the full power of ArangoSearch is harnessed and exposed via functions,
-during both the search and sort stages.
-
-Note that the `SEARCH` statement, in contrast to `FILTER`, is meant to be treated
-as a part of the `FOR` operation, not as an individual statement.
-
-The supported AQL context functions are:
-
-### ANALYZER()
-
-`ANALYZER(searchExpression, analyzer)`
-
-Override the analyzer in the context of **searchExpression** with another one,
-denoted by the specified **analyzer** argument, making it available to search
-functions.
-
-- *searchExpression* - any valid search expression
-- *analyzer* - string with the analyzer to imbue, i.e. *"text_en"* or one of the
- other [available string analyzers](../../../Manual/Views/ArangoSearch/Analyzers.html)
-
-By default, the context uses the `Identity` analyzer.
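-
-As a minimal sketch (assuming a view `someView` with a `text` attribute linked
-using the `text_en` analyzer):
-
-```js
-FOR doc IN someView
-  SEARCH ANALYZER(doc.text == 'quick', 'text_en')
-  RETURN doc
-```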
-
-### BOOST()
-
-`BOOST(searchExpression, boost)`
-
-Override the boost factor in the context of **searchExpression** with the
-specified value, making it available to scorer functions.
-
-- *searchExpression* - any valid search expression
-- *boost* - numeric boost value
-
-By default, the context has a boost value of `1.0`.
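-
-As a sketch, `BOOST()` could be combined with a scorer like this (again
-assuming a view `someView` with a `text` attribute linked using `text_en`;
-documents matching 'fox' get a higher weight than those matching 'dog'):
-
-```js
-FOR doc IN someView
-  SEARCH ANALYZER(BOOST(doc.text == 'fox', 2.0) OR doc.text == 'dog', 'text_en')
-  SORT BM25(doc) DESC
-  RETURN doc
-```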
-
-The supported search functions are:
-
-### EXISTS()
-
-Note: Will only match values when the specified attribute has been processed
-with the link property **storeValues** set to **"id"** (by default it's
-**"none"**).
-
-`EXISTS(doc.someAttr)`
-
-Match documents **doc** where the attribute **someAttr** exists in the
-document.
-
-This also works with sub-attributes, e.g.
-
-`EXISTS(doc.someAttr.anotherAttr)`
-
-as long as the field is processed by the view with **storeValues** not
-**none**.
-
-`EXISTS(doc.someAttr, "analyzer", analyzer)`
-
-Match documents where **doc.someAttr** exists in the document _and_ was indexed
-by the specified **analyzer**. **analyzer** is optional and defaults to the
-current context analyzer (e.g. specified by `ANALYZER` function).
-
-`EXISTS(doc.someAttr, type)`
-
-Match documents where **doc.someAttr** exists in the document and is of the
-specified type.
-
-- *doc.someAttr* - the path of the attribute to exist in the document
-- *analyzer* - string with the analyzer used, i.e. *"text_en"* or one of the
- other [available string analyzers](../../../Manual/Views/ArangoSearch/Analyzers.html)
-- *type* - data type as string; one of:
- - **bool**
- - **boolean**
- - **numeric**
- - **null**
- - **string**
-
-If **analyzer** isn't specified, the current context analyzer (e.g. as
-specified by the `ANALYZER()` function) will be used.
-
-### PHRASE()
-
-```
-PHRASE(doc.someAttr,
- phrasePart [, skipTokens] [, phrasePart | , phrasePart, skipTokens]*
- [, analyzer])
-```
-
-Search for a phrase in the referenced attributes.
-
-The phrase can be expressed as an arbitrary number of *phraseParts* separated by
-*skipToken* number of tokens.
-
-- *doc.someAttr* - the path of the attribute to compare against in the document
-- *phrasePart* - a string to search in the token stream; may consist of several
- words; will be split using the specified *analyzer*
-- *skipTokens* - number of words or tokens to treat as wildcards
-- *analyzer* - string with the analyzer used, i.e. *"text_en"* or one of the
- other [available string analyzers
- ](../../../Manual/Views/ArangoSearch/Analyzers.html)
-
-For example, given a document `doc` containing the text `"Lorem ipsum dolor sit
-amet, consectetur adipiscing elit"`, the following expression will be `true`:
-
-```js
-PHRASE(doc.text, "ipsum", 1, "sit", 2, "adipiscing", "text_de")
-```
-
-Specifying deep attributes like `doc.some.deep.attr` is also allowed. The
-attribute has to be processed by the view as specified in the link.
-
-### STARTS_WITH()
-
-`STARTS_WITH(doc.someAttr, prefix)`
-
-Match the value of the **doc.someAttr** that starts with **prefix**
-
-- *doc.someAttr* - the path of the attribute to compare against in the document
-- *prefix* - a string to search at the start of the text
-
-Specifying deep attributes like `doc.some.deep.attr` is also allowed. The
-attribute has to be processed by the view as specified in the link.
-
-### TOKENS()
-
-`TOKENS(input, analyzer)`
-
-Split the **input** string with the help of the specified **analyzer** into an
-array. The resulting array can e.g. be used in subsequent `FILTER` or `SEARCH`
-statements with the **IN** operator. This can be used to better understand how
-the specific analyzer is going to behave.
-
-- *input* - string to tokenize
-- *analyzer* - one of the [available string analyzers](../../../Manual/Views/ArangoSearch/Analyzers.html)
-
-### MIN_MATCH()
-
-`MIN_MATCH(searchExpression [, searchExpression]*, minMatchCount)`
-
-Match documents where at least **minMatchCount** of the specified
-**searchExpression**s are satisfied.
-
-- *searchExpression* - any valid search expression
-- *minMatchCount* - minimum number of *searchExpression*s that should be
- satisfied
-
-For example,
-
-```js
-MIN_MATCH(doc.text == 'quick', doc.text == 'brown', doc.text == 'fox', 2)
-```
-
-if `doc.text`, as analyzed by the current analyzer, contains at least 2 out of
-'quick', 'brown' and 'fox', it will be matched.
-
-### Searching examples
-
-to match documents which have a 'name' attribute
-
- FOR doc IN someView SEARCH EXISTS(doc.name)
- RETURN doc
-
-or
-
- FOR doc IN someView SEARCH EXISTS(doc['name'])
- RETURN doc
-
-to match documents where 'body' was analyzed via the 'text_en' analyzer
-
- FOR doc IN someView SEARCH EXISTS(doc.body, 'analyzer', 'text_en')
- RETURN doc
-
-or
-
- FOR doc IN someView SEARCH EXISTS(doc['body'], 'analyzer', 'text_en')
- RETURN doc
-
-or
-
- FOR doc IN someView SEARCH ANALYZER(EXISTS(doc['body'], 'analyzer'), 'text_en')
- RETURN doc
-
-to match documents which have an 'age' attribute of type number
-
- FOR doc IN someView SEARCH EXISTS(doc.age, 'numeric')
- RETURN doc
-
-or
-
- FOR doc IN someView SEARCH EXISTS(doc['age'], 'numeric')
- RETURN doc
-
-to match documents where 'description' contains word 'quick' or word
-'brown' and has been analyzed with 'text_en' analyzer
-
- FOR doc IN someView SEARCH ANALYZER(doc.description == 'quick' OR doc.description == 'brown', 'text_en')
- RETURN doc
-
-to match documents where 'description' contains at least 2 of 3 words 'quick',
-'brown', 'fox' and has been analyzed with 'text_en' analyzer
-
- FOR doc IN someView SEARCH ANALYZER(
- MIN_MATCH(doc.description == 'quick', doc.description == 'brown', doc.description == 'fox', 2),
- 'text_en'
- )
- RETURN doc
-
-to match documents where 'description' contains a phrase 'quick brown'
-
- FOR doc IN someView SEARCH PHRASE(doc.description, [ 'quick brown' ], 'text_en')
- RETURN doc
-
-or
-
- FOR doc IN someView SEARCH PHRASE(doc['description'], [ 'quick brown' ], 'text_en')
- RETURN doc
-
-or
-
- FOR doc IN someView SEARCH ANALYZER(PHRASE(doc['description'], [ 'quick brown' ]), 'text_en')
- RETURN doc
-
-to match documents where 'body' contains the phrase consisting of a sequence
-like this:
-'quick' * 'fox jumps' (where the asterisk can be any single word)
-
- FOR doc IN someView SEARCH PHRASE(doc.body, [ 'quick', 1, 'fox jumps' ], 'text_en')
- RETURN doc
-
-or
-
- FOR doc IN someView SEARCH PHRASE(doc['body'], [ 'quick', 1, 'fox jumps' ], 'text_en')
- RETURN doc
-
-or
-
- FOR doc IN someView SEARCH ANALYZER(PHRASE(doc['body'], [ 'quick', 1, 'fox jumps' ]), 'text_en')
- RETURN doc
-
-to match documents where 'story' starts with 'In the beginning'
-
-    FOR doc IN someView SEARCH STARTS_WITH(doc.story, 'In the beginning')
-      RETURN doc
-
-or
-
-    FOR doc IN someView SEARCH STARTS_WITH(doc['story'], 'In the beginning')
-      RETURN doc
-
-to watch the analyzer doing its work
-
- RETURN TOKENS('a quick brown fox', 'text_en')
-
-to match documents where 'description' best matches 'a quick brown fox'
-
- FOR doc IN someView SEARCH ANALYZER(doc.description IN TOKENS('a quick brown fox', 'text_en'), 'text_en')
- RETURN doc
-
-ArangoSearch sorting
---------------------
-
-A major feature of ArangoSearch Views is their capability of sorting results
-based on the creation-time search conditions and zero or more sorting functions.
-The ArangoSearch sorting functions available are `TFIDF()` and `BM25()`.
-
-Note: The first argument to any ArangoSearch sorting function is _always_ the
-document emitted by a `FOR` operation over an ArangoSearch View.
-
-Note: An ArangoSearch sorting function is _only_ allowed as an argument to a
-`SORT` operation. But they can be mixed with other arguments to `SORT`.
-
-So the following examples are valid:
-
-```js
-FOR doc IN someView
- SORT TFIDF(doc)
-```
-
-```js
-FOR a IN viewA
- FOR b IN viewB
- SORT BM25(a), TFIDF(b)
-```
-
-```js
-FOR a IN viewA
- FOR c IN someCollection
- FOR b IN viewB
- SORT TFIDF(b), c.name, BM25(a)
-```
-
-while these will _not_ work:
-
-```js
-FOR doc IN someCollection
- SORT TFIDF(doc) // !!! Error
-```
-```js
-FOR doc IN someCollection
- RETURN BM25(doc) // !!! Error
-```
-```js
-FOR doc IN someCollection
- SORT BM25(doc.someAttr) // !!! Error
-```
-```js
-FOR doc IN someView
- SORT TFIDF("someString") // !!! Error
-```
-```js
-FOR doc IN someView
- SORT BM25({some: obj}) // !!! Error
-```
-
-The following sorting methods are available:
-
-### Literal sorting
-
-You can sort documents by simply specifying arbitrary values or expressions, as
-you do in other places.
-
-### BM25()
-
-`BM25(doc, k, b)`
-
-- *doc* (document): must be emitted by `FOR doc IN someView`
-- *k* (number, _optional_): calibrates the text term frequency scaling, the default is
-_1.2_. A *k* value of _0_ corresponds to a binary model (no term frequency), and a large
-value corresponds to using raw term frequency
-- *b* (number, _optional_): determines the scaling by the total text length, the default
-is _0.75_. At the extreme values of the coefficient *b*, BM25 turns into ranking
-functions known as BM11 (for *b* = `1`, corresponds to fully scaling the term weight by
-the total text length) and BM15 (for *b* = `0`, corresponds to no length normalization)
-
-Sorts documents using the [**Best Matching 25** algorithm](https://en.wikipedia.org/wiki/Okapi_BM25).
-See the [`BM25()` section in ArangoSearch Scorers](../../../Manual/Views/ArangoSearch/Scorers.html)
-for details.
-
-### TFIDF()
-
-`TFIDF(doc, withNorms)`
-
-- *doc* (document): must be emitted by `FOR doc IN someView`
-- *withNorms* (bool, _optional_): specifying whether scores should be
- normalized, the default is _false_
-
-Sorts documents using the
-[**term frequency–inverse document frequency** algorithm](https://en.wikipedia.org/wiki/TF-IDF).
-See the
-[`TFIDF()` section in ArangoSearch Scorers](../../../Manual/Views/ArangoSearch/Scorers.html)
-for details.
-
-
-### Sorting examples
-
-to sort documents by the value of the 'name' attribute
-
- FOR doc IN someView
- SORT doc.name
- RETURN doc
-
-or
-
- FOR doc IN someView
- SORT doc['name']
- RETURN doc
-
-to sort documents via the
-[BM25 algorithm](https://en.wikipedia.org/wiki/Okapi_BM25)
-
- FOR doc IN someView
- SORT BM25(doc)
- RETURN doc
-
-to sort documents via the
-[BM25 algorithm](https://en.wikipedia.org/wiki/Okapi_BM25)
-with 'k' = 1.2 and 'b' = 0.75
-
- FOR doc IN someView
- SORT BM25(doc, 1.2, 0.75)
- RETURN doc
-
-to sort documents via the
-[TFIDF algorithm](https://en.wikipedia.org/wiki/TF-IDF)
-
- FOR doc IN someView
- SORT TFIDF(doc)
- RETURN doc
-
-to sort documents via the
-[TFIDF algorithm](https://en.wikipedia.org/wiki/TF-IDF) with norms
-
- FOR doc IN someView
- SORT TFIDF(doc, true)
- RETURN doc
-
-to sort documents by value of 'name' and then by the
-[TFIDF algorithm](https://en.wikipedia.org/wiki/TF-IDF) where 'name' values are
-equivalent
-
- FOR doc IN someView
- SORT doc.name, TFIDF(doc)
- RETURN doc
-
-
-Use cases
----------
-
-### Prefix search
-
-The data contained in our view looks like this:
-
-```json
-{ "id": 1, "body": "ThisIsAVeryLongWord" }
-{ "id": 2, "body": "ThisIsNotSoLong" }
-{ "id": 3, "body": "ThisIsShorter" }
-{ "id": 4, "body": "ThisIs" }
-{ "id": 5, "body": "ButNotThis" }
-```
-
-We now want to search for documents where the attribute `body` starts with "ThisIs".
-
-A simple AQL query executing this prefix search:
-
- FOR doc IN someView SEARCH STARTS_WITH(doc.body, 'ThisIs')
- RETURN doc
-
-It will find the documents with the ids `1`, `2`, `3`, `4`, but not `5`.
diff --git a/Documentation/Books/AQL/Views/README.md b/Documentation/Books/AQL/Views/README.md
deleted file mode 100644
index 1e8f19d78db3..000000000000
--- a/Documentation/Books/AQL/Views/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
-Views in AQL
-============
-
-Conceptually a **view** is just another document data source, similar to an
-array or a document/edge collection, e.g.:
-
-```js
-FOR doc IN exampleView SEARCH ...
- FILTER ...
- SORT ...
- RETURN ...
-```
-
-In contrast to collections, views have an additional, optional `SEARCH` keyword:
-
-```js
-FOR doc IN exampleView
- SEARCH ...
- FILTER ...
- SORT ...
- RETURN ...
-```
-
-A view is meant to be an abstraction over a transformation applied to documents
-of zero or more collections. The transformation is view-implementation specific
-and may even be as simple as an identity transformation, thus making the view
-represent all documents available in the specified set of collections.
-
-Views can be defined and administered on a per view-type basis via
-the [web interface](../../Manual/Programs/WebInterface/index.html).
-
-Currently there is a single supported view implementation, namely
-`arangosearch` as described in [ArangoSearch View](ArangoSearch/README.md).
-
-Also see the detailed
-[ArangoSearch tutorial](https://www.arangodb.com/tutorials/arangosearch/)
-to learn more.
diff --git a/Documentation/Books/AQL/book.json b/Documentation/Books/AQL/book.json
deleted file mode 100644
index d6e5303842ba..000000000000
--- a/Documentation/Books/AQL/book.json
+++ /dev/null
@@ -1,56 +0,0 @@
-{
- "gitbook": "^3.2.2",
- "title": "ArangoDB VERSION_NUMBER AQL Documentation",
- "version": "VERSION_NUMBER",
- "author": "ArangoDB GmbH",
- "description": "Official AQL manual for ArangoDB - the native multi-model NoSQL database",
- "language": "en",
- "plugins": [
- "-search",
- "-lunr",
- "-sharing",
- "toggle-chapters",
- "addcssjs",
- "anchorjs",
- "sitemap-general@git+https://github.com/Simran-B/gitbook-plugin-sitemap-general.git",
- "ga",
- "callouts@git+https://github.com/Simran-B/gitbook-plugin-callouts.git",
- "edit-link",
- "page-toc@git+https://github.com/Simran-B/gitbook-plugin-page-toc.git",
- "localized-footer"
- ],
- "pdf": {
- "fontSize": 12,
- "toc": true,
- "margin": {
- "right": 60,
- "left": 60,
- "top": 35,
- "bottom": 35
- }
- },
- "styles": {
- "website": "styles/website.css"
- },
- "pluginsConfig": {
- "addcssjs": {
- "js": ["styles/header.js", "styles/hs.js"],
- "css": ["styles/header.css"]
- },
- "sitemap-general": {
- "prefix": "https://docs.arangodb.com/devel/AQL/",
- "changefreq": "@GCHANGE_FREQ@",
- "priority": @GPRIORITY@
- },
- "ga": {
- "token": "UA-81053435-2"
- },
- "edit-link": {
- "base": "https://github.com/arangodb/arangodb/edit/devel/Documentation/Books/AQL",
- "label": "Edit Page"
- },
- "localized-footer": {
- "filename": "FOOTER.html"
- }
- }
-}
diff --git a/Documentation/Books/AQL/styles/header.css b/Documentation/Books/AQL/styles/header.css
deleted file mode 100644
index 4ec87c77b0e5..000000000000
--- a/Documentation/Books/AQL/styles/header.css
+++ /dev/null
@@ -1,305 +0,0 @@
-/* Design fix because of the header */
-@import url(https://fonts.googleapis.com/css?family=Roboto:400,500,300,700);
-
-body {
- overflow: hidden;
- font-family: Roboto, Helvetica, sans-serif;
- background: #444444;
-}
-
-.book .book-header h1 a, .book .book-header h1 a:hover {
- display: none;
-}
-
-/* GOOGLE START */
-
-.google-search #gsc-iw-id1{
- border: none !important;
-}
-
-.google-search .gsst_b {
- position: relative;
- top: 10px;
- left: -25px;
- width: 1px;
-}
-
-.gsst_a .gscb_a {
- color: #c01a07 !important;
-}
-
-.google-search input {
- background-color: #fff !important;
- font-family: Roboto, Helvetica, sans-serif;
- font-size: 10pt !important;
- padding-left: 5px !important;
- float: right;
- position: relative;
- top: 8px;
- width: 100% !important;
- height: 30px !important;
-}
-
-.google-search input:active {
-}
-
-.google-search {
- margin-right: 10px;
- margin-left: 10px !important;
- float: right !important;
-}
-
-.google-search td,
-.google-search table,
-.google-search tr,
-.google-search th {
- background-color: #444444 !important;
-}
-
-.google-search .gsc-input-box,
-.google-search .gsc-input-box input {
- border-radius: 3px !important;
- width: 200px;
-}
-
-.gsc-branding-text,
-.gsc-branding-img,
-.gsc-user-defined-text {
- display: none !important;
-}
-
-.google-search .gsc-input-box input {
- font-size: 16px !important;
-}
-
-.google-search .gsc-search-button {
- display: none !important;
-}
-
-.google-search .gsc-control-cse {
- padding: 10px !important;
-}
-
-.google-search > div {
- float: left !important;
- width: 200px !important;
-}
-
-/* GOOGLE END */
-
-.book-summary,
-.book-body {
- margin-top: 48px;
-}
-
-.arangodb-logo, .arangodb-logo-small {
- display: inline;
- float: left;
- padding-top: 12px;
- margin-left: 10px;
-}
-
-.arangodb-logo img {
- height: 23px;
-}
-
-.arangodb-logo-small {
- display: none;
-}
-
-.arangodb-version-switcher {
- width: 65px;
- height: 44px;
- margin-left: 16px;
- float: left;
- display: inline;
- font-weight: bold;
- color: #fff;
- background-color: inherit;
- border: 0;
-}
-
-.arangodb-version-switcher option {
- background-color: white;
- color: black;
-}
-
-
-.arangodb-header {
- position: fixed;
- width: 100%;
- height: 48px;
- z-index: 1;
-}
-
-.arangodb-header .socialIcons-googlegroups a img {
- position: relative;
- height: 14px;
- top: 3px;
-}
-
-.arangodb-navmenu {
- display: block;
- float: right;
- margin: 0;
- padding: 0;
-}
-
-.arangodb-navmenu li {
- display: block;
- float: left;
-}
-
-.arangodb-navmenu li a {
- display: block;
- float: left;
- padding: 0 10px;
- line-height: 48px;
- font-size: 16px;
- font-weight: 400;
- color: #fff;
- text-decoration: none;
- font-family: Roboto, Helvetica, sans-serif;
-}
-
-.arangodb-navmenu li.active-tab a, .arangodb-navmenu li a:hover {
- background-color: #88A049 !important;
-}
-
-.downloadIcon {
- margin-right: 10px;
-}
-
-/** simple responsive updates **/
-
-@media screen and (max-width: 1000px) {
- .arangodb-navmenu li a {
- padding: 0 6px;
- }
-
- .arangodb-logo {
- margin-left: 10px;
- }
-
- .google-search {
- margin-right: 5px !important;
- }
-
- .downloadIcon {
- margin-right: 0;
- }
-
- .socialIcons {
- display: none !important;
- }
-}
-
-
-@media screen and (max-width: 800px) {
-
- .google-search,
- .google-search .gsc-input-box,
- .google-search .gsc-input-box input {
- width: 130px !important;
- }
-
- .arangodb-navmenu li a {
- font-size: 15px;
- padding: 0 7px;
- }
-
- .arangodb-logo {
- display: none;
- }
-
- .arangodb-logo-small {
- display: inline;
- margin-left: 10px;
- }
-
- .arangodb-logo-small img {
- height: 20px;
- }
-
- .arangodb-version-switcher {
- margin: 0;
- }
-
-}
-
-@media screen and (max-width: 600px) {
- .arangodb-navmenu li a {
- font-size: 15px;
- padding: 0 7px;
- }
-
- .arangodb-version-switcher,
- .downloadIcon {
- display: none !important;
- }
-
- .google-search,
- .google-search .gsc-input-box,
- .google-search .gsc-input-box input {
- width: 24px !important;
- }
-
- .google-search .gsc-input-box input[style] {
- background: url(https://docs.arangodb.com/assets/searchIcon.png) left center no-repeat rgb(255, 255, 255) !important;
- }
-
- .google-search .gsc-input-box input:focus {
- width: 200px !important;
- position: relative;
- left: -176px;
- background-position: -9999px -9999px !important;
- }
-
-}
-
-@media screen and (max-width: 400px) {
- .arangodb-navmenu li a {
- font-size: 13px;
- padding: 0 5px;
- }
- .google-search {
- display: none;
- }
-}
-
-/*Hubspot Cookie notice */
-
-body div#hs-eu-cookie-confirmation {
- bottom: 0;
- top: auto;
- position: fixed;
- text-align: center !important;
-}
-
-body div#hs-eu-cookie-confirmation.can-use-gradients {
- background-image: linear-gradient(to bottom, rgba(255,255,255,0.9),rgba(255,255,255,0.75));
-}
-
-body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner {
- display: inline-block;
- padding: 15px 18px 0;
-}
-
-body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner #hs-en-cookie-confirmation-buttons-area {
- float: left;
-}
-
-body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner a#hs-eu-confirmation-button {
- background-color: #577138 !important;
- border: none !important;
- text-shadow: none !important;
- box-shadow: none;
- padding: 5px 15px !important;
- margin-left: 10px;
-}
-
-body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner > p {
- float: left;
- color: #000 !important;
- text-shadow: none;
-}
diff --git a/Documentation/Books/AQL/styles/header.js b/Documentation/Books/AQL/styles/header.js
deleted file mode 100644
index b14877f2357c..000000000000
--- a/Documentation/Books/AQL/styles/header.js
+++ /dev/null
@@ -1,161 +0,0 @@
-// Try to set the version number early, jQuery not available yet
-var searcheable_versions = [@BROWSEABLE_VERSIONS@];
-var cx = '@GSEARCH_ID@';
-
-document.addEventListener("DOMContentLoaded", function(event) {
- if (!gitbook.state.root) return;
- var bookVersion = gitbook.state.root.match(/\/(\d\.\d(\.\d)?|devel)\//);
- var switcher = document.getElementsByClassName("arangodb-version-switcher")[0];
- if (bookVersion) {
- switcher.value = bookVersion[1];
- } else {
- switcher.style.display = "none";
- }
-});
-
-window.onload = function(){
-window.localStorage.removeItem(":keyword");
-
-$(document).ready(function() {
-
-function appendHeader() {
- var VERSION_SELECTOR = ""
- var i = 0;
- var prefix;
- for (i = 0; i < searcheable_versions.length; i++ ) {
- if (searcheable_versions[i] === 'devel') {
- prefix = '';
- } else {
- prefix = 'v';
- }
- VERSION_SELECTOR += '' + prefix +
- searcheable_versions[i] +
- ' \n';
- }
-
- var div = document.createElement('div');
- div.innerHTML = '\n';
-
- $('.book').before(div.innerHTML);
-
- };
-
-
- function rerenderNavbar() {
- $('#header').remove();
- appendHeader();
- };
-
- //render header
- rerenderNavbar();
- function addGoogleSrc() {
- var gcse = document.createElement('script');
- gcse.type = 'text/javascript';
- gcse.async = true;
- gcse.src = (document.location.protocol == 'https:' ? 'https:' : 'http:') +
- '//cse.google.com/cse.js?cx=' + cx;
- var s = document.getElementsByTagName('script')[0];
- s.parentNode.insertBefore(gcse, s);
- };
- addGoogleSrc();
-
- $(".arangodb-navmenu a[data-book]").on("click", function(e) {
- e.preventDefault();
- var urlSplit = gitbook.state.root.split("/");
- urlSplit.pop(); // ""
- urlSplit.pop(); // e.g. "Manual"
- window.location.href = urlSplit.join("/") + "/" + e.target.getAttribute("data-book") + "/index.html";
- });
-
- // set again using jQuery to accommodate non-standard browsers (*cough* IE *cough*)
- var bookVersion = gitbook.state.root.match(/\/(\d\.\d(\.\d)?|devel)\//);
- var switcher = $(".arangodb-version-switcher");
- if (bookVersion) {
- switcher.val(bookVersion[1]);
- } else {
- switcher.hide();
- }
-
- $(".arangodb-version-switcher").on("change", function(e) {
- var urlSplit = gitbook.state.root.split("/");
- urlSplit.pop(); // ""
- var currentBook = urlSplit.pop(); // e.g. "Manual"
- urlSplit.pop() // e.g. "3.0"
- if (e.target.value == "2.8") {
- var legacyMap = {
- "Manual": "",
- "AQL": "/Aql",
- "HTTP": "/HttpApi",
- "Cookbook": "/Cookbook"
- };
- currentBook = legacyMap[currentBook];
- } else {
- currentBook = "/" + currentBook;
- }
- window.location.href = urlSplit.join("/") + "/" + e.target.value + currentBook + "/index.html";
- });
-
-});
-
-};
diff --git a/Documentation/Books/AQL/styles/hs.js b/Documentation/Books/AQL/styles/hs.js
deleted file mode 100644
index 9a8ae18a61d2..000000000000
--- a/Documentation/Books/AQL/styles/hs.js
+++ /dev/null
@@ -1,33 +0,0 @@
-// HubSpot Script Loader. Please do not block this resource. See more: http://hubs.ly/H0702_H0
-
-(function (id, src, attrs) {
- if (document.getElementById(id)) {
- try { console.warn('duplicate hubspot script with id: "' + id + '" included on page'); }
- finally { return; }
- }
- var js = document.createElement('script');
- js.src = src;
- js.type = 'text/javascript';
- js.id = id;
- for (var name in attrs) { if(attrs.hasOwnProperty(name)) { js.setAttribute(name, attrs[name]); } }
- var e = document.getElementsByTagName('script')[0];
- e.parentNode.insertBefore(js, e);
-})('hubspot-messages-loader', 'https://js.usemessages.com/messageswidgetshell.js', {"data-loader":"hs-scriptloader","data-hsjs-portal":2482448,"data-hsjs-env":"prod"});
-
-(function (id, src) {
- if (document.getElementById(id)) { return; }
- var js = document.createElement('script');
- js.src = src;
- js.type = 'text/javascript';
- js.id = id;
- var e = document.getElementsByTagName('script')[0];
- e.parentNode.insertBefore(js, e);
-})('hs-analytics', '//js.hs-analytics.net/analytics/1508760300000/2482448.js');
-
-window.setTimeout(function () {
- $('body').on('click', 'a', function () {
- var _hsq = window._hsq = window._hsq || [];
- _hsq.push(['setPath', window.location.pathname]);
- _hsq.push(['trackPageView']);
- });
-}, 1000);
diff --git a/Documentation/Books/AQL/styles/website.css b/Documentation/Books/AQL/styles/website.css
deleted file mode 100644
index 0bbc2f1eff37..000000000000
--- a/Documentation/Books/AQL/styles/website.css
+++ /dev/null
@@ -1,84 +0,0 @@
-.markdown-section small {
- font-size: 80%;
-}
-.markdown-section sub, .markdown-section sup {
- font-size: 75%;
- line-height: 0;
- position: relative;
- vertical-align: baseline;
-}
-.markdown-section sup {
- top: -.5em;
-}
-.markdown-section sub {
- bottom: -.25em;
-}
-
-div.example_show_button {
- border: medium solid lightgray;
- text-align: center;
- position: relative;
- top: -10px;
- display: flex;
- justify-content: center;
-}
-
-.book .book-body .navigation.navigation-next {
- right: 10px !important;
-}
-
-.book .book-summary ul.summary li.active>a,.book .book-summary ul.summary li a:hover {
- color: #fff !important;
- background: #80A54D !important;
- text-decoration: none;
-}
-
-.book .book-body .page-wrapper .page-inner section.normal .deprecated{
- background-color: rgba(240,240,0,0.4);
-}
-
-.book .book-body section > ul li:last-child {
- margin-bottom: 0.85em;
-}
-
-.book .book-body .alert p:last-child {
- margin-bottom: 0;
-}
-
-.columns-3 {
- -webkit-column-count: 3;
- -moz-column-count: 3;
- -ms-column-count: 3;
- -o-column-count: 3;
- column-count: 3;
- columns: 3;
-}
-
-.localized-footer {
- opacity: 0.5;
-}
-
-.example-container {
- position: relative;
-}
-
-.example-container a.anchorjs-link {
- position: absolute;
- top: 10px;
- right: 10px;
- font: 1em/1 anchorjs-icons;
-}
-
-.gsib_a {
-padding: 0px !important;
-}
-
-.gsc-control-cse {
-border: 0px !important;
-background-color: transparent !important;
-}
-
-
-.gsc-input {
-margin: 0px !important;
-}
diff --git a/Documentation/Books/Cookbook/.gitkeep b/Documentation/Books/Cookbook/.gitkeep
new file mode 100644
index 000000000000..936ca3adc4e3
--- /dev/null
+++ b/Documentation/Books/Cookbook/.gitkeep
@@ -0,0 +1,5 @@
+Git cannot track empty directories.
+This file ensures that the directory is kept.
+
+Some of the old documentation build scripts are still
+used by the new system, which copies files into this folder.
\ No newline at end of file
diff --git a/Documentation/Books/Cookbook/AQL/AvoidingInjection.md b/Documentation/Books/Cookbook/AQL/AvoidingInjection.md
deleted file mode 100644
index 9e1f45d509f1..000000000000
--- a/Documentation/Books/Cookbook/AQL/AvoidingInjection.md
+++ /dev/null
@@ -1,288 +0,0 @@
-Avoiding parameter injection in AQL
-===================================
-
-Problem
--------
-
-I don't want my AQL queries to be affected by parameter injection.
-
-### What is parameter injection?
-
-Parameter injection means that potentially malicious content is inserted into a query,
-and that this injection may change the meaning of the query. It is a security issue
-that may allow an attacker to execute arbitrary queries on the database data.
-
-It often occurs when applications trustingly insert user-provided input into a
-query string without filtering it fully or correctly. It also frequently occurs
-when applications build queries naively, without using the security mechanisms
-provided by the database software or query interface.
-
-Parameter injection examples
-----------------------------
-
-Assembling query strings with simple string concatenation looks trivial,
-but is potentially unsafe. Let's start with a simple query that's fed with some
-dynamic input value, let's say from a web form. A client application or a Foxx
-route happily picks up the input value, and puts it into a query:
-
-```js
-/* evil ! */
-var what = req.params("searchValue"); /* user input value from web form */
-...
-var query = "FOR doc IN collection FILTER doc.value == " + what + " RETURN doc";
-db._query(query, params).toArray();
-```
-
-The above will probably work fine for numeric input values.
-
-What could an attacker do to this query? Here are a few suggestions to use for the
-`searchValue` parameter:
-
-- for returning all documents in the collection: `1 || true`
-- for removing all documents: `1 || true REMOVE doc IN collection //`
-- for inserting new documents: `1 || true INSERT { foo: "bar" } IN collection //`
-
-It should have become obvious that this is extremely unsafe and should be avoided.
-
-A pattern often seen to counteract this is trying to quote and escape potentially
-unsafe input values before putting them into query strings. This may work in some situations,
-but it's easy to overlook something or get it subtly wrong:
-
-```js
-/* we're sanitizing now, but it's still evil ! */
-var value = req.params("searchValue").replace(/'/g, '');
-...
-var query = "FOR doc IN collection FILTER doc.value == '" + value + "' RETURN doc";
-db._query(query, params).toArray();
-```
-
-The above example uses single quotes for enclosing the potentially unsafe user
-input, and also replaces all single quotes in the input value beforehand. Not only may
-that change the user input (leading to subtle errors such as *"why doesn't my search for
-`O'Brien` return any results?"*), but it is also unsafe. If the user input contains
-a backslash at the end (e.g. `foo bar\`), that backslash will escape the closing single
-quote, allowing the user input to break out of the string fence again.
-
-It gets worse if user input is inserted into the query at multiple places. Let's assume
-we have a query with two dynamic values:
-
-```js
-query = "FOR doc IN collection FILTER doc.value == '" + value + "' && doc.type == '" + type + "' RETURN doc";
-```
-
-If an attacker inserted `\` for parameter `value` and ` || true REMOVE doc IN collection //` for
-parameter `type`, then the effective query would become
-
-```
-FOR doc IN collection FILTER doc.value == '\' && doc.type == ' || true REMOVE doc IN collection //' RETURN doc
-```
-
-which is highly undesirable.
-
-
-Solution
---------
-
-Instead of mixing query string fragments with user inputs naively via string
-concatenation, use either **bind parameters** or a **query builder**. Both can
-help to avoid the problem of injection, because they allow separating the actual
-query operations (like `FOR`, `INSERT`, `REMOVE`) from (user input) values.
-
-This recipe focuses on using bind parameters. This is not to say that query
-builders shouldn't be used. They were simply omitted here for the sake of simplicity.
-To get started with using an AQL query builder in ArangoDB or other JavaScript
-environments, have a look at [aqb](https://www.npmjs.com/package/aqb) (which comes
-bundled with ArangoDB). Inside ArangoDB, there are also [Foxx queries](../../Manual/Foxx/index.html)
-which can be combined with aqb.
-
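-For orientation only, here is a minimal sketch of what a query-builder variant
-might look like (assuming aqb's fluent API; the recipe itself sticks to bind
-parameters):
-
-```js
-// hypothetical sketch, not part of the original recipe
-var qb = require('aqb');
-
-var value = "some input value";
-var query = qb.for('doc').in('collection')
-    .filter(qb.eq('doc.value', qb.str(value)))
-    .return('doc');
-
-/* toAQL() renders the query; the user input becomes a quoted AQL string
-   literal instead of being spliced into the query text */
-db._query(query.toAQL()).toArray();
-```
-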
-### What bind parameters are
-
-Bind parameters in AQL queries are special tokens that act as placeholders for
-actual values. Here's an example:
-
-```
-FOR doc IN collection
- FILTER doc.value == @what
- RETURN doc
-```
-
-In the above query, `@what` is a bind parameter. In order to execute this query,
-a value for bind parameter `@what` must be specified. Otherwise query execution will
-fail with error 1551 (*no value specified for declared bind parameter*). If a value
-for `@what` gets specified, the query can be executed. However, the query string
-and the bind parameter values (i.e. the contents of the `@what` bind parameter) will
-be handled separately. What's in the bind parameter will always be treated as a value,
-and it can't get out of its sandbox and change the semantic meaning of a query.
-
-### How bind parameters are used
-
-To execute a query with bind parameters, the query string (containing the bind
-parameters) and the bind parameter values are specified separately (note that when
-the bind parameter value is assigned, the prefix `@` needs to be omitted):
-
-```js
-/* query string with bind parameter */
-var query = "FOR doc IN collection FILTER doc.value == @what RETURN doc";
-
-/* actual value for bind parameter */
-var params = { what: 42 };
-
-/* run query, specifying query string and bind parameter separately */
-db._query(query, params).toArray();
-```
-
-If a malicious user set `@what` to a value of `1 || true`, this wouldn't do
-any harm. AQL would treat the contents of `@what` as a single string token, and
-the meaning of the query would remain unchanged. The actually executed query would be:
-
-```
-FOR doc IN collection
- FILTER doc.value == "1 || true"
- RETURN doc
-```
-
-Thanks to bind parameters it is also impossible to turn a selection (i.e. read-only)
-query into a data deletion query.
-
-### Using JavaScript variables as bind parameters
-
-There is also a template string generator function `aql` that can be used to safely
-(and conveniently) build AQL queries using JavaScript variables and expressions. It
-can be invoked as follows:
-
-```js
-const aql = require('@arangodb').aql; // not needed in arangosh
-
-var value = "some input value";
-var query = aql`FOR doc IN collection
- FILTER doc.value == ${value}
- RETURN doc`;
-var result = db._query(query).toArray();
-```
-
-Note that an ES6 template string is used for populating the `query` variable. The
-string is assembled using the `aql` generator function which is bundled with
-ArangoDB. The template string can contain references to JavaScript variables or
-expressions via `${...}`. In the above example, the query references a variable
-named `value`. The `aql` function generates an object with two separate
-attributes: the query string, containing references to bind parameters, and the actual
-bind parameter values.
-
-Bind parameter names are automatically generated by the `aql` function:
-
-```js
-var value = "some input value";
-aql`FOR doc IN collection FILTER doc.value == ${value} RETURN doc`;
-
-{
- "query" : "FOR doc IN collection FILTER doc.value == @value0 RETURN doc",
- "bindVars" : {
- "value0" : "some input value"
- }
-}
-```
-
-### Using bind parameters in dynamic queries
-
-Bind parameters are helpful, so it makes sense to use them for handling the dynamic values.
-You can even use them for queries that are themselves highly dynamic, for example with conditional
-`FILTER` and `LIMIT` parts. Here's how to do this:
-
-```js
-/* note: this example has a slight issue... hang on reading */
-var query = "FOR doc IN collection";
-var params = { };
-
-if (useFilter) {
- query += " FILTER doc.value == @what";
- params.what = req.params("searchValue");
-}
-
-if (useLimit) {
- /* not quite right, see below */
- query += " LIMIT @offset, @count";
- params.offset = req.params("offset");
- params.count = req.params("count");
-}
-
-query += " RETURN doc";
-db._query(query, params).toArray();
-```
-
-Note that in this example we're back to string concatenation, but without the problem of
-the query being vulnerable to arbitrary modifications.
-
-### Input value validation and sanitation
-
-Still, you should prefer to be paranoid and try to detect invalid input values as early as
-possible, at least before executing a query with them. This is because some input parameters
-may negatively affect the runtime behavior of queries or, when modified, may lead to queries
-throwing runtime errors instead of returning valid results. This is not something an attacker
-should be able to provoke.
-
-`LIMIT` is a good example for this: if used with a single argument, the argument should
-be numeric. When `LIMIT` is given a string value, executing the query will fail. You
-may want to detect this early and not return an HTTP 500 (as this would signal attackers
-that they succeeded in breaking your application).
-
-Another problem with `LIMIT` is that high `LIMIT` values are likely more expensive than low
-ones, and you may want to disallow using `LIMIT` values exceeding a certain threshold.
-
-Here's what you could do in such cases:
-
-```js
-var query = "FOR doc IN collection LIMIT @count RETURN doc";
-
-/* some default value for limit */
-var params = { count: 100 };
-
-if (useLimit) {
- var count = req.params("count");
-
- /* abort if value does not look like an integer */
-  if (! /^\d+$/.test(count)) {
- throw "invalid count value!";
- }
-
- /* actually turn it into an integer */
- params.count = parseInt(count, 10); // turn into numeric value
-}
-
-if (params.count < 1 || params.count > 1000) {
- /* value is outside of accepted thresholds */
- throw "invalid count value!";
-}
-
-db._query(query, params).toArray();
-```
-
-This is a bit more complex, but that's a price you're likely willing to pay for a
-bit of extra safety. In reality you may want to use a framework for validation (such as
-[joi](https://www.npmjs.com/package/joi) which comes bundled with ArangoDB) instead
-of writing your own checks all over the place.
-
-### Bind parameter types
-
-There are two types of bind parameters in AQL:
-
-- bind parameters for values: those are prefixed with a single `@` in AQL queries, and
- are specified without the prefix when they get their value assigned. These bind parameters
- can contain any valid JSON value.
-
- Examples: `@what`, `@searchValue`
-
-- bind parameters for collections: these are prefixed with `@@` in AQL queries, and are
- replaced with the name of a collection. When the bind parameter value is assigned, the
- parameter itself must be specified with a single `@` prefix. Only string values are allowed
- for this type of bind parameters.
-
- Examples: `@@collection`
-
-The latter type of bind parameter is probably not used as often, and it should not be used
-together with user input. Otherwise users may freely determine on which collection your
-AQL queries will operate (note: this may be a valid use case, but normally it is highly
-undesirable).
-
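-As a sketch (not part of the original recipe), both bind parameter types can be
-combined like this; note that the collection bind parameter is assigned using a
-single `@` prefix and a hard-coded collection name rather than user input:
-
-```js
-/* query string with a collection bind parameter and a value bind parameter */
-var query = "FOR doc IN @@collection FILTER doc.value == @what RETURN doc";
-
-/* the @@collection parameter is assigned via the key "@collection" */
-var params = { "@collection": "collection", what: 42 };
-
-db._query(query, params).toArray();
-```
-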
-**Authors**: [Jan Steemann](https://github.com/jsteemann)
-
-**Tags**: #injection #aql #security
diff --git a/Documentation/Books/Cookbook/AQL/CreatingTestData.md b/Documentation/Books/Cookbook/AQL/CreatingTestData.md
deleted file mode 100644
index f7b338b1bf08..000000000000
--- a/Documentation/Books/Cookbook/AQL/CreatingTestData.md
+++ /dev/null
@@ -1,92 +0,0 @@
-Creating test data with AQL
-===========================
-
-Problem
--------
-
-I want to create some test documents.
-
-Solution
---------
-
-If you haven't yet created a collection to hold the documents, create one now using the
-ArangoShell:
-
-```js
-db._create("myCollection");
-```
-
-This has created a collection named *myCollection*.
-
-One of the easiest ways to fill a collection with test data is to use an AQL query that
-iterates over a range.
-
-Run the following AQL query from the **AQL editor** in the web interface to insert 1,000
-documents into the just created collection:
-
-```
-FOR i IN 1..1000
- INSERT { name: CONCAT("test", i) } IN myCollection
-```
-
-The number of documents to create can be modified easily by adjusting the range boundary
-values.
-
-To create more complex test data, adjust the AQL query!
-
-Let's say we also want a `status` attribute, and fill it with integer values between `1`
-and `5` (inclusive), with equal distribution. A good way to achieve this is to use the modulo
-operator (`%`):
-
-```
-FOR i IN 1..1000
- INSERT {
- name: CONCAT("test", i),
- status: 1 + (i % 5)
- } IN myCollection
-```
-
-To create pseudo-random values, use the `RAND()` function. It creates pseudo-random numbers
-between 0 and 1. Use some factor to scale the random numbers, and `FLOOR()` to convert the
-scaled number back to an integer.
-
-For example, the following query populates the `value` attribute with numbers between 100 and
-150 (inclusive):
-
-```
-FOR i IN 1..1000
- INSERT {
- name: CONCAT("test", i),
- value: 100 + FLOOR(RAND() * (150 - 100 + 1))
- } IN myCollection
-```
-
-After the test data has been created, it is often helpful to verify it. The
-`RAND()` function is also a good candidate for retrieving a random sample of the documents in the
-collection. This query will retrieve 10 random documents:
-
-```
-FOR doc IN myCollection
- SORT RAND()
- LIMIT 10
- RETURN doc
-```
-
-The `COLLECT` clause is an easy mechanism to run an aggregate analysis on some attribute. Let's
-say we wanted to verify the data distribution inside the `value` attribute. In this case we
-could run:
-
-```
-FOR doc IN myCollection
- COLLECT value = doc.value WITH COUNT INTO count
- RETURN {
- value: value,
- count: count
- }
-```
-
-The above query will provide the number of documents per distinct `value`.
-
-**Author:** [Jan Steemann](https://github.com/jsteemann)
-
-**Tags**: #aql
diff --git a/Documentation/Books/Cookbook/AQL/DiffingDocuments.md b/Documentation/Books/Cookbook/AQL/DiffingDocuments.md
deleted file mode 100644
index 32741846d47d..000000000000
--- a/Documentation/Books/Cookbook/AQL/DiffingDocuments.md
+++ /dev/null
@@ -1,126 +0,0 @@
-Diffing Two Documents in AQL
-============================
-
-Problem
--------
-
-How to create a `diff` of documents in AQL
-
-Solution
---------
-
-Though there is no built-in AQL function to `diff` two documents, it is easily possible to build your own, as in the following query:
-
-```
-/* input document 1*/
-LET doc1 = {
- "foo" : "bar",
- "a" : 1,
- "b" : 2
-}
-
-/* input document 2 */
-LET doc2 = {
- "foo" : "baz",
- "a" : 2,
- "c" : 3
-}
-
-/* collect attributes present in doc1, but missing in doc2 */
-LET missing = (
- FOR key IN ATTRIBUTES(doc1)
- FILTER ! HAS(doc2, key)
- RETURN {
- [ key ]: doc1[key]
- }
-)
-
-/* collect attributes present in both docs, but that have different values */
-LET changed = (
- FOR key IN ATTRIBUTES(doc1)
- FILTER HAS(doc2, key) && doc1[key] != doc2[key]
- RETURN {
- [ key ] : {
- old: doc1[key],
- new: doc2[key]
- }
- }
-)
-
-/* collect attributes present in doc2, but missing in doc1 */
-LET added = (
- FOR key IN ATTRIBUTES(doc2)
- FILTER ! HAS(doc1, key)
- RETURN {
- [ key ] : doc2[key]
- }
-)
-
-/* return final result */
-RETURN {
- "missing" : missing,
- "changed" : changed,
- "added" : added
-}
-```
-
-**Note**: The query may look a bit lengthy, but much of that is due to formatting. A more terse version can be found below.
-
-The above query will return a document with three attributes:
-
-- _missing_: Contains all attributes only present in first document (i.e. missing in second document)
-- _changed_: Contains all attributes present in both documents that have different values
-- _added_: Contains all attributes only present in second document (i.e. missing in first document)
-
-For the two example documents it will return:
-
-```json
-[
- {
- "missing" : [
- {
- "b" : 2
- }
- ],
- "changed" : [
- {
- "foo" : {
- "old" : "bar",
- "new" : "baz"
- }
- },
- {
- "a" : {
- "old" : 1,
- "new" : 2
- }
- }
- ],
- "added" : [
- {
- "c" : 3
- }
- ]
- }
-]
-```
-
-
-That output format was the first that came to my mind. It is of course possible to adjust the query so it produces a different output format.
-
-Following is a version of the same query that can be invoked from JavaScript easily. It passes the two documents as bind parameters and calls `db._query`. The query is now a one-liner (less readable but easier to copy&paste):
-
-```js
-bindVariables = {
- doc1 : { "foo" : "bar", "a" : 1, "b" : 2 },
- doc2 : { "foo" : "baz", "a" : 2, "c" : 3 }
-};
-
-query = "LET doc1 = @doc1, doc2 = @doc2, missing = (FOR key IN ATTRIBUTES(doc1) FILTER ! HAS(doc2, key) RETURN { [ key ]: doc1[key] }), changed = (FOR key IN ATTRIBUTES(doc1) FILTER HAS(doc2, key) && doc1[key] != doc2[key] RETURN { [ key ] : { old: doc1[key], new: doc2[key] } }), added = (FOR key IN ATTRIBUTES(doc2) FILTER ! HAS(doc1, key) RETURN { [ key ] : doc2[key] }) RETURN { missing : missing, changed : changed, added : added }";
-
-result = db._query(query, bindVariables).toArray();
-```
-
-**Author:** [Jan Steemann](https://github.com/jsteemann)
-
-**Tags**: #howto #aql
diff --git a/Documentation/Books/Cookbook/AQL/DynamicAttributeNames.md b/Documentation/Books/Cookbook/AQL/DynamicAttributeNames.md
deleted file mode 100644
index 16d47be27897..000000000000
--- a/Documentation/Books/Cookbook/AQL/DynamicAttributeNames.md
+++ /dev/null
@@ -1,197 +0,0 @@
-Using dynamic attribute names in AQL
-====================================
-
-Problem
--------
-
-I want an AQL query to return results with attribute names assembled by a function,
-or with a variable number of attributes.
-
-This will not work by specifying the result using a regular object literal, as object
-literals require the names and numbers of attributes to be fixed at query compile time.
-
-Solution
---------
-
-There are several solutions to getting dynamic attribute names to work.
-
-### Subquery solution
-
-A general solution is to let a subquery or another function produce the dynamic
-attribute names, and finally pass them through the `ZIP()` function to create an object
-from them.
-
-Let's assume we want to process the following input documents:
-
-```json
-{ "name" : "test", "gender" : "f", "status" : "active", "type" : "user" }
-{ "name" : "dummy", "gender" : "m", "status" : "inactive", "type" : "unknown", "magicFlag" : 23 }
-```
-
-Let's also assume our goal for each of these documents is to return only the attribute
-names that contain the letter `a`, together with their respective values.
-
-To extract the attribute names and values from the original documents, we can use a subquery
-as follows:
-
-```
-LET documents = [
-  { "name" : "test", "gender" : "f", "status" : "active", "type" : "user" },
- { "name" : "dummy", "gender" : "m", "status" : "inactive", "type" : "unknown", "magicFlag" : 23 }
-]
-
-FOR doc IN documents
- RETURN (
- FOR name IN ATTRIBUTES(doc)
- FILTER LIKE(name, '%a%')
- RETURN {
- name: name,
- value: doc[name]
- }
- )
-```
-
-The subquery will only let attribute names pass that contain the letter `a`. The results
-of the subquery are then made available to the main query and will be returned. But the
-attribute names in the result are still `name` and `value`, so we're not there yet.
-
-So let's also employ AQL's `ZIP()` function, which can create an object from two arrays:
-
-* the first parameter to `ZIP()` is an array with the attribute names
-* the second parameter to `ZIP()` is an array with the attribute values
-
-Instead of directly returning the subquery result, we first capture it in a variable, and
-pass the variable's `name` and `value` components into `ZIP()` like this:
-
-```
-LET documents = [
-  { "name" : "test", "gender" : "f", "status" : "active", "type" : "user" },
- { "name" : "dummy", "gender" : "m", "status" : "inactive", "type" : "unknown", "magicFlag" : 23 }
-]
-
-FOR doc IN documents
- LET attributes = (
- FOR name IN ATTRIBUTES(doc)
- FILTER LIKE(name, '%a%')
- RETURN {
- name: name,
- value: doc[name]
- }
- )
- RETURN ZIP(attributes[*].name, attributes[*].value)
-```
-
-Note that we have to use the expansion operator (`[*]`) on `attributes` because `attributes`
-itself is an array, and we want either the `name` attribute or the `value` attribute of each
-of its members.
-
-To prove this is working, here is the above query's result:
-
-```json
-[
- {
- "name": "test",
- "status": "active"
- },
- {
- "name": "dummy",
- "status": "inactive",
- "magicFlag": 23
- }
-]
-```
-
-As can be seen, the two results have a different number of result attributes. We can also
-make the result a bit more dynamic by prefixing each attribute with the value of the `name`
-attribute:
-
-```
-LET documents = [
-  { "name" : "test", "gender" : "f", "status" : "active", "type" : "user" },
- { "name" : "dummy", "gender" : "m", "status" : "inactive", "type" : "unknown", "magicFlag" : 23 }
-]
-
-FOR doc IN documents
- LET attributes = (
- FOR name IN ATTRIBUTES(doc)
- FILTER LIKE(name, '%a%')
- RETURN {
- name: CONCAT(doc.name, '-', name),
- value: doc[name]
- }
- )
- RETURN ZIP(attributes[*].name, attributes[*].value)
-```
-
-That will give us document-specific attribute names like this:
-
-```json
-[
- {
- "test-name": "test",
- "test-status": "active"
- },
- {
- "dummy-name": "dummy",
- "dummy-status": "inactive",
- "dummy-magicFlag": 23
- }
-]
-```
-
-### Using expressions as attribute names (ArangoDB 2.5)
-
-If the number of dynamic attributes to return is known in advance, and only the attribute names
-need to be calculated using an expression, then there is another solution.
-
-ArangoDB 2.5 and higher allow using expressions instead of fixed attribute names in object literals.
-Using expressions as attribute names requires enclosing the expression in extra `[` and `]` to
-disambiguate them from regular, unquoted attribute names.
-
-Let's create a result that returns the original document data contained in a dynamically named
-attribute. We'll be using the expression `doc.type` for the attribute name. We'll also return
-some other attributes from the original documents, but prefix them with the documents' `_key`
-attribute values. For this we also need attribute name expressions.
-
-Here is a query showing how to do this. The attribute name expressions are all required to be
-enclosed in `[` and `]` in order to make this work:
-
-```
-LET documents = [
- { "_key" : "3231748397810", "gender" : "f", "status" : "active", "type" : "user" },
- { "_key" : "3231754427122", "gender" : "m", "status" : "inactive", "type" : "unknown" }
-]
-
-FOR doc IN documents
- RETURN {
- [ doc.type ] : {
- [ CONCAT(doc._key, "_gender") ] : doc.gender,
- [ CONCAT(doc._key, "_status") ] : doc.status
- }
- }
-```
-
-This will return:
-
-```json
-[
- {
- "user": {
- "3231748397810_gender": "f",
- "3231748397810_status": "active"
- }
- },
- {
- "unknown": {
- "3231754427122_gender": "m",
- "3231754427122_status": "inactive"
- }
- }
-]
-```
-
-Note: attribute name expressions and regular, unquoted attribute names can be mixed.
-
-**Author:** [Jan Steemann](https://github.com/jsteemann)
-
-**Tags**: #aql
diff --git a/Documentation/Books/Cookbook/AQL/Joins.md b/Documentation/Books/Cookbook/AQL/Joins.md
deleted file mode 100644
index ac19c048b83f..000000000000
--- a/Documentation/Books/Cookbook/AQL/Joins.md
+++ /dev/null
@@ -1,582 +0,0 @@
-Using Joins in AQL
-==================
-
-Problem
--------
-
-I want to join documents from collections in an AQL query.
-
-- One-to-Many: I have a collection users and a collection cities. A user lives in a city and I need the city information during the query.
-
-- Many-To-Many: I have a collection authors and books. An author can write many
- books and a book can have many authors. I want to return a list of books with
- their authors. Therefore I need to join the authors and books.
-
-Solution
---------
-
-Unlike many NoSQL databases, ArangoDB does support joins in AQL queries. This is
-similar to the way traditional relational databases handle this. However,
-because documents allow for more flexibility, joins are also more flexible. The
-following sections provide solutions for common questions.
-
-### One-To-Many
-
-You have a collection called users. Users live in a city and a city is identified
-by its primary key. In principle you can embed the city document into the user
-document and be happy with it.
-
-```json
-{
- "_id" : "users/2151975421",
- "_key" : "2151975421",
- "_rev" : "2151975421",
- "name" : {
- "first" : "John",
- "last" : "Doe"
- },
- "city" : {
- "name" : "Metropolis"
- }
-}
-```
-
-This works well for many use cases. Now assume that you have additional
-information about the city, like the number of people living in it. It would be
-impractical to change each and every user document if this number
-changes. Therefore it is a good idea to hold the city information in a separate
-collection.
-
-```json
-arangosh> db.cities.document("cities/2241300989");
-{
- "population" : 1000,
- "name" : "Metropolis",
- "_id" : "cities/2241300989",
- "_rev" : "2241300989",
- "_key" : "2241300989"
-}
-```
-
-Now, instead of embedding the city directly in the user document, you can use
-the key of the city.
-
-```json
-arangosh> db.users.document("users/2290649597");
-{
- "name" : {
- "first" : "John",
- "last" : "Doe"
- },
- "city" : "cities/2241300989",
- "_id" : "users/2290649597",
- "_rev" : "2290649597",
- "_key" : "2290649597"
-}
-```
-
-We can now join these two collections very easily.
-
-```json
-arangosh> db._query(
-........>"FOR u IN users " +
-........>" FOR c IN cities " +
-........>" FILTER u.city == c._id RETURN { user: u, city: c }"
-........>).toArray()
-[
- {
- "user" : {
- "name" : {
- "first" : "John",
- "last" : "Doe"
- },
- "city" : "cities/2241300989",
- "_id" : "users/2290649597",
- "_rev" : "2290649597",
- "_key" : "2290649597"
- },
- "city" : {
- "population" : 1000,
- "name" : "Metropolis",
- "_id" : "cities/2241300989",
- "_rev" : "2241300989",
- "_key" : "2241300989"
- }
- }
-]
-```
-
-Unlike SQL there is no special JOIN keyword. The optimizer ensures that the
-primary index is used in the above query.
-
-However, very often it is much more convenient for the client of the query if a
-single document is returned, with the city information embedded in the
-user document - as in the simple example above. With AQL you do not need
-to forgo this simplification.
-
-```json
-arangosh> db._query(
-........>"FOR u IN users " +
-........>" FOR c IN cities " +
-........>" FILTER u.city == c._id RETURN merge(u, {city: c})"
-........>).toArray()
-[
- {
- "_id" : "users/2290649597",
- "_key" : "2290649597",
- "_rev" : "2290649597",
- "name" : {
- "first" : "John",
- "last" : "Doe"
- },
- "city" : {
- "_id" : "cities/2241300989",
- "_key" : "2241300989",
- "_rev" : "2241300989",
- "population" : 1000,
- "name" : "Metropolis"
- }
- }
-]
-```
-
-So you can have both: the convenient representation of the result for your client
-and the flexibility of joins for your data model.
-
-### Many-To-Many
-
-In the relational world you need a third table to model the many-to-many
-relation. In ArangoDB you have a choice depending on the information you are
-going to store and the type of questions you are going to ask.
-
-Assume that authors are stored in one collection and books in a second. If all
-you need is "which are the authors of a book" then you can
-easily model this as a list attribute in the book documents.
-
-If you want to store more information, for example which author wrote which page in a conference proceeding, or if you also want to know "which books were written by which author", you can use edge collections. This is very similar to the "join table" from the relational world.
-
-#### Embedded Lists
-
-If you only want to store the authors of a book, you can embed them as a list in the book document. There is no need for a separate collection.
-
-```json
-arangosh> db.authors.toArray()
-[
- {
- "_id" : "authors/2661190141",
- "_key" : "2661190141",
- "_rev" : "2661190141",
- "name" : {
- "first" : "Maxima",
- "last" : "Musterfrau"
- }
- },
- {
- "_id" : "authors/2658437629",
- "_key" : "2658437629",
- "_rev" : "2658437629",
- "name" : {
- "first" : "John",
- "last" : "Doe"
- }
- }
-]
-```
-
-You can query books
-
-```json
-arangosh> db._query("FOR b IN books RETURN b").toArray();
-[
- {
- "_id" : "books/2681506301",
- "_key" : "2681506301",
- "_rev" : "2681506301",
- "title" : "The beauty of JOINS",
- "authors" : [
- "authors/2661190141",
- "authors/2658437629"
- ]
- }
-]
-```
-
-and join the authors in a manner very similar to that given in the one-to-many section.
-
-```json
-arangosh> db._query(
-........>"FOR b IN books " +
-........>" LET a = (FOR x IN b.authors " +
-........>" FOR a IN authors FILTER x == a._id RETURN a) " +
-........>" RETURN { book: b, authors: a }"
-........>).toArray();
-[
- {
- "book" : {
- "title" : "The beauty of JOINS",
- "authors" : [
- "authors/2661190141",
- "authors/2658437629"
- ],
- "_id" : "books/2681506301",
- "_rev" : "2681506301",
- "_key" : "2681506301"
- },
- "authors" : [
- {
- "name" : {
- "first" : "Maxima",
- "last" : "Musterfrau"
- },
- "_id" : "authors/2661190141",
- "_rev" : "2661190141",
- "_key" : "2661190141"
- },
- {
- "name" : {
- "first" : "John",
- "last" : "Doe"
- },
- "_id" : "authors/2658437629",
- "_rev" : "2658437629",
- "_key" : "2658437629"
- }
- ]
- }
-]
-```
-
-or embed the authors directly
-
-```json
-arangosh> db._query(
-........>"FOR b IN books LET a = (" +
-........>" FOR x IN b.authors " +
-........>" FOR a IN authors FILTER x == a._id RETURN a)" +
-........>" RETURN merge(b, { authors: a })"
-........>).toArray();
-[
- {
- "_id" : "books/2681506301",
- "_key" : "2681506301",
- "_rev" : "2681506301",
- "title" : "The beauty of JOINS",
- "authors" : [
- {
- "_id" : "authors/2661190141",
- "_key" : "2661190141",
- "_rev" : "2661190141",
- "name" : {
- "first" : "Maxima",
- "last" : "Musterfrau"
- }
- },
- {
- "_id" : "authors/2658437629",
- "_key" : "2658437629",
- "_rev" : "2658437629",
- "name" : {
- "first" : "John",
- "last" : "Doe"
- }
- }
- ]
- }
-]
-```
-
-#### Using Edge Collections
-
-If you also want to query which books are written by a given author, embedding authors
-in the book document is possible, but it is more efficient to use an edge collection for
-speed.
-
-Or, if you are publishing a proceeding, you may want to store the pages the author has written
-as well. This information can be stored in the edge document.
-
-First create the authors
-
-```json
-arangosh> db._create("authors");
-[ArangoCollection 2926807549, "authors" (type document, status loaded)]
-
-arangosh> db.authors.save({ name: { first: "John", last: "Doe" } })
-{
- "error" : false,
- "_id" : "authors/2935261693",
- "_rev" : "2935261693",
- "_key" : "2935261693"
-}
-
-arangosh> db.authors.save({ name: { first: "Maxima", last: "Musterfrau" } })
-{
- "error" : false,
- "_id" : "authors/2938210813",
- "_rev" : "2938210813",
- "_key" : "2938210813"
-}
-```
-
-Now create the books without any author information.
-
-```json
-arangosh> db._create("books");
-[ArangoCollection 2928380413, "books" (type document, status loaded)]
-
-arangosh> db.books.save({ title: "The beauty of JOINS" });
-{
- "error" : false,
- "_id" : "books/2980088317",
- "_rev" : "2980088317",
- "_key" : "2980088317"
-}
-```
-
-An edge collection is now used to link authors and books.
-
-```json
-arangosh> db._createEdgeCollection("written");
-[ArangoCollection 2931132925, "written" (type edge, status loaded)]
-
-arangosh> db.written.save("authors/2935261693",
-........>"books/2980088317",
-........>{ pages: "1-10" })
-{
- "error" : false,
- "_id" : "written/3006237181",
- "_rev" : "3006237181",
- "_key" : "3006237181"
-}
-
-arangosh> db.written.save("authors/2938210813",
-........>"books/2980088317",
-........>{ pages: "11-20" })
-{
- "error" : false,
- "_id" : "written/3012856317",
- "_rev" : "3012856317",
- "_key" : "3012856317"
-}
-```
-
-In order to get all books with their authors you can use a [graph
-traversal](../../AQL/Graphs/Traversals.html#working-with-collection-sets)
-
-```json
-arangosh> db._query(
-...> "FOR b IN books " +
-...> "LET authorsByBook = ( " +
-...> " FOR author, writtenBy IN INBOUND b written " +
-...> " RETURN { " +
-...> " vertex: author, " +
-...> " edge: writtenBy " +
-...> " } " +
-...> ") " +
-...> "RETURN { " +
-...> " book: b, " +
-...> " authors: authorsByBook " +
-...> "} "
-...> ).toArray();
-[
- {
- "book" : {
- "_key" : "2980088317",
- "_id" : "books/2980088317",
- "_rev" : "2980088317",
- "title" : "The beauty of JOINS"
- },
- "authors" : [
- {
- "vertex" : {
- "_key" : "2935261693",
- "_id" : "authors/2935261693",
- "_rev" : "2935261693",
- "name" : {
- "first" : "John",
- "last" : "Doe"
- }
- },
- "edge" : {
- "_key" : "2935261693",
- "_id" : "written/2935261693",
- "_from" : "authors/2935261693",
- "_to" : "books/2980088317",
- "_rev" : "3006237181",
- "pages" : "1-10"
- }
- },
- {
- "vertex" : {
- "_key" : "2938210813",
- "_id" : "authors/2938210813",
- "_rev" : "2938210813",
- "name" : {
- "first" : "Maxima",
- "last" : "Musterfrau"
- }
- },
- "edge" : {
- "_key" : "6833274",
- "_id" : "written/6833274",
- "_from" : "authors/2938210813",
- "_to" : "books/2980088317",
- "_rev" : "3012856317",
- "pages" : "11-20"
- }
- }
- ]
- }
-]
-```
-
-Or if you want only the information stored in the vertices.
-
-```json
-arangosh> db._query(
-...> "FOR b IN books " +
-...> "LET authorsByBook = ( " +
-...> " FOR author IN INBOUND b written " +
-...> " OPTIONS { " +
-...> " bfs: true, " +
-...> " uniqueVertices: 'global' " +
-...> " } " +
-...> " RETURN author " +
-...> ") " +
-...> "RETURN { " +
-...> " book: b, " +
-...> " authors: authorsByBook " +
-...> "} "
-...> ).toArray();
-[
- {
- "book" : {
- "_key" : "2980088317",
- "_id" : "books/2980088317",
- "_rev" : "2980088317",
- "title" : "The beauty of JOINS"
- },
- "authors" : [
- {
- "_key" : "2938210813",
- "_id" : "authors/2938210813",
- "_rev" : "2938210813",
- "name" : {
- "first" : "Maxima",
- "last" : "Musterfrau"
- }
- },
- {
- "_key" : "2935261693",
- "_id" : "authors/2935261693",
- "_rev" : "2935261693",
- "name" : {
- "first" : "John",
- "last" : "Doe"
- }
- }
- ]
- }
-]
-```
-
-Or again embed the authors directly into the book document.
-
-```json
-arangosh> db._query(
-...> "FOR b IN books " +
-...> "LET authors = ( " +
-...> " FOR author IN INBOUND b written " +
-...> " OPTIONS { " +
-...> " bfs: true, " +
-...> " uniqueVertices: 'global' " +
-...> " } " +
-...> " RETURN author " +
-...> ") " +
-...> "RETURN MERGE(b, {authors: authors}) "
-...> ).toArray();
-[
- {
- "_id" : "books/2980088317",
- "_key" : "2980088317",
- "_rev" : "2980088317",
- "title" : "The beauty of JOINS",
- "authors" : [
- {
- "_key" : "2938210813",
- "_id" : "authors/2938210813",
- "_rev" : "2938210813",
- "name" : {
- "first" : "Maxima",
- "last" : "Musterfrau"
- }
- },
- {
- "_key" : "2935261693",
- "_id" : "authors/2935261693",
- "_rev" : "2935261693",
- "name" : {
- "first" : "John",
- "last" : "Doe"
- }
- }
- ]
- }
-]
-```
-
-If you need the authors and their books, simply reverse the direction.
-
-```json
-> db._query(
-...> "FOR a IN authors " +
-...> "LET booksByAuthor = ( " +
-...> " FOR b IN OUTBOUND a written " +
-...> " OPTIONS { " +
-...> " bfs: true, " +
-...> " uniqueVertices: 'global' " +
-...> " } " +
-...> " RETURN b" +
-...> ") " +
-...> "RETURN MERGE(a, {books: booksByAuthor}) "
-...> ).toArray();
-[
- {
- "_id" : "authors/2935261693",
- "_key" : "2935261693",
- "_rev" : "2935261693",
- "name" : {
- "first" : "John",
- "last" : "Doe"
- },
- "books" : [
- {
- "_key" : "2980088317",
- "_id" : "books/2980088317",
- "_rev" : "2980088317",
- "title" : "The beauty of JOINS"
- }
- ]
- },
- {
- "_id" : "authors/2938210813",
- "_key" : "2938210813",
- "_rev" : "2938210813",
- "name" : {
- "first" : "Maxima",
- "last" : "Musterfrau"
- },
- "books" : [
- {
- "_key" : "2980088317",
- "_id" : "books/2980088317",
- "_rev" : "2980088317",
- "title" : "The beauty of JOINS"
- }
- ]
- }
-]
-```
-
-**Authors**: [Frank Celler](https://github.com/fceller)
-
-**Tags**: #join #aql
diff --git a/Documentation/Books/Cookbook/AQL/MigratingEdgeFunctionsTo3.md b/Documentation/Books/Cookbook/AQL/MigratingEdgeFunctionsTo3.md
deleted file mode 100644
index f83d0d139151..000000000000
--- a/Documentation/Books/Cookbook/AQL/MigratingEdgeFunctionsTo3.md
+++ /dev/null
@@ -1,395 +0,0 @@
-Migrating anonymous graph Functions from 2.8 or earlier to 3.0
-==============================================================
-
-Problem
--------
-
-With the release of 3.0 all GRAPH functions have been dropped from AQL in favor of a more
-native integration of graph features into the query language. I have used the old graph
-functions and want to upgrade to 3.0.
-
-Graph functions covered in this recipe:
-
-* EDGES
-* NEIGHBORS
-* PATHS
-* TRAVERSAL
-* TRAVERSAL_TREE
-
-Solution
---------
-
-### EDGES
-
-The EDGES function can simply be replaced by an AQL traversal.
-
-**No options**
-
-The syntax is slightly different, but the mapping should be simple:
-
-```
-// OLD
-[..] FOR e IN EDGES(@@edgeCollection, @startId, 'outbound') RETURN e
-
-// NEW
-[..] FOR v, e IN OUTBOUND @startId @@edgeCollection RETURN e
-```
-
-**Using EdgeExamples**
-
-Examples have to be transformed into AQL filter statements.
-For how to do this, please read the GRAPH_VERTICES section
-in [Migrating GRAPH_* Functions from 2.8 or earlier to 3.0](MigratingGraphFunctionsTo3.md).
-Apply these filters to the edge variable `e`, as sketched below.
-
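-For example (a sketch, assuming a single-attribute edge example like `{label: 'friend'}`):
-
-```
-// OLD
-[..] FOR e IN EDGES(@@edgeCollection, @startId, 'outbound', {label: 'friend'}) RETURN e
-
-// NEW
-[..] FOR v, e IN OUTBOUND @startId @@edgeCollection FILTER e.label == 'friend' RETURN e
-```
-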
-**Option includeVertices**
-
-In order to include the vertices you just use the vertex variable v as well:
-
-```
-// OLD
-[..] FOR e IN EDGES(@@edgeCollection, @startId, 'outbound', null, {includeVertices: true}) RETURN e
-
-// NEW
-[..] FOR v, e IN OUTBOUND @startId @@edgeCollection RETURN {edge: e, vertex: v}
-```
-
-NOTE: The direction can no longer be given as a bind parameter; it has to be hard-coded in the query.
-
-### NEIGHBORS
-
-NEIGHBORS is a breadth-first search on the graph with a global uniqueness check for vertices, so we can replace it by an AQL traversal with these options.
-Due to syntax changes, the vertex collection of the start vertex no longer has to be given.
-You may have to adjust the bind parameters for this query.
-
-**No options**
-
-The default options just returned the neighbors' `_id` values.
-
-```
-// OLD
-[..] FOR n IN NEIGHBORS(@@vertexCollection, @@edgeCollection, @startId, 'outbound') RETURN n
-
-// NEW
-[..] FOR n IN OUTBOUND @startId @@edgeCollection OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN n._id
-```
-
-NOTE: The direction can no longer be given as a bind parameter; it has to be hard-coded in the query.
-
-**Using edgeExamples**
-
-Examples have to be transformed into AQL filter statements.
-For how to do this, please read the GRAPH_VERTICES section
-in [Migrating GRAPH_* Functions from 2.8 or earlier to 3.0](MigratingGraphFunctionsTo3.md).
-Apply these filters to the edge variable `e`, which is the second return variable of the traversal statement.
-
-However, this is a bit more complicated, as it interferes with the global uniqueness check.
-For edge examples it is sufficient if any edge pointing to the neighbor matches the filter. With `{uniqueVertices: 'global'}`, the traversal first picks one edge at random and then checks only against this edge.
-If we know there are no vertex pairs with multiple edges between them, we can use the simple variant, which is safe:
-
-```
-// OLD
-[..] FOR n IN NEIGHBORS(@@vertexCollection, @@edgeCollection, @startId, 'outbound', {label: 'friend'}) RETURN n
-
-// NEW
-[..] FOR n, e IN OUTBOUND @startId @@edgeCollection OPTIONS {bfs: true, uniqueVertices: 'global'}
-FILTER e.label == 'friend'
-RETURN n._id
-```
-
-If there may be multiple edges between the same pair of vertices, we have to make the distinctness check ourselves and cannot rely on the traverser doing it correctly for us:
-
-```
-// OLD
-[..] FOR n IN NEIGHBORS(@@vertexCollection, @@edgeCollection, @startId, 'outbound', {label: 'friend'}) RETURN n
-
-// NEW
-[..] FOR n, e IN OUTBOUND @startId @@edgeCollection OPTIONS {bfs: true}
-FILTER e.label == 'friend'
-RETURN DISTINCT n._id
-```
-
-**Option includeData**
-
-If you want to include the data, simply return the complete document instead of only the `_id` value.
-
-```
-// OLD
-[..] FOR n IN NEIGHBORS(@@vertexCollection, @@edgeCollection, @startId, 'outbound', null, {includeData: true}) RETURN n
-
-// NEW
-[..] FOR n, e IN OUTBOUND @startId @@edgeCollection OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN n
-```
-
-### PATHS
-
-This function computes all paths over the entire edge collection (with a given minDepth and maxDepth). As you can imagine, this is extremely expensive and should never be used.
-However, PATHS can again be replaced by an AQL traversal.
-
-**No options**
-By default, paths of length 0 to 10 are returned, and cycles are not followed.
-
-```
-// OLD
-RETURN PATHS(@@vertexCollection, @@edgeCollection, "outbound")
-
-// NEW
-FOR start IN @@vertexCollection
-FOR v, e, p IN 0..10 OUTBOUND start @@edgeCollection RETURN {source: start, destination: v, edges: p.edges, vertices: p.vertices}
-```
-
-**followCycles**
-
-If this option is set, we have to adjust the traversal options by modifying the `uniqueEdges` property:
-
-```
-// OLD
-RETURN PATHS(@@vertexCollection, @@edgeCollection, "outbound", {followCycles: true})
-
-// NEW
-FOR start IN @@vertexCollection
-FOR v, e, p IN 0..10 OUTBOUND start @@edgeCollection OPTIONS {uniqueEdges: 'none'} RETURN {source: start, destination: v, edges: p.edges, vertices: p.vertices}
-```
-
-**minDepth and maxDepth**
-
-If these options are set, we have to give them directly before the direction.
-
-```
-// OLD
-RETURN PATHS(@@vertexCollection, @@edgeCollection, "outbound", {minDepth: 2, maxDepth: 5})
-
-// NEW
-FOR start IN @@vertexCollection
-FOR v, e, p IN 2..5 OUTBOUND start @@edgeCollection
-RETURN {source: start, destination: v, edges: p.edges, vertices: p.vertices}
-```
-
-### TRAVERSAL and TRAVERSAL_TREE
-
-These have been removed and should be replaced by the
-[native AQL traversal](../../Manual/Graphs/Traversals/index.html).
-There are many potential solutions using the new syntax, but they largely depend
-on what exactly you are trying to achieve and would go beyond the scope of this
-cookbook. Here is one example of how to do the transition, using the
-[world graph](../../Manual/Graphs/index.html#the-world-graph)
-as data:
-
-In 2.8, it was possible to use `TRAVERSAL()` together with a custom visitor
-function to find leaf nodes in a graph. Leaf nodes are vertices that have inbound
-edges, but no outbound edges. The visitor function code looked like this:
-
-```js
-var aqlfunctions = require("org/arangodb/aql/functions");
-
-aqlfunctions.register("myfunctions::leafNodeVisitor", function (config, result, vertex, path, connected) {
- if (connected && connected.length === 0) {
- return vertex.name + " (" + vertex.type + ")";
- }
-});
-```
-
-And the AQL query to make use of it:
-
-```js
-LET params = {
- order: "preorder-expander",
- visitor: "myfunctions::leafNodeVisitor",
- visitorReturnsResults: true
-}
-FOR result IN TRAVERSAL(worldVertices, worldEdges, "worldVertices/world", "inbound", params)
- RETURN result
-```
-
-To traverse the graph starting at vertex `worldVertices/world` using native
-AQL traversal and an anonymous graph, we can simply do:
-
-```js
-FOR v IN 0..10 INBOUND "worldVertices/world" worldEdges
- RETURN v
-```
-
-It will give us all vertex documents including the start vertex (because the
-minimum depth is set to *0*). The maximum depth is set to *10*, which is enough
-to follow all edges and reach the leaf nodes in this graph.
-
-The query can be modified to return a formatted path from first to last node:
-
-```js
-FOR v, e, p IN 0..10 INBOUND "worldVertices/world" worldEdges
- RETURN CONCAT_SEPARATOR(" -> ", p.vertices[*].name)
-```
-
-The result looks like this (shortened):
-
-```json
-[
- "World",
- "World -> Africa",
- "World -> Africa -> Cote d'Ivoire",
- "World -> Africa -> Cote d'Ivoire -> Yamoussoukro",
- "World -> Africa -> Angola",
- "World -> Africa -> Angola -> Luanda",
- "World -> Africa -> Chad",
- "World -> Africa -> Chad -> N'Djamena",
- ...
-]
-```
-
-As we can see, all possible paths of varying lengths are returned. We are not
-really interested in them, but we still have to do the traversal to go from
-*World* all the way to the leaf nodes (e.g. *Yamoussoukro*). To determine
-if a vertex is really the last on the path in the sense of being a leaf node,
-we can use another traversal of depth 1 to check if there is at least one
-outgoing edge - which means the vertex is not a leaf node, otherwise it is:
-
-```js
-FOR v IN 0..10 INBOUND "worldVertices/world" worldEdges
- FILTER LENGTH(FOR vv IN INBOUND v worldEdges LIMIT 1 RETURN 1) == 0
- RETURN CONCAT(v.name, " (", v.type, ")")
-```
-
-Using the current vertex `v` as starting point, the second traversal is
-performed. It can return early after one edge was followed (`LIMIT 1`),
-because we don't need to know the exact count and it is faster this way.
-We also don't need the actual vertex, so we can just `RETURN 1` as dummy
-value as an optimization. The traversal (which is a sub-query) will
-return an empty array in case of a leaf node, and `[ 1 ]` otherwise.
-Since we only want the leaf nodes, we `FILTER` out all non-empty arrays
-and what is left are the leaf nodes only. The attributes `name` and
-`type` are formatted the way they were in the original JavaScript
-code, but now with AQL. The final result is a list of all capitals:
-
-```json
-[
- "Yamoussoukro (capital)",
- "Luanda (capital)",
- "N'Djamena (capital)",
- "Algiers (capital)",
- "Yaounde (capital)",
- "Ouagadougou (capital)",
- "Gaborone (capital)",
- "Asmara (capital)",
- "Cairo (capital)",
- ...
-]
-```
-
-There is no direct substitute for the `TRAVERSAL_TREE()` function.
-The advantage of this function was that its (possibly highly nested) result
-data structure inherently represented the "longest" possible paths only.
-With native AQL traversal, all paths from minimum to maximum traversal depth
-are returned, including the "short" paths as well:
-
-```js
-FOR v, e, p IN 1..2 INBOUND "worldVertices/continent-north-america" worldEdges
- RETURN CONCAT_SEPARATOR(" <- ", p.vertices[*]._key)
-```
-
-```json
-[
- "continent-north-america <- country-antigua-and-barbuda",
- "continent-north-america <- country-antigua-and-barbuda <- capital-saint-john-s",
- "continent-north-america <- country-barbados",
- "continent-north-america <- country-barbados <- capital-bridgetown",
- "continent-north-america <- country-canada",
- "continent-north-america <- country-canada <- capital-ottawa",
- "continent-north-america <- country-bahamas",
- "continent-north-america <- country-bahamas <- capital-nassau"
-]
-```
-
-A second traversal with depth = 1 can be used to check if we reached a leaf node
-(no more incoming edges). Based on this information, the "short" paths can be
-filtered out. Note that a second condition is required: it is possible that the
-last node in a traversal is not a leaf node if the maximum traversal depth is
-exceeded. Thus, we also need to let through paths that contain as many edges
-as hops we do in the traversal (here: 2).
-
-```js
-FOR v, e, p IN 1..2 INBOUND "worldVertices/continent-north-america" worldEdges
- LET other = (
- FOR vv, ee IN INBOUND v worldEdges
- //FILTER ee != e // needed if traversing edges in ANY direction
- LIMIT 1
- RETURN 1
- )
- FILTER LENGTH(other) == 0 || LENGTH(p.edges) == 2
- RETURN CONCAT_SEPARATOR(" <- ", p.vertices[*]._key)
-```
-
-```json
-[
- "continent-north-america <- country-antigua-and-barbuda <- capital-saint-john-s",
- "continent-north-america <- country-barbados <- capital-bridgetown",
- "continent-north-america <- country-canada <- capital-ottawa",
- "continent-north-america <- country-bahamas <- capital-nassau"
-]
-```
-
-The full paths can be returned as well, but not in a tree-like structure as
-with `TRAVERSAL_TREE()`. Such a data structure can be constructed on the
-client side if really needed.
-
-```js
-FOR v, e, p IN 1..2 INBOUND "worldVertices/continent-north-america" worldEdges
- LET other = (FOR vv, ee IN INBOUND v worldEdges LIMIT 1 RETURN 1)
- FILTER LENGTH(other) == 0 || LENGTH(p.edges) == 2
- RETURN p
-```
-
-Path data (shortened):
-
-```json
-[
- {
- "edges": [
- {
- "_id": "worldEdges/57585025",
- "_from": "worldVertices/country-antigua-and-barbuda",
- "_to": "worldVertices/continent-north-america",
- "type": "is-in"
- },
- {
- "_id": "worldEdges/57585231",
- "_from": "worldVertices/capital-saint-john-s",
- "_to": "worldVertices/country-antigua-and-barbuda",
- "type": "is-in"
- }
- ],
- "vertices": [
- {
- "_id": "worldVertices/continent-north-america",
- "name": "North America",
- "type": "continent"
- },
- {
- "_id": "worldVertices/country-antigua-and-barbuda",
- "code": "ATG",
- "name": "Antigua and Barbuda",
- "type": "country"
- },
- {
- "_id": "worldVertices/capital-saint-john-s",
- "name": "Saint John's",
- "type": "capital"
- }
- ]
- },
- {
- ...
- }
-]
-```
-
-The first and second vertex of the nth path are connected by the first edge
-(`p[n].vertices[0]` ⟝ `p[n].edges[0]` → `p[n].vertices[1]`) and so on. This
-structure might actually be more convenient to process compared to a tree-like
-structure. Note that the edge documents are also included, in contrast to the
-removed graph traversal function.
-
-Contact us via our social channels if you need further help.
-
-**Author:** [Michael Hackstein](https://github.com/mchacki)
-
-**Tags**: #howto #aql #migration
diff --git a/Documentation/Books/Cookbook/AQL/MigratingGraphFunctionsTo3.md b/Documentation/Books/Cookbook/AQL/MigratingGraphFunctionsTo3.md
deleted file mode 100644
index f1dc3de0a19d..000000000000
--- a/Documentation/Books/Cookbook/AQL/MigratingGraphFunctionsTo3.md
+++ /dev/null
@@ -1,777 +0,0 @@
-Migrating GRAPH_* Functions from 2.8 or earlier to 3.0
-======================================================
-
-Problem
--------
-
-With the release of 3.0 all GRAPH functions have been dropped from AQL in favor of a more
-native integration of graph features into the query language. I have used the old graph
-functions and want to upgrade to 3.0.
-
-Graph functions covered in this recipe:
-
-* GRAPH_COMMON_NEIGHBORS
-* GRAPH_COMMON_PROPERTIES
-* GRAPH_DISTANCE_TO
-* GRAPH_EDGES
-* GRAPH_NEIGHBORS
-* GRAPH_TRAVERSAL
-* GRAPH_TRAVERSAL_TREE
-* GRAPH_SHORTEST_PATH
-* GRAPH_PATHS
-* GRAPH_VERTICES
-
-Solution 1: Quick and Dirty (not recommended)
----------------------------------------------
-
-**When to use this solution**
-
-I am not willing to invest a lot of time into the upgrade process and I am
-willing to surrender some performance in favor of less effort.
-Some constellations may not work with this solution due to the nature of
-user-defined functions.
-Especially check for AQL queries that do both modifications
-and `GRAPH_*` functions.
-
-**Registering user-defined functions**
-
-This step has to be executed once on ArangoDB for every database we are using.
-
-We connect to `arangodb` with `arangosh` to issue the following two commands:
-
-```js
-var graphs = require("@arangodb/general-graph");
-graphs._registerCompatibilityFunctions();
-```
-
-These commands register all old `GRAPH_*` functions as user-defined functions again, with the prefix `arangodb::`.
-
-**Modify the application code**
-
-Next we have to go through our application code and replace all calls to `GRAPH_*` by `arangodb::GRAPH_*`.
-Perform a test run of the application and check if it worked.
-If it worked we are ready to go.
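-
-For illustration, a minimal sketch of how such a call changes when issued from
-`arangosh` (the graph name, start vertex and bind parameters are just placeholders):
-
-```js
-// before, using the built-in 2.8 function:
-db._query("FOR n IN GRAPH_NEIGHBORS(@graph, @start) RETURN n",
-          { graph: "myGraph", start: "vertices/alice" });
-
-// after registering the compatibility functions on 3.0,
-// only the function name gets the arangodb:: prefix:
-db._query("FOR n IN arangodb::GRAPH_NEIGHBORS(@graph, @start) RETURN n",
-          { graph: "myGraph", start: "vertices/alice" });
-```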
-
-**Important Information**
-
-The user-defined functions will call translated subqueries (as described in Solution 2).
-The optimizer does not know anything about these subqueries beforehand and cannot optimize the whole plan.
-Also, there might be read/write constellations that are forbidden in user-defined functions, so
-a "really" translated query may work while the user-defined function workaround may be rejected.
-
-Solution 2: Translating the queries (recommended)
--------------------------------------------------
-
-**When to use this solution**
-
-I am willing to invest some time in my queries in order to get
-maximum performance, full query optimization and better
-control of my queries, without being forced into the old
-result layout any more.
-
-**Before you start**
-
-If you are using `vertexExamples` which are not just `_id` strings, do not skip
-the GRAPH_VERTICES section, because it describes how to translate them to
-AQL. All graph functions using a vertexExample are identical to executing a
-GRAPH_VERTICES first and using its result as the start point.
-Example with NEIGHBORS:
-
-```
-FOR res IN GRAPH_NEIGHBORS(@graph, @myExample) RETURN res
-```
-
-Is identical to:
-
-```
-FOR start IN GRAPH_VERTICES(@graph, @myExample)
- FOR res IN GRAPH_NEIGHBORS(@graph, start) RETURN res
-```
-
-For all functions other than GRAPH_VERTICES, we will only explain the transformation for a single input document's `_id`.
-
-**Options used everywhere**
-
-**Option edgeCollectionRestriction**
-
-In order to use an edge collection restriction we just use the fact that the traverser
-can walk over a list of edge collections directly. So the edgeCollectionRestrictions
-simply form this list (exampleGraphEdges):
-
-```
-// OLD
-[..] FOR e IN GRAPH_EDGES(@graphName, @startId, {edgeCollectionRestriction: [edges1, edges2]}) RETURN e
-
-// NEW
-[..] FOR v, e IN ANY @startId edges1, edges2 RETURN DISTINCT e._id
-```
-
-Note: The `@graphName` bindParameter is not used anymore and probably has to be removed from the query.
-
-**Option includeData**
-
-If we use the option includeData we simply return the object directly instead of only the `_id`.
-
-Example GRAPH_EDGES:
-
-```
-// OLD
-[..] FOR e IN GRAPH_EDGES(@graphName, @startId, {includeData: true}) RETURN e
-
-// NEW
-[..] FOR v, e IN ANY @startId GRAPH @graphName RETURN DISTINCT e
-```
-
-**Option direction**
-
-The direction has to be placed before the start id.
-Note: the direction has to be written as a literal keyword; it cannot be handed in via a bindParameter
-anymore:
-
-```
-// OLD
-[..] FOR e IN GRAPH_EDGES(@graphName, @startId, {direction: 'inbound'}) RETURN e
-
-// NEW
-[..] FOR v, e IN INBOUND @startId GRAPH @graphName RETURN DISTINCT e._id
-```
-
-**Options minDepth, maxDepth**
-
-If we use the options minDepth and maxDepth (both default 1 if not set) we can simply
-put them in front of the direction part in the Traversal statement.
-
-Example GRAPH_EDGES:
-
-```
-// OLD
-[..] FOR e IN GRAPH_EDGES(@graphName, @startId, {minDepth: 2, maxDepth: 4}) RETURN e
-
-// NEW
-[..] FOR v, e IN 2..4 ANY @startId GRAPH @graphName RETURN DISTINCT e._id
-```
-
-**Option maxIterations**
-
-The option `maxIterations` is removed without replacement.
-Your queries are now bound by main memory, not by an arbitrary number of iterations.
-
-### GRAPH_VERTICES
-
-First we have to branch on the example.
-There we have three possibilities:
-
-1. The example is an `_id` string.
-2. The example is `null` or `{}`.
-3. The example is a non empty object or an array.
-
-**Example is '_id' string**
-
-This is the easiest replacement. In this case we simply replace the function with a call to `DOCUMENT`:
-
-```
-// OLD
-[..] GRAPH_VERTICES(@graphName, @idString) [..]
-
-// NEW
-[..] DOCUMENT(@idString) [..]
-```
-
-NOTE: The `@graphName` is not required anymore, we may have to adjust bindParameters.
-
-The AQL graph features can work with an id directly, so there is no need to call `DOCUMENT` first if we only need the id to find a starting point.
-
-**Example is `null` or the empty object**
-
-This case means we use all documents from the graph.
-Here we first have to know the vertex collections of the graph.
-
-1. If we only have one collection (say `vertices`) we can replace it with a simple iteration over this collection:
-
-```
-// OLD
-[..] FOR v IN GRAPH_VERTICES(@graphName, {}) [..]
-
-// NEW
-[..] FOR v IN vertices [..]
-```
-
-NOTE: The `@graphName` is not required anymore, we may have to adjust bindParameters.
-
-
-2. We have more than one collection. This is the unfortunate case for a general replacement.
-So in the general replacement we assume we do not want to exclude any of the collections in
-the graph. Then we unfortunately have to form a `UNION` over all these collections.
-Say our graph has the vertex collections `vertices1`, `vertices2`, `vertices3`: we create a sub-query
-for each of them and wrap them in a call to `UNION`.
-
-```
-// OLD
-[..] FOR v IN GRAPH_VERTICES(@graphName, {}) [..]
-
-// NEW
-[..]
-FOR v IN UNION( // We start a UNION
- (FOR v IN vertices1 RETURN v), // For each vertex collection
- (FOR v IN vertices2 RETURN v), // we create the same subquery
- (FOR v IN vertices3 RETURN v)
-) // Finish with the UNION
-[..]
-```
-
-NOTE: If you have any more domain knowledge of your graph, apply it at this point to identify which
-collections are actually relevant, as this `UNION` is a rather expensive operation.
-
-If the original query uses the option `vertexCollectionRestriction`, the `UNION` has to be formed
-over the collections in this restriction instead of ALL collections.
-
-**Example is a non-empty object**
-
-First we follow the instructions for the empty object above.
-In this section we will just focus on a single collection `vertices`, the UNION for multiple collections
-is again wrapped around a subquery for each of these collections built in the following way.
-
-Now we have to transform the example into an AQL `FILTER` statement.
-Therefore we take all top-level attributes of the example and do an equal comparison with their values.
-All of these comparisons are joined with an `AND` because they all have to be fulfilled.
-
-Example:
-
-```
-// OLD
-[..] FOR v IN GRAPH_VERTICES(@graphName, {foo: 'bar', the: {answer: 42}}) [..]
-
-// NEW
-[..] FOR v IN vertices
- FILTER v.foo == 'bar' // foo: bar
- AND v.the == {answer: 42} //the: {answer: 42}
-[..]
-```
-
-**Example is an array**
-
-The transformation is almost identical to a single non-empty object.
-For each element in the array we create the filter conditions and then we
-`OR`-combine them (mind the brackets):
-
-```
-// OLD
-[..] FOR v IN GRAPH_VERTICES(@graphName, [{foo: 'bar', the: {answer: 42}}, {foo: 'baz'}]) [..]
-
-// NEW
-[..] FOR v IN vertices
- FILTER (v.foo == 'bar' // foo: bar
- AND v.the == {answer: 42}) //the: {answer: 42}
- OR (v.foo == 'baz')
-[..]
-```
-
-### GRAPH_EDGES
-
-The GRAPH_EDGES function can simply be replaced by a call to the AQL traversal.
-
-**No options**
-
-The default options used the direction `ANY` and returned a distinct result of the edges.
-Also, it returned just the edges' `_id` values.
-
-```
-// OLD
-[..] FOR e IN GRAPH_EDGES(@graphName, @startId) RETURN e
-
-// NEW
-[..] FOR v, e IN ANY @startId GRAPH @graphName RETURN DISTINCT e._id
-```
-
-**Option edgeExamples**
-
-See `GRAPH_VERTICES` on how to transform examples to AQL FILTER. Apply the filter on the edge variable `e`.
-
-### GRAPH_NEIGHBORS
-
-GRAPH_NEIGHBORS is a breadth-first search on the graph with a global uniqueness check for vertices. So we can replace it by an AQL traversal with these options.
-
-**No options**
-
-The default options used the direction `ANY` and returned a distinct result of the neighbors.
-Also, it returned just the neighbors' `_id` values.
-
-```
-// OLD
-[..] FOR n IN GRAPH_NEIGHBORS(@graphName, @startId) RETURN n
-
-// NEW
-[..] FOR n IN ANY @startId GRAPH @graphName OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN n
-```
-
-**Option neighborExamples**
-
-See `GRAPH_VERTICES` on how to transform examples to AQL FILTER. Apply the filter on the neighbor variable `n`.
-
-**Option edgeExamples**
-
-See `GRAPH_VERTICES` on how to transform examples to AQL FILTER. Apply the filter on the edge variable `e`.
-
-However this is a bit more complicated as it interferes with the global uniqueness check.
-For edgeExamples it is sufficient when any edge pointing to the neighbor matches the filter. Using `{uniqueVertices: 'global'}` first picks any edge randomly. Then it checks against this edge only.
-If we know there are no vertex pairs with multiple edges between them we can use the simple variant, which is safe:
-
-```
-// OLD
-[..] FOR n IN GRAPH_NEIGHBORS(@graphName, @startId, {edgeExample: {label: 'friend'}}) RETURN n
-
-// NEW
-[..] FOR n, e IN ANY @startId GRAPH @graphName OPTIONS {bfs: true, uniqueVertices: 'global'} FILTER e.label == 'friend' RETURN n._id
-```
-
-If there may be multiple edges between the same pair of vertices we have to make the distinct check ourselves and cannot rely on the traverser doing it correctly for us:
-
-```
-// OLD
-[..] FOR n IN GRAPH_NEIGHBORS(@graphName, @startId, {edgeExample: {label: 'friend'}}) RETURN n
-
-// NEW
-[..] FOR n, e IN ANY @startId GRAPH @graphName OPTIONS {bfs: true} FILTER e.label == 'friend' RETURN DISTINCT n._id
-```
-
-**Option vertexCollectionRestriction**
-
-If we use the vertexCollectionRestriction we have to post-filter the neighbors based on their collection. Therefore we can make use of the function `IS_SAME_COLLECTION`:
-
-```
-// OLD
-[..] FOR n IN GRAPH_NEIGHBORS(@graphName, @startId, {vertexCollectionRestriction: ['vertices1', 'vertices2']}) RETURN n
-
-// NEW
-[..] FOR n IN ANY @startId GRAPH @graphName OPTIONS {bfs: true, uniqueVertices: 'global'} FILTER IS_SAME_COLLECTION('vertices1', n) OR IS_SAME_COLLECTION('vertices2', n) RETURN DISTINCT n._id
-```
-
-### GRAPH_COMMON_NEIGHBORS
-
-`GRAPH_COMMON_NEIGHBORS` is defined as two `GRAPH_NEIGHBORS` queries followed by the `INTERSECTION` of both results.
-For how to translate the options, please refer to `GRAPH_NEIGHBORS`.
-Finally we have to build the old result format `{left, right, neighbors}`.
-If you just need parts of the result you can adapt this query to your specific needs.
-
-```
-// OLD
-FOR v IN GRAPH_COMMON_NEIGHBORS(@graphName, 'vertices/1' , 'vertices/2', {direction : 'any'}) RETURN v
-
-// NEW
-LET n1 = ( // Neighbors for vertex1Example
- FOR n IN ANY 'vertices/1' GRAPH 'graph' OPTIONS {bfs: true, uniqueVertices: "global"} RETURN n._id
- )
-LET n2 = ( // Neighbors for vertex2Example
- FOR n IN ANY 'vertices/2' GRAPH 'graph' OPTIONS {bfs: true, uniqueVertices: "global"} RETURN n._id
- )
-LET common = INTERSECTION(n1, n2) // Get the intersection
-RETURN { // Produce the original result
- left: 'vertices/1',
-  right: 'vertices/2',
- neighbors: common
-}
-```
-
-NOTE: If you are using examples instead of `_ids` you have to add a filter to make sure that the left is not equal to the right start vertex.
-To give you an example with a single vertex collection `vertices`, the replacement would look like this:
-
-```
-// OLD
-FOR v IN GRAPH_COMMON_NEIGHBORS(@graphName, {name: "Alice"}, {name: "Bob"}) RETURN v
-
-// NEW
-FOR left IN vertices
- FILTER left.name == "Alice"
- LET n1 = (FOR n IN ANY left GRAPH 'graph' OPTIONS {bfs: true, uniqueVertices: "global"} RETURN n._id)
- FOR right IN vertices
- FILTER right.name == "Bob"
- FILTER right != left // Make sure left is not identical to right
- LET n2 = (FOR n IN ANY right GRAPH 'graph' OPTIONS {bfs: true, uniqueVertices: "global"} RETURN n._id)
- LET neighbors = INTERSECTION(n1, n2)
- FILTER LENGTH(neighbors) > 0 // Only pairs with shared neighbors should be returned
- RETURN {left: left._id, right: right._id, neighbors: neighbors}
-```
-
-### GRAPH_PATHS
-
-This function computes all paths of the entire graph (with a given minDepth and maxDepth). As you can imagine, this feature is extremely expensive and should never be used.
-However, paths can again be replaced by an AQL traversal.
-Assume we only have one vertex collection `vertices` again.
-
-**No options**
-
-By default paths of length 0 to 10 are returned, and cycles are not followed.
-
-```
-// OLD
-RETURN GRAPH_PATHS('graph')
-
-// NEW
-FOR start IN vertices
-FOR v, e, p IN 0..10 OUTBOUND start GRAPH 'graph' RETURN {source: start, destination: v, edges: p.edges, vertices: p.vertices}
-```
-
-**followCycles**
-
-If this option is set we have to modify the options of the traversal by modifying the `uniqueEdges` property:
-
-```
-// OLD
-RETURN GRAPH_PATHS('graph', {followCycles: true})
-
-// NEW
-FOR start IN vertices
-FOR v, e, p IN 0..10 OUTBOUND start GRAPH 'graph' OPTIONS {uniqueEdges: 'none'} RETURN {source: start, destination: v, edges: p.edges, vertices: p.vertices}
-```
-
-### GRAPH_COMMON_PROPERTIES
-
-This feature involves several full-collection scans and therefore is extremely expensive.
-If you really need it you can transform it with the help of `ATTRIBUTES`, `KEEP` and `ZIP`.
-
-**Start with single _id**
-
-```
-// OLD
-RETURN GRAPH_COMMON_PROPERTIES('graph', "vertices/1", "vertices/2")
-
-// NEW
-LET left = DOCUMENT("vertices/1") // get one document
-LET right = DOCUMENT("vertices/2") // get the other one
-LET shared = (FOR a IN ATTRIBUTES(left) // find all shared attributes
- FILTER left[a] == right[a]
- OR a == '_id' // always include _id
- RETURN a)
-FILTER LENGTH(shared) > 1 // Return them only if they share an attribute
-RETURN ZIP([left._id], [KEEP(right, shared)]) // Build the result
-```
-
-**Start with vertexExamples**
-
-Again we assume we only have a single collection `vertices`.
-We have to transform the examples into filters. Iterate
-over vertices to find all left documents.
-For each left document iterate over vertices again
-to find matching right documents.
-And return the shared attributes as above:
-
-```
-// OLD
-RETURN GRAPH_COMMON_PROPERTIES('graph', {answer: 42}, {foo: "bar"})
-
-// NEW
-FOR left IN vertices
- FILTER left.answer == 42
- LET commons = (
- FOR right IN vertices
- FILTER right.foo == "bar"
- FILTER left != right
- LET shared = (FOR a IN ATTRIBUTES(left)
- FILTER left[a] == right[a]
- OR a == '_id'
- RETURN a)
- FILTER LENGTH(shared) > 1
- RETURN KEEP(right, shared))
- FILTER LENGTH(commons) > 0
- RETURN ZIP([left._id], [commons])
-```
-
-
-### GRAPH_SHORTEST_PATH
-
-A shortest path computation is now done via the new SHORTEST_PATH AQL statement.
-
-**No options**
-
-```
-// OLD
-FOR p IN GRAPH_SHORTEST_PATH(@graphName, @startId, @targetId, {direction : 'outbound'}) RETURN p
-
-// NEW
-LET p = ( // Run one shortest Path
- FOR v, e IN OUTBOUND SHORTEST_PATH @startId TO @targetId GRAPH @graphName
- // We return objects with vertex, edge and weight for each vertex on the path
- RETURN {vertex: v, edge: e, weight: (IS_NULL(e) ? 0 : 1)}
-)
-FILTER LENGTH(p) > 0 // We only want shortest paths that actually exist
-RETURN { // We rebuild the old format
- vertices: p[*].vertex,
- edges: p[* FILTER CURRENT.e != null].edge,
- distance: SUM(p[*].weight)
-}
-```
-
-**Options weight and defaultWeight**
-
-The new AQL SHORTEST_PATH offers the options `weightAttribute` and `defaultWeight`.
-
-```
-// OLD
-FOR p IN GRAPH_SHORTEST_PATH(@graphName, @startId, @targetId, {direction : 'outbound', weight: "weight", defaultWeight: 80}) RETURN p
-
-// NEW
-LET p = ( // Run one shortest Path
- FOR v, e IN OUTBOUND SHORTEST_PATH @startId TO @targetId GRAPH @graphName
- // We return objects with vertex, edge and weight for each vertex on the path
- RETURN {vertex: v, edge: e, weight: (IS_NULL(e) ? 0 : (IS_NUMBER(e.weight) ? e.weight : 80))}
-)
-FILTER LENGTH(p) > 0 // We only want shortest paths that actually exist
-RETURN { // We rebuild the old format
- vertices: p[*].vertex,
- edges: p[* FILTER CURRENT.e != null].edge,
- distance: SUM(p[*].weight) // We have to recompute the distance if we need it
-}
-```
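-
-If the old result format is not needed, the weight handling can also be left to the
-traversal itself via `OPTIONS`. A minimal arangosh sketch (graph name, weight attribute
-and bound values are just placeholders):
-
-```js
-db._query(`
-  FOR v, e IN OUTBOUND SHORTEST_PATH @startId TO @targetId GRAPH @graphName
-    OPTIONS { weightAttribute: "weight", defaultWeight: 80 }
-    RETURN v._id
-`, { startId: "vertices/1", targetId: "vertices/2", graphName: "graph" });
-```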
-
-
-### GRAPH_DISTANCE_TO
-
-`GRAPH_DISTANCE_TO` only differs from `GRAPH_SHORTEST_PATH` in the result format.
-So we follow the transformation for `GRAPH_SHORTEST_PATH`, remove some unnecessary parts,
-and change the return format:
-
-```
-// OLD
-FOR p IN GRAPH_DISTANCE_TO(@graphName, @startId, @targetId, {direction : 'outbound'}) RETURN p
-
-// NEW
-LET p = ( // Run one shortest Path
- FOR v, e IN OUTBOUND SHORTEST_PATH @startId TO @targetId GRAPH @graphName
- // DIFFERENCE we only return the weight for each edge on the path
-  RETURN IS_NULL(e) ? 0 : 1
-)
-FILTER LENGTH(p) > 0 // We only want shortest paths that actually exist
-RETURN { // We rebuild the old format
- startVertex: @startId,
- vertex: @targetId,
-  distance: SUM(p)
-}
-```
-
-### GRAPH_TRAVERSAL and GRAPH_TRAVERSAL_TREE
-
-These have been removed and should be replaced by the
-[native AQL traversal](../../Manual/Graphs/Traversals/index.html).
-There are many potential solutions using the new syntax, but they largely depend
-on what exactly you are trying to achieve and would go beyond the scope of this
-cookbook. Here is one example how to do the transition, using the
-[world graph](../../Manual/Graphs/index.html#the-world-graph)
-as data:
-
-In 2.8, it was possible to use `GRAPH_TRAVERSAL()` together with a custom visitor
-function to find leaf nodes in a graph. Leaf nodes are vertices that have inbound
-edges, but no outbound edges. The visitor function code looked like this:
-
-```js
-var aqlfunctions = require("org/arangodb/aql/functions");
-
-aqlfunctions.register("myfunctions::leafNodeVisitor", function (config, result, vertex, path, connected) {
- if (connected && connected.length === 0) {
- return vertex.name + " (" + vertex.type + ")";
- }
-});
-```
-
-And the AQL query to make use of it:
-
-```js
-LET params = {
- order: "preorder-expander",
- visitor: "myfunctions::leafNodeVisitor",
- visitorReturnsResults: true
-}
-FOR result IN GRAPH_TRAVERSAL("worldCountry", "worldVertices/world", "inbound", params)
- RETURN result
-```
-
-To traverse the graph starting at vertex `worldVertices/world` using native
-AQL traversal and a named graph, we can simply do:
-
-```js
-FOR v IN 0..10 INBOUND "worldVertices/world" GRAPH "worldCountry"
- RETURN v
-```
-
-It will give us all vertex documents including the start vertex (because the
-minimum depth is set to *0*). The maximum depth is set to *10*, which is enough
-to follow all edges and reach the leaf nodes in this graph.
-
-The query can be modified to return a formatted path from first to last node:
-
-```js
-FOR v, e, p IN 0..10 INBOUND "worldVertices/world" GRAPH "worldCountry"
- RETURN CONCAT_SEPARATOR(" -> ", p.vertices[*].name)
-```
-
-The result looks like this (shortened):
-
-```json
-[
- "World",
- "World -> Africa",
- "World -> Africa -> Cote d'Ivoire",
- "World -> Africa -> Cote d'Ivoire -> Yamoussoukro",
- "World -> Africa -> Angola",
- "World -> Africa -> Angola -> Luanda",
- "World -> Africa -> Chad",
- "World -> Africa -> Chad -> N'Djamena",
- ...
-]
-```
-
-As we can see, all possible paths of varying lengths are returned. We are not
-really interested in them, but we still have to do the traversal to go from
-*World* all the way to the leaf nodes (e.g. *Yamoussoukro*). To determine
-if a vertex is really the last on the path in the sense of being a leaf node,
-we can use another traversal of depth 1 to check if there is at least one
-outgoing edge - which means the vertex is not a leaf node, otherwise it is:
-
-```js
-FOR v IN 0..10 INBOUND "worldVertices/world" GRAPH "worldCountry"
- FILTER LENGTH(FOR vv IN INBOUND v GRAPH "worldCountry" LIMIT 1 RETURN 1) == 0
- RETURN CONCAT(v.name, " (", v.type, ")")
-```
-
-Using the current vertex `v` as starting point, the second traversal is
-performed. It can return early after one edge was followed (`LIMIT 1`),
-because we don't need to know the exact count and it is faster this way.
-We also don't need the actual vertex, so we can just `RETURN 1` as dummy
-value as an optimization. The traversal (which is a sub-query) will
-return an empty array in case of a leaf node, and `[ 1 ]` otherwise.
-Since we only want the leaf nodes, we `FILTER` out all non-empty arrays
-and what is left are the leaf nodes only. The attributes `name` and
-`type` are formatted the same way they were in the original JavaScript
-code, but now with AQL. The final result is a list of all capitals:
-
-```json
-[
- "Yamoussoukro (capital)",
- "Luanda (capital)",
- "N'Djamena (capital)",
- "Algiers (capital)",
- "Yaounde (capital)",
- "Ouagadougou (capital)",
- "Gaborone (capital)",
- "Asmara (capital)",
- "Cairo (capital)",
- ...
-]
-```
-
-There is no direct substitute for the `GRAPH_TRAVERSAL_TREE()` function.
-The advantage of this function was that its (possibly highly nested) result
-data structure inherently represented the "longest" possible paths only.
-With native AQL traversal, all paths from minimum to maximum traversal depth
-are returned, including the "short" paths as well:
-
-```js
-FOR v, e, p IN 1..2 INBOUND "worldVertices/continent-north-america" GRAPH "worldCountry"
- RETURN CONCAT_SEPARATOR(" <- ", p.vertices[*]._key)
-```
-
-```json
-[
- "continent-north-america <- country-antigua-and-barbuda",
- "continent-north-america <- country-antigua-and-barbuda <- capital-saint-john-s",
- "continent-north-america <- country-barbados",
- "continent-north-america <- country-barbados <- capital-bridgetown",
- "continent-north-america <- country-canada",
- "continent-north-america <- country-canada <- capital-ottawa",
- "continent-north-america <- country-bahamas",
- "continent-north-america <- country-bahamas <- capital-nassau"
-]
-```
-
-A second traversal with depth = 1 can be used to check if we reached a leaf node
-(no more incoming edges). Based on this information, the "short" paths can be
-filtered out. Note that a second condition is required: it is possible that the
-last node in a traversal is not a leaf node if the maximum traversal depth is
-exceeded. Thus, we also need to let through paths that contain as many edges
-as the traversal's maximum depth (here: 2).
-
-```js
-FOR v, e, p IN 1..2 INBOUND "worldVertices/continent-north-america" GRAPH "worldCountry"
- LET other = (
- FOR vv, ee IN INBOUND v GRAPH "worldCountry"
- //FILTER ee != e // needed if traversing edges in ANY direction
- LIMIT 1
- RETURN 1
- )
- FILTER LENGTH(other) == 0 || LENGTH(p.edges) == 2
- RETURN CONCAT_SEPARATOR(" <- ", p.vertices[*]._key)
-```
-
-```json
-[
- "continent-north-america <- country-antigua-and-barbuda <- capital-saint-john-s",
- "continent-north-america <- country-barbados <- capital-bridgetown",
- "continent-north-america <- country-canada <- capital-ottawa",
- "continent-north-america <- country-bahamas <- capital-nassau"
-]
-```
-
-The full paths can be returned as well, but not in a tree-like structure as
-with `GRAPH_TRAVERSAL_TREE()`. Such a data structure can be constructed on the
-client side if really needed.
-
-```js
-FOR v, e, p IN 1..2 INBOUND "worldVertices/continent-north-america" GRAPH "worldCountry"
- LET other = (FOR vv, ee IN INBOUND v GRAPH "worldCountry" LIMIT 1 RETURN 1)
- FILTER LENGTH(other) == 0 || LENGTH(p.edges) == 2
- RETURN p
-```
-
-Path data (shortened):
-
-```json
-[
- {
- "edges": [
- {
- "_id": "worldEdges/57585025",
- "_from": "worldVertices/country-antigua-and-barbuda",
- "_to": "worldVertices/continent-north-america",
- "type": "is-in"
- },
- {
- "_id": "worldEdges/57585231",
- "_from": "worldVertices/capital-saint-john-s",
- "_to": "worldVertices/country-antigua-and-barbuda",
- "type": "is-in"
- }
- ],
- "vertices": [
- {
- "_id": "worldVertices/continent-north-america",
- "name": "North America",
- "type": "continent"
- },
- {
- "_id": "worldVertices/country-antigua-and-barbuda",
- "code": "ATG",
- "name": "Antigua and Barbuda",
- "type": "country"
- },
- {
- "_id": "worldVertices/capital-saint-john-s",
- "name": "Saint John's",
- "type": "capital"
- }
- ]
- },
- {
- ...
- }
-]
-```
-
-The first and second vertex of the nth path are connected by the first edge
-(`p[n].vertices[0]` ⟝ `p[n].edges[0]` → `p[n].vertices[1]`) and so on. This
-structure might actually be more convenient to process compared to a tree-like
-structure. Note that the edge documents are also included, in contrast to the
-removed graph traversal function.
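-
-If a nested, tree-like representation is really needed, it can be derived from these
-paths with a few lines of client-side code. A rough sketch (run e.g. in arangosh,
-assuming `paths` holds the query result from above):
-
-```js
-// fold the flat list of paths into a nested object keyed by vertex _key
-function buildTree(paths) {
-  var tree = {};
-  paths.forEach(function (p) {
-    var node = tree;
-    p.vertices.forEach(function (v) {
-      if (!node[v._key]) {
-        node[v._key] = {};
-      }
-      node = node[v._key];
-    });
-  });
-  return tree;
-}
-// e.g.: buildTree(db._query(query).toArray());
-```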
-
-Contact us via our social channels if you need further help.
-
-**Author:** [Michael Hackstein](https://github.com/mchacki)
-
-**Tags**: #howto #aql #migration
diff --git a/Documentation/Books/Cookbook/AQL/MigratingMeasurementsTo3.md b/Documentation/Books/Cookbook/AQL/MigratingMeasurementsTo3.md
deleted file mode 100644
index 1684683892a9..000000000000
--- a/Documentation/Books/Cookbook/AQL/MigratingMeasurementsTo3.md
+++ /dev/null
@@ -1,98 +0,0 @@
-Migrating GRAPH_* Measurements from 2.8 or earlier to 3.0
-=========================================================
-
-Problem
--------
-
-With the release of 3.0 all GRAPH functions have been dropped from AQL in favor of a more
-native integration of graph features into the query language. I have used the old graph
-functions and want to upgrade to 3.0.
-
-Graph functions covered in this recipe:
-
-* GRAPH_ABSOLUTE_BETWEENNESS
-* GRAPH_ABSOLUTE_CLOSENESS
-* GRAPH_ABSOLUTE_ECCENTRICITY
-* GRAPH_BETWEENNESS
-* GRAPH_CLOSENESS
-* GRAPH_DIAMETER
-* GRAPH_ECCENTRICITY
-* GRAPH_RADIUS
-
-Solution 1: User Defined Functions
----------------------------------
-
-### Registering user-defined functions
-
-This step has to be executed once on ArangoDB for every database we are using.
-
-We connect to `arangodb` with `arangosh` to issue the following two commands:
-
-```js
-var graphs = require("@arangodb/general-graph");
-graphs._registerCompatibilityFunctions();
-```
-
-These commands register all old `GRAPH_*` functions as user-defined functions again, with the prefix `arangodb::`.
-
-### Modify the application code
-
-Next we have to go through our application code and replace all calls to `GRAPH_*` by `arangodb::GRAPH_*`.
-Now perform a test run of our application and check if it worked.
-If it worked we are ready to go.
-
-### Important Information
-
-The user-defined functions will call translated subqueries (as described in Solution 2).
-The optimizer does not know anything about these subqueries beforehand and cannot optimize the whole plan.
-Also, there might be read/write constellations that are forbidden in user-defined functions, so
-a "really" translated query may work while the user-defined function workaround may be rejected.
-
-Solution 2: Foxx (recommended)
-------------------------------
-
-The general graph module still offers the measurement functions.
-As these are typically computationally expensive and create long-running queries, it is recommended
-not to use them in combination with other AQL features.
-Therefore the best idea is to offer these measurements directly via an API using Foxx.
-
-First we create a new [Foxx service](../../Manual/Foxx/index.html).
-Then we include the `general-graph` module in the service.
-For every measurement we need, we simply offer a GET route to read this measurement.
-
-As an example we do the `GRAPH_RADIUS`:
-
-```js
-/// ADD FOXX CODE ABOVE
-
-const joi = require('joi');
-const createRouter = require('@arangodb/foxx/router');
-const dd = require('dedent');
-const router = createRouter();
-
-const graphs = require("@arangodb/general-graph");
-
-router.get('/radius/:graph', function(req, res) {
- let graph;
-
- // Load the graph
- try {
-    graph = graphs._graph(req.pathParams.graph);
- } catch (e) {
- res.throw('not found');
- }
-  res.json(graph._radius()); // Return the radius
-})
-.pathParam('graph', joi.string().required(), 'The name of the graph')
-.error('not found', 'Graph with this name does not exist.')
-.summary('Compute the Radius')
-.description(dd`
- This function computes the radius of the given graph
- and returns it.
-`);
-```
-
-
-**Author:** [Michael Hackstein](https://github.com/mchacki)
-
-**Tags**: #howto #aql #migration
diff --git a/Documentation/Books/Cookbook/AQL/MultilineQueryStrings.md b/Documentation/Books/Cookbook/AQL/MultilineQueryStrings.md
deleted file mode 100644
index 401c925e9303..000000000000
--- a/Documentation/Books/Cookbook/AQL/MultilineQueryStrings.md
+++ /dev/null
@@ -1,171 +0,0 @@
-Writing multi-line AQL queries
-==============================
-
-Problem
--------
-
-I want to write an AQL query that spans multiple lines in my JavaScript source code,
-but it does not work. How to do this?
-
-Solution
---------
-
-AQL supports multi-line queries, and the AQL editor in ArangoDB's web interface supports
-them too.
-
-When issued programmatically, multi-line queries can be a source of errors, at least in
-some languages. For example, JavaScript is notoriously bad at handling multi-line (JavaScript)
-statements, and until recently it had no support for multi-line strings.
-
-In JavaScript, there are three ways of writing a multi-line AQL query in the source code:
-
-- string concatenation
-- ES6 template strings
-- query builder
-
-Which method works best depends on a few factors, but is often enough a simple matter of preference.
-Before deciding on any, please make sure to read the recipe for [avoiding parameter injection](AvoidingInjection.md)
-too.
-
-### String concatenation
-
-We want the query `FOR doc IN collection FILTER doc.value == @what RETURN doc` to become
-more legible in the source code.
-
-Simply splitting the query string into three lines will leave us with a parse error in
-JavaScript:
-
-```js
-/* will not work */
-var query = "FOR doc IN collection
- FILTER doc.value == @what
- RETURN doc";
-```
-
-Instead, we could do this:
-
-```js
-var query = "FOR doc IN collection " +
- "FILTER doc.value == @what " +
- "RETURN doc";
-```
-
-This is perfectly valid JavaScript, but it's error-prone. People have spent ages on finding
-subtle bugs in their queries because they missed a single whitespace character at the
-beginning or end of some line.
-
-Please note that when assembling queries via string concatenation, you should still use
-bind parameters (as done above with `@what`) and not insert user input values into the
-query string without sanitation.
-
-### ES6 template strings
-
-ES6 template strings are easier to get right and also look more elegant. They can be used
-inside ArangoDB since version 2.5, but some other platforms don't support them yet.
-For example, they can't be used in IE and older node.js versions. So use them if your
-environment supports them and your code does not need to run on any non-ES6 environments.
-
-Here's the query string declared via an ES6 template string (note that the string must
-be enclosed in backticks now):
-
-```js
-var query = `FOR doc IN collection
- FILTER doc.value == @what
- RETURN doc`;
-```
-
-The whitespace in the template string variant is much easier to get right than when doing
-the string concatenation.
-
-There are a few things to note regarding template strings:
-
-- ES6 template strings can be used to inject JavaScript values into the string dynamically.
- Substitutions start with the character sequence `${`. Care must be taken if this sequence
- itself is used inside the AQL query string (currently this would be invalid AQL, but this
- may change in future ArangoDB versions). Additionally, any values injected into the query
- string using parameter substitutions will not be escaped correctly automatically, so again
- special care must be taken when using this method to keep queries safe from parameter
- injection.
-
-- a multi-line template string will actually contain newline characters. This is not necessarily
- the case when doing string concatenation. In the string concatenation example, we used
- three lines of source code to create a single-line query string. We could have inserted
- newlines into the query string there too, but we didn't. Just to point out that the two
- variants will not create bytewise-identical query strings.
-
-Please note that when using ES6 template strings for your queries, you should still use
-bind parameters (as done above with `@what`) and not insert user input values into the
-query string without sanitation.
-
-There is a convenience function `aql` which can be used to safely
-and easily build an AQL query with substitutions from arbitrary JavaScript values and
-expressions. It can be invoked like this:
-
-```js
-const aql = require("@arangodb").aql; // not needed in arangosh
-
-var what = "some input value";
-var query = aql`FOR doc IN collection
- FILTER doc.value == ${what}
- RETURN doc`;
-```
-
-The template string variant that uses `aql` is both convenient and safe. Internally, it
-will turn the substituted values into bind parameters. The query string and the bind parameter
-values will be returned separately, so the result of `query` above will be something like:
-
-```js
-{
- "query" : "FOR doc IN collection FILTER doc.value == @value0 RETURN doc",
- "bindVars" : {
- "value0" : "some input value"
- }
-}
-```
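-
-This object can be handed to `db._query` as-is, so a short usage sketch looks like this:
-
-```js
-var docs = db._query(query).toArray();
-```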
-
-### Query builder
-
-ArangoDB comes bundled with a query builder named [aqb](https://www.npmjs.com/package/aqb).
-That query builder can be used to programmatically construct AQL queries, without having
-to write query strings at all.
-
-Here's an example of its usage:
-
-```js
-var qb = require("aqb");
-
-var jobs = db._createStatement({
- query: (
- qb.for('job').in('_jobs')
- .filter(
- qb('pending').eq('job.status')
- .and(qb.ref('@queue').eq('job.queue'))
- .and(qb.ref('@now').gte('job.delayUntil'))
- )
- .sort('job.delayUntil', 'ASC')
- .limit('@max')
- .return('job')
- ),
- bindVars: {
- queue: queue._key,
- now: Date.now(),
- max: queue.maxWorkers - numBusy
- }
-}).execute().toArray();
-```
-
-As can be seen, aqb provides a fluent API that allows chaining function calls for
-creating the individual query operations. This has a few advantages:
-
-- flexibility: there is no query string in the source code, so the code can be formatted
- as desired without having to bother about strings
-- validation: the query can be validated syntactically by aqb before being actually executed
- by the server. Testing of queries also becomes easier. Additionally, some IDEs may
-  provide auto-completion to some extent and thus aid development
-- security: built-in separation of query operations (e.g. `FOR`, `FILTER`, `SORT`, `LIMIT`)
- and dynamic values (e.g. user input values)
-
-aqb can be used inside ArangoDB and from node.js and even from within browsers.
-
-**Authors**: [Jan Steemann](https://github.com/jsteemann)
-
-**Tags**: #aql #aqb #es6
diff --git a/Documentation/Books/Cookbook/AQL/README.md b/Documentation/Books/Cookbook/AQL/README.md
deleted file mode 100644
index abe384e2869c..000000000000
--- a/Documentation/Books/Cookbook/AQL/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-AQL
-===
-
-Using AQL in general
---------------------
-
-- [Using Joins in AQL](Joins.md)
-
-- [Using Dynamic Attribute Names](DynamicAttributeNames.md)
-
-- [Creating Test-data using AQL](CreatingTestData.md)
-
-- [Diffing Documents](DiffingDocuments.md)
-
-- [Avoiding Parameter Injection](AvoidingInjection.md)
-
-- [Multiline Query Strings](MultilineQueryStrings.md)
-
-
-Migrating from 2.x to 3.0
--------------------------
-
-- [Migrating named graph functions to 3.0](MigratingGraphFunctionsTo3.md)
-
-- [Migrating anonymous graph functions to 3.0](MigratingEdgeFunctionsTo3.md)
-
-- [Migrating graph measurements to 3.0](MigratingMeasurementsTo3.md)
diff --git a/Documentation/Books/Cookbook/AccessingShapesData.md b/Documentation/Books/Cookbook/AccessingShapesData.md
deleted file mode 100644
index cde23f068500..000000000000
--- a/Documentation/Books/Cookbook/AccessingShapesData.md
+++ /dev/null
@@ -1,120 +0,0 @@
-# Accessing Shapes Data
-
-## Problem
-Documents in a collection may have different shapes associated with them. There is no way to query the shapes data directly. So how do you solve this problem?
-
-## Solution
-There are two possible ways to do this.
-
-*A) The fast way with some random samplings:*
-
-1. Ask for a random document (`db.<collection>.any()`) and note its top-level attribute names
-2. Repeat this for at least 10 times. After that repeat it only if you think it's worth it.
-
-Following is an example of an implementation:
-
-```js
-attributes(db.myCollection);
-
-
-function attributes(collection) {
- "use strict"
-
- var probes = 10;
- var maxRounds = 3;
- var threshold = 0.5;
-
- var maxDocuments = collection.count();
-
- if (maxDocuments < probes) {
- probes = maxDocuments;
- }
-
- if (probes === 0) {
- return [ ];
- }
-
- var attributes = { };
-
- while (maxRounds--) {
- var newDocuments = 0;
- var n = probes;
- while (n--) {
- var doc = collection.any();
- var found = false;
- var keys = Object.keys(doc);
-
- for (var i = 0; i < keys.length; ++i) {
- if (attributes.hasOwnProperty(keys[i])) {
- ++attributes[keys[i]];
- }
- else {
- attributes[keys[i]] = 1;
- found = true;
- }
- }
-
- if (found) {
- ++newDocuments;
- }
- }
-
- if (newDocuments / probes <= threshold) {
- break;
- }
- }
-
- return Object.keys(attributes);
-}
-```
-
-*B) The way to find all top-level attributes*
-
-If you don't mind making some extra inserts and you don't care about deletions or updates of documents you can use the following:
-
-```js
-db._create("mykeys");
-db.mykeys.ensureUniqueSkiplist("attribute");
-
-
-function insert(collection, document) {
- var result = collection.save(document);
-
- try {
-    var keys = Object.keys(document);
-
-    for (var i = 0; i < keys.length; ++i) {
- try {
- db.mykeys.save({ attribute: keys[i] });
- }
- catch (err1) {
- // potential unique key constraint violations
- }
- }
- }
- catch (err2) {
- }
-
- return result;
-}
-```
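-
-A short usage sketch from arangosh (collection and document are made up):
-
-```js
-db._create("myCollection");
-insert(db.myCollection, { name: "test", value: 23, nested: { ignored: true } });
-db.mykeys.toArray(); // contains documents like { attribute: "name" }, { attribute: "value" }, ...
-```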
-
-## Comment
-
-*A) The fast way with some random samplings:*
-
-You get some random sampling with bounded complexity.
-If you have a variety of attributes you should repeat the procedure more than 10 times.
-
-The procedure can be implemented as a server side action.
-
-*B) The way to find all top-level attributes*:
-
-This procedure will not care about updates or deletions of documents.
-Also, only the top-level attributes of the documents will be inserted and nested ones ignored.
-
-The procedure can be implemented as a server side action.
-
-**Author:** [ArangoDB](https://github.com/arangodb)
-
-**Tags:** #collection #database
\ No newline at end of file
diff --git a/Documentation/Books/Cookbook/Administration/Authentication.md b/Documentation/Books/Cookbook/Administration/Authentication.md
deleted file mode 100644
index 39c21c0e78e6..000000000000
--- a/Documentation/Books/Cookbook/Administration/Authentication.md
+++ /dev/null
@@ -1,114 +0,0 @@
-Using authentication
-====================
-
-Problem
--------
-
-I want to use authentication in ArangoDB.
-
-Solution
---------
-
-In order to make authentication work properly, you will need to create user accounts first.
-
-Then adjust ArangoDB's configuration and turn on authentication (if it's off).
-
-### Set up or adjust user accounts
-
-ArangoDB user accounts are valid throughout a server instance and users can be granted
-access to one or more databases. They are managed through the database named `_system`.
-
-To manage user accounts, connect with the ArangoShell to the ArangoDB host and the
-`_system` database:
-
-```
-$ arangosh --server.endpoint tcp://127.0.0.1:8529 --server.database "_system"
-```
-
-By default, arangosh will connect with a username `root` and an empty password. This
-will work if authentication is turned off.
-
-When connected, you can create a new user account with the following command:
-
-```
-arangosh> require("org/arangodb/users").save("myuser", "mypasswd");
-```
-
-`myuser` will be the username and `mypasswd` will be the user's password. Note that running
-the command like this may store the password literally in ArangoShell's history.
-
-To avoid that, use a dynamically created password, e.g.:
-
-```
-arangosh> passwd = require("internal").genRandomAlphaNumbers(20);
-arangosh> require("org/arangodb/users").save("myuser", passwd);
-```
-
-The above will print the password on screen (so you can memorize it) but won't store
-it in the command history.
-
-While there, you probably want to change the password of the default `root` user too.
-Otherwise one will be able to connect with the default `root` user and its
-empty password. The following commands change the `root` user's password:
-
-```
-arangosh> passwd = require("internal").genRandomAlphaNumbers(20);
-arangosh> require("org/arangodb/users").update("root", passwd);
-```
-
-### Turn on authentication
-
-Authentication is turned on by default in ArangoDB. You should make sure that it was
-not turned off manually however. Check the configuration file (normally named
-`/etc/arangodb.conf`) and make sure it contains the following line in the `server` section:
-
-```
-authentication = true
-```
-
-This will make ArangoDB require authentication for every request (including requests to
-Foxx apps).
-
-If you want to run Foxx apps without HTTP authentication, but activate HTTP authentication
-for the built-in server APIs, you can add the following line in the `server` section of
-the configuration:
-
-```
-authentication-system-only = true
-```
-
-The above will bypass authentication for requests to Foxx apps.
-
-When finished making changes, you need to restart ArangoDB:
-
-```
-service arangodb restart
-```
-
-### Check accessibility
-
-To confirm authentication is in effect, try connecting to ArangoDB with the ArangoShell:
-
-```
-$ arangosh --server.endpoint tcp://127.0.0.1:8529 --server.database "_system"
-```
-
-The above will implicitly use a username `root` and an empty password when connecting. If
-you changed the password of the `root` account as described above, this should not work anymore.
-
-You should also validate that you can connect with a valid user:
-
-```
-$ arangosh --server.endpoint tcp://127.0.0.1:8529 --server.database "_system" --server.username myuser
-```
-
-You can also use curl to check that you are actually getting HTTP 401 (Unauthorized) server
-responses for requests that require authentication:
-
-```
-$ curl --dump-header - http://127.0.0.1:8529/_api/version
-```
-
-**Author:** [Jan Steemann](https://github.com/jsteemann)
-
-**Tags**: #authentication #security
diff --git a/Documentation/Books/Cookbook/Administration/ImportingData.md b/Documentation/Books/Cookbook/Administration/ImportingData.md
deleted file mode 100644
index fa7c0588f7ef..000000000000
--- a/Documentation/Books/Cookbook/Administration/ImportingData.md
+++ /dev/null
@@ -1,200 +0,0 @@
-Importing data
-==============
-
-Problem
--------
-
-I want to import data from a file into ArangoDB.
-
-Solution
---------
-
-ArangoDB comes with a command-line utility named `arangoimport`. This utility can be
-used for importing JSON-encoded, CSV, and tab-separated files into ArangoDB.
-
-`arangoimport` needs to be invoked from the command-line once for each import file.
-The target collection can already exist or can be created by the import run.
-
-### Importing JSON-encoded data
-
-#### Input formats
-
-There are two supported input formats for importing JSON-encoded data into ArangoDB:
-
-- **line-by-line format**: This format expects each line in the input file to be a valid
-  JSON object. Line breaks must not occur within a single JSON object
-
-- **array format**: Expects a file containing a single array of JSON objects. Whitespace is
- allowed for formatting inside the JSON array and the JSON objects
-
-Here's an example of the **line-by-line format**:
-
-```js
-{"author":"Frank Celler","time":"2011-10-26 08:42:49 +0200","sha":"c413859392a45873936cbe40797970f8eed93ff9","message":"first commit","user":"f.celler"}
-{"author":"Frank Celler","time":"2011-10-26 21:32:36 +0200","sha":"10bb77b8cc839201ff59a778f0c740994083c96e","message":"initial release","user":"f.celler"}
-...
-```
-
-Here's an example for the same data in **array format**:
-
-```js
-[
- {
- "author": "Frank Celler",
- "time": "2011-10-26 08:42:49 +0200",
- "sha": "c413859392a45873936cbe40797970f8eed93ff9",
- "message": "first commit",
- "user": "f.celler"
- },
- {
- "author": "Frank Celler",
- "time": "2011-10-26 21:32:36 +0200",
- "sha": "10bb77b8cc839201ff59a778f0c740994083c96e",
- "message": "initial release",
- "user": "f.celler"
- },
- ...
-]
-```
-
-#### Importing JSON data in line-by-line format
-
-An example data file in **line-by-line format** can be downloaded
-[here](http://jsteemann.github.io/downloads/code/git-commits-single-line.json). The example
-file contains all the commits to the ArangoDB repository as shown by `git log --reverse`.
-
-The following commands will import the data from the file into a collection named `commits`:
-
-```bash
-# download file
-wget http://jsteemann.github.io/downloads/code/git-commits-single-line.json
-
-# actually import data
-arangoimport --file git-commits-single-line.json --collection commits --create-collection true
-```
-
-Note that no file type has been specified when `arangoimport` was invoked. This is because `json`
-is its default input format.
-
-The other parameters used have the following meanings:
-
-- `file`: input filename
-- `collection`: name of the target collection
-- `create-collection`: whether or not the collection should be created if it does not exist
-
-The result of the import printed by `arangoimport` should be:
-
-```
-created: 20039
-warnings/errors: 0
-total: 20039
-```
-
-The collection `commits` should now contain the example commit data as present in the input file.
-
-#### Importing JSON data in array format
-
-An example input file for the **array format** can be found [here](http://jsteemann.github.io/downloads/code/git-commits-array.json).
-
-The command for importing JSON data in **array format** is similar to what we've done before:
-
-```bash
-# download file
-wget http://jsteemann.github.io/downloads/code/git-commits-array.json
-
-# actually import data
-arangoimport --file git-commits-array.json --collection commits --create-collection true
-```
-
-Though the import command is the same (except the filename), there is a notable difference between the
-two JSON formats: for the **array format**, `arangoimport` will read and parse the JSON in its entirety
-before it sends any data to the ArangoDB server. That means the whole input file must fit into
-`arangoimport`'s buffer. By default, `arangoimport` will allocate a 16 MiB internal buffer, and input files bigger
-than that will be rejected with the following message:
-
-```
-import file is too big. please increase the value of --batch-size (currently 16777216).
-```
-
-So for JSON input files in **array format** it might be necessary to increase the value of `--batch-size`
-in order to have the file imported. Alternatively, the input file can be converted to **line-by-line format**
-manually.
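-
-Such a conversion can be done with a few lines of code. A rough Node.js sketch
-(file names taken from the example above):
-
-```js
-// read the array-format file and write one JSON object per line
-var fs = require("fs");
-var docs = JSON.parse(fs.readFileSync("git-commits-array.json", "utf8"));
-fs.writeFileSync("git-commits-single-line.json",
-                 docs.map(function (doc) { return JSON.stringify(doc); }).join("\n") + "\n");
-```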
-
-
-### Importing CSV data
-
-Data can also be imported from a CSV file. An example file can be found [here](http://jsteemann.github.io/downloads/code/git-commits.csv).
-
-The `--type` parameter for the import command must now be set to `csv`:
-
-```bash
-# download file
-wget http://jsteemann.github.io/downloads/code/git-commits.csv
-
-# actually import data
-arangoimport --file git-commits.csv --type csv --collection commits --create-collection true
-```
-
-For the CSV import, the first line in the input file has a special meaning: every value listed in the
-first line will be treated as an attribute name for the values in all following lines. All following
-lines should also have the same number of "columns".
-
-"columns" inside the CSV input file can be left empty though. If a "column" is left empty in a line,
-then this value will be omitted for the import so the respective attribute will not be set in the imported
-document. Note that values from the input file that are enclosed in double quotes will always be imported as
-strings. To import numeric values, boolean values or the `null` value, don't enclose these values in quotes in
-the input file. Note that leading zeros in numeric values will be removed. Importing numbers with leading
-zeros will only work when putting the numbers into strings.
-
-Here is an example CSV file:
-
-```plain
-"author","time","sha","message"
-"Frank Celler","2011-10-26 08:42:49 +0200","c413859392a45873936cbe40797970f8eed93ff9","first commit"
-"Frank Celler","2011-10-26 21:32:36 +0200","10bb77b8cc839201ff59a778f0c740994083c96e","initial release"
-...
-```
-
-`arangoimport` supports Windows (CRLF) and Unix (LF) line breaks. Line breaks might also occur inside values
-that are enclosed with the quote character.
-
-The default separator for CSV files is the comma. It can be changed using the `--separator` parameter
-when invoking `arangoimport`. The quote character defaults to the double quote (**"**). To use a literal double
-quote inside a "column" in the import data, use two double quotes. To change the quote character, use the
-`--quote` parameter. To use a backslash for escaping quote characters, please set the option `--backslash-escape`
-to `true`.
-
-
-### Changing the database and server endpoint
-
-By default, `arangoimport` will connect to the default database on `127.0.0.1:8529` with a user named
-`root`. To change this, use the following parameters:
-
-- `server.database`: name of the database to use when importing (default: `_system`)
-- `server.endpoint`: address of the ArangoDB server (default: `tcp://127.0.0.1:8529`)
-
-
-### Using authentication
-
-`arangoimport` will by default send a username `root` and an empty password to the ArangoDB
-server. This is ArangoDB's default configuration, and it should be changed. To make `arangoimport`
-use a different username or password, the following command-line arguments can be used:
-
-- `server.username`: username, used if authentication is enabled on server
-- `server.password`: password for user, used if authentication is enabled on server
-
-The password argument can also be omitted in order to avoid having it saved in the shell's
-command-line history. When specifying a username but omitting the password parameter,
-`arangoimport` will prompt for a password.
-
-
-### Additional parameters
-
-By default, `arangoimport` will import data into the specified collection but will not touch
-existing data. Often it is convenient to first remove all data from a collection and then run
-the import. `arangoimport` supports this with the optional `--overwrite` flag. When setting it to
-`true`, all documents in the collection will be removed prior to the import.
-
-**Author:** [Jan Steemann](https://github.com/jsteemann)
-
-**Tags**: #arangoimport #import
diff --git a/Documentation/Books/Cookbook/Administration/Migrate2.8to3.0.md b/Documentation/Books/Cookbook/Administration/Migrate2.8to3.0.md
deleted file mode 100644
index e906b443961b..000000000000
--- a/Documentation/Books/Cookbook/Administration/Migrate2.8to3.0.md
+++ /dev/null
@@ -1,109 +0,0 @@
-Migration from ArangoDB 2.8 to 3.0
-==================================
-
-Problem
--------
-
-I want to use ArangoDB 3.0 from now on but I still have data in ArangoDB 2.8.
-I need to migrate my data. I am running an ArangoDB 3.0 cluster (and
-possibly a cluster with ArangoDB 2.8 as well).
-
-Solution
---------
-
-The internal data format changed completely from ArangoDB 2.8 to 3.0,
-therefore you have to dump all data using `arangodump` and then
-restore it to the new ArangoDB instance using `arangorestore`.
-
-General instructions for this procedure can be found
-[in the manual](../../Manual/Upgrading/VersionSpecific/Upgrading30.html).
-Here, we cover some additional details about the cluster case.
-
-### Dumping the data in ArangoDB 2.8
-
-Basically, dumping the data works with the following command (use `arangodump`
-from your ArangoDB 2.8 distribution!):
-
- arangodump --server.endpoint tcp://localhost:8530 --output-directory dump
-
-or a variation of it, for details see the above mentioned manual page and
-[this section](https://docs.arangodb.com/2.8/HttpBulkImports/Arangodump.html).
-If your ArangoDB 2.8 instance is a cluster, simply use one of the
-coordinator endpoints as the above `--server.endpoint`.
-
-### Restoring the data in ArangoDB 3.0
-
-The output consists of JSON files in the output directory, two for each
-collection, one for the structure and one for the data. The data format
-is 100% compatible with ArangoDB 3.0, except that ArangoDB 3.0 has
-an additional option in the structure files for synchronous replication,
-namely the attribute `replicationFactor`, which is used to specify
-how many copies of the data for each shard are kept in the cluster.
-
-Therefore, you can simply use this command (use the `arangorestore` from
-your ArangoDB 3.0 distribution!):
-
- arangorestore --server.endpoint tcp://localhost:8530 --input-directory dump
-
-to import your data into your new ArangoDB 3.0 instance. See
-[this page](../../Manual/Programs/Arangorestore/index.html)
-for details on the available command line options. If your ArangoDB 3.0
-instance is a cluster, then simply use one of the coordinators as
-`--server.endpoint`.
-
-That is it: your data is migrated.
-
-### Controlling the number of shards and the replication factor
-
-This procedure works for all four combinations of single server and cluster
-for source and destination respectively. If the target is a single server,
-everything simply works.
-
-So it remains to explain how one controls the number of shards and the
-replication factor if the destination is a cluster.
-
-If the source was a cluster, `arangorestore` will use the same number
-of shards as before if you do not tell it otherwise. Since ArangoDB 2.8
-does not have synchronous replication, it does not produce dumps
-with the `replicationFactor` attribute, and so `arangorestore` will
-use replication factor 1 for all collections. If the source was a
-single server, the same will happen; additionally, `arangorestore`
-will always create collections with just a single shard.
-
-There are essentially 3 ways to change this behavior:
-
- 1. The first is to create the collections explicitly on the
- ArangoDB 3.0 cluster, and then set the `--create-collection false` flag.
- In this case you can control the number of shards and the replication
- factor for each collection individually when you create them.
- 2. The second is to use `arangorestore`'s options
- `--default-number-of-shards` and `--default-replication-factor`
- (this option was introduced in Version 3.0.2)
- respectively to specify default values, which are taken if the
- dump files do not specify numbers. This means that all such
- restored collections will have the same number of shards and
- replication factor.
- 3. If you need more control, you can simply edit the structure files
-    in the dump. They are plain JSON files; you can even run them through
-    a JSON pretty printer first to make editing easier. For the
-    replication factor you simply have to add a `replicationFactor`
-    attribute with a numerical value to the `parameters` subobject.
-    For the number of shards, locate the `shards` subattribute of the
-    `parameters` attribute and edit it so that it has the right
-    number of attributes. The actual names of the attributes as well
-    as their values do not matter. Alternatively, add a `numberOfShards`
-    attribute to the `parameters` subobject; this will override the
-    `shards` attribute (this possibility was introduced in Version
-    3.0.2). A sketch of such an edit is shown below.
-
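-As a rough sketch (the collection name and the numerical values are just illustrative,
-and real structure files contain more attributes), the `parameters` part of an edited
-structure file could look like this:
-
-    {
-      "parameters": {
-        "name": "mycollection",
-        "numberOfShards": 3,
-        "replicationFactor": 2
-      }
-    }
-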
-Note that you can remove individual collections from your dump by
-deleting their pair of structure and data files in the dump directory.
-In this way you can restore your data in several steps or even
-parallelize the restore operation by running multiple `arangorestore`
-processes concurrently on different dump directories. You should
-consider using different coordinators for the different `arangorestore`
-processes in this case.
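-
-For instance, after splitting the dump into two directories, you could run the following
-two commands concurrently, e.g. from two different shells (the coordinator endpoints and
-directory names are placeholders):
-
-    arangorestore --server.endpoint tcp://coordinator1:8529 --input-directory dump-part1
-    arangorestore --server.endpoint tcp://coordinator2:8529 --input-directory dump-part2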
-
-All these possibilities together give you full control over the sharding
-layout of your data in the new ArangoDB 3.0 cluster.
-
diff --git a/Documentation/Books/Cookbook/Administration/NSISSilentMode.md b/Documentation/Books/Cookbook/Administration/NSISSilentMode.md
deleted file mode 100644
index 87cc997e3215..000000000000
--- a/Documentation/Books/Cookbook/Administration/NSISSilentMode.md
+++ /dev/null
@@ -1,40 +0,0 @@
-Installing ArangoDB unattended under Windows
-============================================
-
-Problem
--------
-The available NSIS-based installer requires user interaction; this may be unwanted for unattended installs, e.g. via Chocolatey.
-
-Solution
---------
-The NSIS installer now offers a ["Silent Mode"](http://nsis.sourceforge.net/Docs/Chapter3.html) which allows you to run it non-interactively
-and specify all choices available in the UI via command-line arguments.
-
-The options are, like all other NSIS options, specified in the form `/OPTIONNAME=value`.
-
-## Supported options
-
-*For Installation*:
-
- - PASSWORD - Set the database password. Newer versions will also try to evaluate a PASSWORD environment variable
-
- - INSTDIR - Installation directory. A directory to which you have write access.
- - DATABASEDIR - Database directory. A directory to which you have write access and in which the databases should be created.
- - APPDIR - Foxx Services directory. A directory to which you have write access.
- - INSTALL_SCOPE_ALL:
-  - 1 - AllUsers + Service - launch the ArangoDB service via the Windows Services, install it for all users
-  - 0 - SingleUser - install it into the home of this user, don't launch a service. Optionally create a desktop icon so the user can do this.
- - DESKTOPICON - [0/1] whether to create icons on the desktop that reference arangosh and the web interface
- - PATH
-  - 0 - don't alter the PATH environment at all
-  - 1:
-    - INSTALL_SCOPE_ALL = 1 - add it to the PATH for all users
-    - INSTALL_SCOPE_ALL = 0 - add it to the PATH of the currently logged-in user
- - STORAGE_ENGINE - [auto/mmfiles/rocksdb] which storage engine to use (arangodb 3.2 onwards)
-
-*For Uninstallation*:
- - PURGE_DB - [0/1] if set to 1 the database files ArangoDB created during its lifetime will be removed too.
-
-## Generic Options derived from NSIS
-
- - S - silent - don't open the UI during installation
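-
-## Example
-
-Putting it together, an unattended installation for all users could look roughly like this
-(the installer filename and the password value are placeholders; adjust them to your setup):
-
-    ArangoDB3-<version>_win64.exe /S /INSTALL_SCOPE_ALL=1 /PASSWORD=mySecretPassword /PATH=1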
diff --git a/Documentation/Books/Cookbook/Administration/README.md b/Documentation/Books/Cookbook/Administration/README.md
deleted file mode 100644
index d206ad7a6ea6..000000000000
--- a/Documentation/Books/Cookbook/Administration/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-Administration
-==============
-
-- [Using Authentication](Authentication.md)
-
-- [Importing Data](ImportingData.md)
-
-- [Replicating Data](ReplicatingData.md)
-
-- [Installing ArangoDB unattended under Windows](NSISSilentMode.md)
-
-- [Migrating 2.8 to 3.0](Migrate2.8to3.0.md)
-
-- [A function to show grants in Arangosh](ShowUsersGrants.md)
diff --git a/Documentation/Books/Cookbook/Administration/ReplicatingData.md b/Documentation/Books/Cookbook/Administration/ReplicatingData.md
deleted file mode 100644
index 5c3ffc8d9fbe..000000000000
--- a/Documentation/Books/Cookbook/Administration/ReplicatingData.md
+++ /dev/null
@@ -1,184 +0,0 @@
-Replicating data from different databases
-=========================================
-
-Problem
--------
-
-You have two or more different databases, each with its own data and collections, but you want your data to be collected in one place.
-
-**Note**: For this solution you need at least ArangoDB 2.0 and you must run the script in every database you want to collect data from.
-
-Solution
---------
-
-First of all, you have to start a server on an endpoint:
-
-```
-arangod --server.endpoint tcp://127.0.0.1:8529
-```
-
-Now you have to create two collections and name them *data* and *replicationStatus*
-
-```js
-db._create("data");
-db._create("replicationStatus");
-```
-
-Save the following script in a file named *js/common/modules/org/mysync.js*
-
-```js
-var internal = require("internal");
-
-// maximum number of changes that we can handle
-var maxChanges = 1000;
-
-// URL of central node
-var transferUrl = "http://127.0.0.1:8599/_api/import?collection=central&type=auto&createCollection=true&complete=true";
-
-var transferOptions = {
- method: "POST",
- timeout: 60
-};
-
-// the collection that keeps the status of what got replicated to central node
-var replicationCollection = internal.db.replicationStatus;
-
-// the collection containing all data changes
-var changesCollection = internal.db.data;
-
-function keyCompare (l, r) {
- if (l.length != r.length) {
- return l.length - r.length < 0 ? -1 : 1;
- }
-
- // length is equal
-  for (var i = 0; i < l.length; ++i) {
- if (l[i] != r[i]) {
- return l[i] < r[i] ? -1 : 1;
- }
- }
-
- return 0;
-};
-
-function logger (msg) {
- "use strict";
-
- require("console").log("%s", msg);
-}
-
-function replicate () {
- "use strict";
-
- var key = "status"; // const
-
- var status, newStatus;
- try {
- // fetch the previous replication state
- status = replicationCollection.document(key);
- newStatus = { _key: key, lastKey: status.lastKey };
- }
- catch (err) {
- // no previous replication state. start from the beginning
- newStatus = { _key: key, lastKey: "0" };
- }
-
- // fetch the latest changes (need to reverse them because `last` returns newest changes first)
- var changes = changesCollection.last(maxChanges).reverse(), change;
- var transfer = [ ];
- for (change in changes) {
- if (changes.hasOwnProperty(change)) {
- var doc = changes[change];
- if (keyCompare(doc._key, newStatus.lastKey) <= 0) {
- // already handled in a previous replication run
- continue;
- }
-
- // documents we need to transfer
- // if necessary, we could rewrite the documents here, e.g. insert
- // extra values, create client-specific keys etc.
- transfer.push(doc);
-
- if (keyCompare(doc._key, newStatus.lastKey) > 0) {
- // keep track of highest key
- newStatus.lastKey = doc._key;
- }
- }
- }
-
- if (transfer.length === 0) {
- // nothing to do
- logger("nothing to transfer");
- return;
- }
-
- logger("transferring " + transfer.length + " document(s)");
-
- // now transfer the documents to the remote server
- var result = internal.download(transferUrl, JSON.stringify(transfer), transferOptions);
-
- if (result.code >= 200 && result.code <= 202) {
- logger("central server accepted the documents: " + JSON.stringify(result));
- }
- else {
- // error
- logger("central server did not accept the documents: " + JSON.stringify(result));
- throw "replication error";
- }
-
- // update the replication state
- if (status) {
- // need to update the previous replication state
- replicationCollection.update(key, newStatus);
- }
- else {
- // need to insert the replication state (1st time)
- replicationCollection.save(newStatus);
- }
-
- logger("deleting old documents");
-
- // finally remove all elements that we transferred successfully from the changes collection
- // no need to keep them
- transfer.forEach(function (k) {
- changesCollection.remove(k);
- });
-}
-
-exports.execute = function (param) {
- "use strict";
-
- logger("replication wake up");
- replicate();
- logger("replication shutdown");
-};
-```
-
-Afterwards, change the URL of the central node in the script to the one you have chosen before - e.g. *tcp://127.0.0.1:8599*
-
-Now register the script as a recurring action:
-
-```js
-require("internal").definePeriodic(1, 10, "org/arangodb/mysync", "execute", "");
-```
-
-**Note**: At this point you can change the interval at which the script will be executed.
-
-Comment
--------
-
-The server started on the endpoint will be the central node. It collects changes from the local node by replicating its data.
-The script will pick up everything that has been changed in your *data* collection since the last run.
-Every 10 seconds - or the interval you have chosen - the script will be executed and send the changed data to the central
-node, where it will be imported into a collection named *central*.
-After that the transferred data will be removed from the *data* collection.
-
-If you want to test your script, simply add some data to your *data* collection, e.g.:
-
-```js
-for (i = 0; i < 100; ++i) db.data.save({ value: i });
-```
-
-**Author:** [Jan Steemann](https://github.com/jsteemann)
-
-**Tags:** #database #collection
\ No newline at end of file
diff --git a/Documentation/Books/Cookbook/Administration/Replication/README.md b/Documentation/Books/Cookbook/Administration/Replication/README.md
deleted file mode 100644
index 6a67150809f5..000000000000
--- a/Documentation/Books/Cookbook/Administration/Replication/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Replication
-
-This Section includes cookbook recipes related to the *Replication* topic.
-
-* [Replicating data from different databases](../ReplicatingData.md)
-* [Speeding up slave initialization](ReplicationFromBackup.md)
diff --git a/Documentation/Books/Cookbook/Administration/Replication/ReplicationFromBackup.md b/Documentation/Books/Cookbook/Administration/Replication/ReplicationFromBackup.md
deleted file mode 100644
index 1d86d1851714..000000000000
--- a/Documentation/Books/Cookbook/Administration/Replication/ReplicationFromBackup.md
+++ /dev/null
@@ -1,257 +0,0 @@
-# Speeding up slave initialization
-
-## Problem
-
-You have a very big database and want to set up a `master-slave` replication between two or more ArangoDB instances. Transferring the entire database over the network may take a long time if the database is big. In order to speed up the replication initialization process, the **slave** can be initialized using a backup of the **master**.
-
-For the following example setup, we will use the instance with endpoint `tcp://master.domain.org:8529` as master, and the instance with endpoint `tcp://slave.domain.org:8530` as slave.
-
-The goal is to have all data from the database `_system` on the master replicated to the database `_system` on the slave (the same process can be applied for other databases).
-
-## Solution
-
-First of all you have to start the master server, using a command like the following:
-
-```sh
-arangod --server.endpoint tcp://master.domain.org:8529
-```
-
-Depending on your storage engine you may also want to adjust the following options:
-
-For MMFiles:
-
-```sh
---wal.historic-logfiles (maximum number of historic logfiles to keep after collection
- (default: 10))
-```
-
-For RocksDB:
-
-```sh
---rocksdb.wal-file-timeout (timeout after which unused WAL files are deleted
- in seconds (default: 10))
-```
-
-The options above prevent the premature removal of old WAL files from the master, and are useful in case intense write operations happen on the master while you are initializing the slave. In fact, if you do not tune these options, what can happen is that the master WAL files do not include all the write operations that happened after the backup was taken. This may lead to situations in which the initialized slave is missing some data, or fails to start.
-
-Now you have to create a dump from the master using the tool `arangodump`:
-
-```sh
-arangodump --output-directory "dump" --server.endpoint tcp://master.domain.org:8529
-```
-
-Please adapt the `arangodump` command to your specific case.
-
-The following is a possible `arangodump` output:
-
-```sh
-Server version: 3.3
-Connected to ArangoDB 'tcp://master.domain.org:8529', database: '_system', username: 'root'
-Writing dump to output directory 'dump'
-Last tick provided by server is: 37276350
-# Dumping document collection 'TestNums'...
-# Dumping document collection 'TestNums2'...
-# Dumping document collection 'frenchCity'...
-# Dumping document collection 'germanCity'...
-# Dumping document collection 'persons'...
-# Dumping edge collection 'frenchHighway'...
-# Dumping edge collection 'germanHighway'...
-# Dumping edge collection 'internationalHighway'...
-# Dumping edge collection 'knows'...
-Processed 9 collection(s), wrote 1298855504 byte(s) into datafiles, sent 32 batch(es)
-```
-
-In line *4* the last server `tick` is displayed. This value will be useful when we start the replication, to have the `replication-applier` start replicating exactly from that `tick`.
-
-Next you have to start the slave:
-
-```sh
-arangod --server.endpoint tcp://slave.domain.org:8530
-```
-
-If you are running master and slave on the same server (just for testing), please make sure you give your slave a different data directory.
-
-Now you are ready to restore the dump with the tool `arangorestore`:
-
-```sh
-arangorestore --input-directory "dump" --server.endpoint tcp://slave.domain.org:8530
-```
-
-Again, please adapt the command above in case you are using a database different than `_system`.
-
-Once the restore is finished there are two possible approaches to start the replication.
-
-### Approach 1: All-in-one setup
-
-Start replication on the slave with `arangosh` using the following command:
-
-```sh
-arangosh --server.endpoint tcp://slave.domain.org:8530
-```
-
-```js
-db._useDatabase("_system");
-require("@arangodb/replication").setupReplication({
- endpoint: "tcp://master.domain.org:8529",
- username: "myuser",
- password: "mypasswd",
- verbose: false,
- includeSystem: false,
- incremental: true,
- autoResync: true
-});
-```
-
-The following is the printed output:
-
-```sh
-still synchronizing... last received status: 2017-12-06T14:06:25Z: fetching collection keys for collection 'TestNums' from /_api/replication/keys/keys?collection=7173693&to=57482456&serverId=24282855553110&batchId=57482462
-still synchronizing... last received status: 2017-12-06T14:06:25Z: fetching collection keys for collection 'TestNums' from /_api/replication/keys/keys?collection=7173693&to=57482456&serverId=24282855553110&batchId=57482462
-[...]
-still synchronizing... last received status: 2017-12-06T14:07:13Z: sorting 10000000 local key(s) for collection 'TestNums'
-still synchronizing... last received status: 2017-12-06T14:07:13Z: sorting 10000000 local key(s) for collection 'TestNums'
-[...]
-still synchronizing... last received status: 2017-12-06T14:09:10Z: fetching master collection dump for collection 'TestNums3', type: document, id 37276943, batch 2, markers processed: 15278, bytes received: 2097258
-still synchronizing... last received status: 2017-12-06T14:09:18Z: fetching master collection dump for collection 'TestNums5', type: document, id 37276973, batch 5, markers processed: 123387, bytes received: 17039688
-[...]
-still synchronizing... last received status: 2017-12-06T14:13:49Z: fetching master collection dump for collection 'TestNums5', type: document, id 37276973, batch 132, markers processed: 9641823, bytes received: 1348744116
-still synchronizing... last received status: 2017-12-06T14:13:59Z: fetching collection keys for collection 'frenchCity' from /_api/replication/keys/keys?collection=27174045&to=57482456&serverId=24282855553110&batchId=57482462
-{
- "state" : {
- "running" : true,
- "lastAppliedContinuousTick" : null,
- "lastProcessedContinuousTick" : null,
- "lastAvailableContinuousTick" : null,
- "safeResumeTick" : null,
- "progress" : {
- "time" : "2017-12-06T14:13:59Z",
- "message" : "send batch finish command to url /_api/replication/batch/57482462?serverId=24282855553110",
- "failedConnects" : 0
- },
- "totalRequests" : 0,
- "totalFailedConnects" : 0,
- "totalEvents" : 0,
- "totalOperationsExcluded" : 0,
- "lastError" : {
- "errorNum" : 0
- },
- "time" : "2017-12-06T14:13:59Z"
- },
- "server" : {
- "version" : "3.3.devel",
- "serverId" : "24282855553110"
- },
- "endpoint" : "tcp://master.domain.org:8529",
- "database" : "_system"
-}
-```
-
-This is the same command that you would use to start replication even without taking a backup first. The difference, in this case, is that the data already present on the slave (restored from the backup) is not transferred over the network from the master to the slave again.
-
-The command above will only check that the data already included in the slave is in sync with the master. After this check, the `replication-applier` will make sure that all write operations that happened on the master after the backup are replicated on the slave.
-
-While this approach is definitely faster than transferring the whole database over the network, since a sync check is performed, it can still require some time.
-
-### Approach 2: Apply replication by tick
-
-In this approach, the sync check described above is not performed. As a result this approach is faster as the existing slave data is not checked. Write operations are executed starting from the `tick` you provide and continue with the master's available `ticks`.
-
-This is still a safe way to start replication, as long as the correct `tick` is passed.
-
-As previously mentioned the last `tick` provided by the master is displayed when using `arangodump`. In our example the last tick was **37276350**.
-
-First of all you have to set the replication applier properties, using `arangosh` on the slave:
-
-```sh
-arangosh --server.endpoint tcp://slave.domain.org:8530
-```
-
-```js
-db._useDatabase("_system");
-require("@arangodb/replication").applier.properties({
- endpoint: "tcp://master.domain.org:8529",
- username: "myuser",
- password: "mypasswd",
- verbose: false,
- includeSystem: false,
- incremental: true,
- autoResync: true});
-```
-
-Then you can start the replication with the last provided `logtick` of the master (output of `arangodump`):
-
-```js
-require("@arangodb/replication").applier.start(37276350)
-```
-
-The following is the printed output:
-
-```sh
-{
- "state" : {
- "running" : true,
- "lastAppliedContinuousTick" : null,
- "lastProcessedContinuousTick" : null,
- "lastAvailableContinuousTick" : null,
- "safeResumeTick" : null,
- "progress" : {
- "time" : "2017-12-06T13:26:04Z",
- "message" : "applier initially created for database '_system'",
- "failedConnects" : 0
- },
- "totalRequests" : 0,
- "totalFailedConnects" : 0,
- "totalEvents" : 0,
- "totalOperationsExcluded" : 0,
- "lastError" : {
- "errorNum" : 0
- },
- "time" : "2017-12-06T13:33:25Z"
- },
- "server" : {
- "version" : "3.3.devel",
- "serverId" : "176090204017635"
- },
- "endpoint" : "tcp://master.domain.org:8529",
- "database" : "_system"
-}
-```
-
-After the replication has been started with the command above, you can use the `applier.state` command to check how far the last applied `tick` on the slave is from the last available master `tick`:
-
-```sh
-require("@arangodb/replication").applier.state()
-{
- "state" : {
- "running" : true,
- "lastAppliedContinuousTick" : "42685113",
- "lastProcessedContinuousTick" : "42685113",
- "lastAvailableContinuousTick" : "57279944",
- "safeResumeTick" : "37276974",
- "progress" : {
- "time" : "2017-12-06T13:35:25Z",
- "message" : "fetching master log from tick 42685113, first regular tick 37276350, barrier: 0, open transactions: 1",
- "failedConnects" : 0
- },
- "totalRequests" : 190,
- "totalFailedConnects" : 0,
- "totalEvents" : 2704032,
- "totalOperationsExcluded" : 0,
- "lastError" : {
- "errorNum" : 0
- },
- "time" : "2017-12-06T13:35:25Z"
- },
- "server" : {
- "version" : "3.3.devel",
- "serverId" : "176090204017635"
- },
- "endpoint" : "tcp://master.domain.org:8529",
- "database" : "_system"
-}
-```
-
-
-**Author:** [Max Kernbach](https://github.com/maxkernbach)
-
-**Tags:** #database #replication #arangodump #arangorestore
diff --git a/Documentation/Books/Cookbook/Administration/ShowUsersGrants.md b/Documentation/Books/Cookbook/Administration/ShowUsersGrants.md
deleted file mode 100644
index 57e7f82bb49d..000000000000
--- a/Documentation/Books/Cookbook/Administration/ShowUsersGrants.md
+++ /dev/null
@@ -1,48 +0,0 @@
-Show grants function
-====================
-
-Problem
--------
-
-I want to see which database grants my users have.
-
-
-Solution
---------
-Create a global function in your _.arangosh.rc_ file like this:
-```
-global.show_grants = function () {
-  // list every user together with the databases they have been granted access to
-  let stmt = db._createStatement({"query": "FOR u IN _users RETURN {\"user\": u.user, \"databases\": u.databases}"});
-  console.log(stmt.execute().toString());
-};
-```
-Now, when you enter arangosh, you can call the **show_grants()** function.
-
-#### Example output
-```
-[object ArangoQueryCursor, count: 3, hasMore: false]
-
-
-[
- {
- "user" : "foo",
- "databases" : {
- "_system" : "rw",
- "bar" : "rw"
- }
- },
- {
- "user" : "foo2",
- "databases" : {
- "bar" : "rw"
- }
- },
- {
- "user" : "root",
- "databases" : {
- "*" : "rw"
- }
- }
-]
-```
diff --git a/Documentation/Books/Cookbook/Cloud/DockerContainer.md b/Documentation/Books/Cookbook/Cloud/DockerContainer.md
deleted file mode 100644
index d525bf4412e1..000000000000
--- a/Documentation/Books/Cookbook/Cloud/DockerContainer.md
+++ /dev/null
@@ -1,16 +0,0 @@
-How to run ArangoDB in a Docker container
-=========================================
-
-Problem
--------
-
-How do you make ArangoDB run in a Docker container?
-
-Solution
---------
-
-ArangoDB is now available as an [official repository in the Docker Hub](https://hub.docker.com/_/arangodb/) (see the documentation there).
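-
-As a minimal sketch, a single instance could be started like this (the password value is
-just a placeholder; see the Docker Hub page for the full set of supported environment
-variables and options):
-
-```
-docker run -e ARANGO_ROOT_PASSWORD=openSesame -p 8529:8529 --name arangodb arangodb
-```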
-
-**Author:** [Frank Celler](https://github.com/fceller)
-
-**Tags:** #docker #howto
diff --git a/Documentation/Books/Cookbook/Cloud/NodeJsDocker.md b/Documentation/Books/Cookbook/Cloud/NodeJsDocker.md
deleted file mode 100644
index 6a1056ebe4f4..000000000000
--- a/Documentation/Books/Cookbook/Cloud/NodeJsDocker.md
+++ /dev/null
@@ -1,144 +0,0 @@
-ArangoDB, NodeJS and Docker
-===========================
-
-Problem
--------
-
-I'm looking for a head start in using the ArangoDB docker image.
-
-Solution
---------
-
-We will use the guesser game for ArangoDB from
-
-```
-https://github.com/arangodb/guesser
-```
-
-This is a simple game guessing animals or things. It learns while playing
-and stores the learned information in an ArangoDB instance. The game is written using the
-express framework.
-
-**Note**: You need to switch to the docker branch.
-
-The game has two components:
-
-* front-end with node.js and express
-* back-end with ArangoDB and Foxx
-
-Therefore the guesser game needs two docker containers, one container for the node.js
-server to run the front-end code and one container for ArangoDB for the storage back-end.
-
-### Node Server
-
-The game itself can be installed via NPM or from GitHub. There is an image available on
-Docker Hub called `arangodb/example-guesser`, which is based on the Dockerfile
-from GitHub.
-
-You can either build the docker container locally or simply use the available one from
-docker hub.
-
-```
-unix> docker run -p 8000:8000 -e nolink=1 arangodb/example-guesser
-Starting without a database link
-Using DB-Server http://localhost:8529
-Guesser app server listening at http://0.0.0.0:8000
-```
-
-This will start up node, and the guesser game will be available on port 8000. Now point your
-browser to port 8000. You should see the start-up screen. However, without a storage
-backend it will be pretty useless. Therefore, stop the container and proceed with the next
-step.
-
-If you want to build the container locally, check out the guesser game from
-
-```
-https://github.com/arangodb/example-guesser
-```
-
-Switch into the `docker/node` subdirectory and execute `docker build .`.
-
-### ArangoDB
-
-ArangoDB is already available on docker, so we start an instance
-
-```
-unix> docker run --name arangodb-guesser arangodb/arangodb
-show all options:
- docker run -e help=1 arangodb
-
-starting ArangoDB in stand-alone mode
-```
-
-That's it. Note that in a production environment you would need to attach a storage
-container to it. We ignore this here for the sake of simplicity.
-
-### Guesser Game
-
-
-#### Some Testing
-
-Use the guesser game image to start the ArangoDB shell and link the ArangoDB instance to
-it.
-
-```
-unix> docker run --link arangodb-guesser:db-link -it arangodb/example-guesser arangosh --server.endpoint @DB_LINK_PORT_8529_TCP@
-```
-
-The parameter `--link arangodb-guesser:db-link` links the running ArangoDB into the
-application container and sets an environment variable `DB_LINK_PORT_8529_TCP` which
-points to the exposed port of the ArangoDB container:
-
-```
-DB_LINK_PORT_8529_TCP=tcp://172.17.0.17:8529
-```
-
-Your IP may vary. The command `arangosh ...` at the end of the docker command executes the
-ArangoDB shell instead of the default node command.
-
-```
-Welcome to arangosh 2.3.1 [linux]. Copyright (c) ArangoDB GmbH
-Using Google V8 3.16.14 JavaScript engine, READLINE 6.3, ICU 52.1
-
-Pretty printing values.
-Connected to ArangoDB 'tcp://172.17.0.17:8529' version: 2.3.1, database: '_system', username: 'root'
-
-Type 'tutorial' for a tutorial or 'help' to see common examples
-arangosh [_system]>
-```
-
-The important line is
-
-```
-Connected to ArangoDB 'tcp://172.17.0.17:8529' version: 2.3.1, database: '_system', username: 'root'
-```
-
-It tells you that the application container was able to connect to the database
-back-end. Press `Control-D` to exit.
-
-#### Start Up The Game
-
-Ready to play? Start the front-end container with the database link and initialize the database.
-
-```
-unix> docker run --link arangodb-guesser:db-link -p 8000:8000 -e init=1 arangodb/example-guesser
-```
-
-Use your browser to play the game at the address http://127.0.0.1:8000/.
-The
-
-```
--e init=1
-```
-
-flag is only needed the first time you start up the front-end. The next time you
-run the front-end, or if you start a second front-end server, use
-
-```
-unix> docker run --link arangodb-guesser:db-link -p 8000:8000 arangodb/example-guesser
-```
-
-
-**Author**: [Frank Celler](https://github.com/fceller)
-
-**Tags**: #docker
diff --git a/Documentation/Books/Cookbook/Cloud/README.md b/Documentation/Books/Cookbook/Cloud/README.md
deleted file mode 100644
index 8e1e78ae246b..000000000000
--- a/Documentation/Books/Cookbook/Cloud/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-Cloud, DCOS and Docker
-======================
-
-Docker
-------
-
-- [Docker ArangoDB](DockerContainer.md)
-- [Docker with NodeJS App](NodeJsDocker.md)
-
diff --git a/Documentation/Books/Cookbook/Compiling/Debian.md b/Documentation/Books/Cookbook/Compiling/Debian.md
deleted file mode 100644
index 53de7178f8d1..000000000000
--- a/Documentation/Books/Cookbook/Compiling/Debian.md
+++ /dev/null
@@ -1,251 +0,0 @@
-Compiling on Debian
-===================
-
-Problem
--------
-
-You want to compile and run the devel branch, for example to test a bug fix. In this example the system is Debian-based.
-
-Solution
---------
-
-This solution was made using a fresh Debian Testing machine on Amazon EC2. For completeness, the steps pertaining to AWS are also included in this recipe.
-
-### Launch the VM
-
-*Optional*
-
-Log in to your AWS account and launch an instance of Debian Testing. I used an 'm3.xlarge' since that has a bunch of cores, more than enough memory, optimized networking, and the instance store is on SSDs, which can be switched to provisioned IOPS.
-
-The current AMI IDs can be found in the Debian Wiki: https://wiki.debian.org/Cloud/AmazonEC2Image/Jessie
-
-### Upgrade to the very latest version
-
-*Optional*
-
-Once your EC2 instance is up, log in as `admin` and `sudo su` to become `root`.
-
-First, we remove the backports and change the primary sources.list
-
-```bash
-rm -rf /etc/apt/sources.list.d
-echo "deb http://http.debian.net/debian testing main contrib" > /etc/apt/sources.list
-echo "deb-src http://http.debian.net/debian testing main contrib" >> /etc/apt/sources.list
-```
-
-Update and upgrade the system. Make sure you don't have any broken/unconfigured packages. Sometimes you need to run safe/full upgrade more than once. When you're done, reboot.
-
-```bash
-apt-get install aptitude
-aptitude -y update
-aptitude -y safe-upgrade
-aptitude -y full-upgrade
-reboot
-```
-
-### Install build dependencies
-
-*Mandatory*
-
-Before you can build ArangoDB, you need a few packages pre-installed on your system.
-
-Login again and install them.
-
-```bash
-sudo aptitude -y install git-core \
-                         build-essential \
-                         libssl-dev \
-                         libjemalloc-dev \
-                         cmake \
-                         python2.7
-sudo aptitude -y install libldap2-dev # Enterprise Edition only
-```
-
-### Download the Source
-
-Download the latest source using ***git***:
-
- unix> git clone git://github.com/arangodb/arangodb.git
-
-This will automatically clone the **devel** branch.
-
-Note: if you only plan to compile ArangoDB locally and do not want to modify or push
-any changes, you can speed up cloning substantially by using the *--single-branch* and
-*--depth* parameters for the clone command as follows:
-
- unix> git clone --single-branch --depth 1 git://github.com/arangodb/arangodb.git
-
-### Setup
-
-Switch into the ArangoDB directory
-
- unix> cd arangodb
- unix> mkdir build
- unix> cd build
-
-In order to generate the build environment please execute
-
- unix> cmake ..
-
-to set up the Makefiles. This will check the various system characteristics and
-installed libraries. If you installed the compiler in a non-standard location, you may need to specify it:
-
- cmake -DCMAKE_C_COMPILER=/opt/bin/gcc -DCMAKE_CXX_COMPILER=/opt/bin/g++ ..
-
-If you compile on macOS, you should add the following options to the cmake command:
-
- cmake .. -DOPENSSL_ROOT_DIR=/usr/local/opt/openssl -DCMAKE_OSX_DEPLOYMENT_TARGET=10.11
-
-If you also plan to make changes to the source code of ArangoDB, you may want to compile with the
-`Debug` build type:
-
- cmake .. -DCMAKE_BUILD_TYPE=Debug
-
-The `Debug` target enables additional sanity checks etc. which would slow down production
-binaries. If no build type is specified, ArangoDB will be compiled with build type `RelWithDebInfo`,
-which is a compromise between good performance and medium debugging experience.
-
-Other options valuable for development:
-
- -DUSE_MAINTAINER_MODE=On
-
-Needed if you plan to make changes to AQL language (which is implemented using a lexer and parser
-files in `arangod/Aql/grammar.y` and `arangod/Aql/tokens.ll`) or if you want to enable runtime
-assertions. To use the maintainer mode, your system has to contain the tools FLEX and BISON.
-
- -DUSE_BACKTRACE=On
-
-Use this option if you want to have C++ stacktraces attached to your exceptions. This can be useful
-to more quickly locate the place where an exception or an assertion was thrown. Note that this
-option will slow down the produced binaries a bit and requires building with maintainer mode.
-
- -DUSE_OPTIMIZE_FOR_ARCHITECTURE=On
-
-This will optimize the binary for the target architecture, potentially enabling more compiler
-optimizations, but making the resulting binary less portable.
-
-ArangoDB will then automatically use the configuration from file *etc/relative/arangod.conf*.
-
- -DUSE_FAILURE_TESTS=On
-
-This option activates additional code in the server that intentionally makes the
-server crash or misbehave (e.g. by pretending the system ran out of memory) when certain tests
-are run. This option is useful for writing tests.
-
- -DUSE_JEMALLOC=Off
-
-By default ArangoDB will be built with a bundled version of the JEMalloc allocator. This
-however will not work when using runtime analyzers such as ASAN or Valgrind. In order to use
-these tools for instrumenting an ArangoDB binary, JEMalloc must be turned off during compilation.
-
-### Shared memory
-
-Gyp is used as the makefile generator by V8. Gyp requires shared memory to be available,
-which may not be the case if you e.g. compile in a chroot. You can make it available like this:
-
- none /opt/chroots/ubuntu_precise_x64/dev/shm tmpfs rw,nosuid,nodev,noexec 0 2
- devpts /opt/chroots/ubuntu_precise_x64/dev/pts devpts gid=5,mode=620 0 0
-
-
-### Compilation
-
-Compile the programs (server, client, utilities) by executing
-
- make
-
-in the build subdirectory. This will compile ArangoDB and create the binary executable
-in file `build/bin/arangod`.
-
-### Starting and testing
-
-Check the binary by starting it using the command line.
-
- unix> build/bin/arangod -c etc/relative/arangod.conf --server.endpoint tcp://127.0.0.1:8529 /tmp/database-dir
-
-This will start up ArangoDB and listen for HTTP requests on port 8529 bound
-to IP address 127.0.0.1. You should see startup messages similar to the
-following:
-
-```
-2016-06-01T12:47:29Z [29266] INFO ArangoDB xxx ...
-2016-06-01T12:47:29Z [29266] INFO using endpoint 'tcp://127.0.0.1:8529' for non-encrypted requests
-2016-06-01T12:47:30Z [29266] INFO Authentication is turned on
-2016-06-01T12:47:30Z [29266] INFO ArangoDB (version xxx) is ready for business. Have fun!
-```
-
-If it fails with a message about the database directory, please make sure the
-database directory you specified exists and can be written into.
-
-Use your favorite browser to access the URL
-
- http://127.0.0.1:8529/
-
-This should bring up ArangoDB's web interface.
-
-### Re-building ArangoDB after an update
-
-To stay up-to-date with changes made in the main ArangoDB repository, you will
-need to pull the changes from it and re-run `make`.
-
-Normally, this will be as simple as follows:
-
- unix> git pull
- unix> (cd build && make)
-
-From time to time there will be bigger structural changes in ArangoDB, which may
-render the old Makefiles invalid. Should this be the case and `make` complains
-about missing files etc., the following commands should fix it:
-
-
- unix> rm -rf build/*
-    unix> (cd build && cmake ..)
- unix> (cd build && make)
-
-Note that the above commands will run a full rebuild of ArangoDB and all
-of its third-party components. That will take a while to complete.
-
-### Installation
-
-In a local development environment it is not necessary to install ArangoDB
-somewhere, because it can be started from within the source directory as
-shown above.
-
-If there should be the need to install ArangoDB, execute the following command:
-
- (cd build && sudo make install)
-
-The server will by default be installed in
-
- /usr/local/sbin/arangod
-
-The configuration file will be installed in
-
- /usr/local/etc/arangodb3/arangod.conf
-
-The database will be installed in
-
- /usr/local/var/lib/arangodb3
-
-The ArangoShell will be installed in
-
- /usr/local/bin/arangosh
-
-You should add an arangodb user and group (as root), plus make sure it owns these directories:
-
- useradd -g arangodb arangodb
- chown -R arangodb:arangodb /usr/local/var/lib/arangodb3-apps/
- chown -R arangodb:arangodb /tmp/database-dir/
-
-**Note:** The installation directory will be different if you use one of the
-`precompiled` packages. Please check the default locations of your operating
-system, e.g. `/etc` and `/var/lib`.
-
-When upgrading from a previous version of ArangoDB, please make sure you inspect
-ArangoDB's log file after an upgrade. It may also be necessary to start ArangoDB
-with the *--database.auto-upgrade* parameter once to perform required upgrade or
-initialization tasks.
-
-**Author:** [Patrick Huber](https://github.com/stackmagic)
-**Author:** [Wilfried Goesgens](https://github.com/dothebart)
-
-**Tags:** #debian #driver
diff --git a/Documentation/Books/Cookbook/Compiling/README.md b/Documentation/Books/Cookbook/Compiling/README.md
deleted file mode 100644
index 271df1a51b7a..000000000000
--- a/Documentation/Books/Cookbook/Compiling/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-Compiling ArangoDB
-==================
-
-Problem
--------
-
-You want to modify sources or add your own changes to ArangoDB.
-
-Solution
---------
-
-ArangoDB, like many other open source projects nowadays, is standing on the shoulders of giants.
-This gives us a solid foundation to bring you a unique feature set, but it introduces a lot of
-dependencies that need to be in place in order to compile ArangoDB.
-
-Since build infrastructures are very different depending on the target OS, choose your target
-from the recipes below.
-
-- [Compile on Debian](Debian.md)
-
-- [Compile on Windows](Windows.md)
-
-- [Running Custom Build](RunningCustomBuild.md)
-
- - [Recompiling jemalloc](jemalloc.md)
diff --git a/Documentation/Books/Cookbook/Compiling/RunningCustomBuild.md b/Documentation/Books/Cookbook/Compiling/RunningCustomBuild.md
deleted file mode 100644
index 3cdb904013c0..000000000000
--- a/Documentation/Books/Cookbook/Compiling/RunningCustomBuild.md
+++ /dev/null
@@ -1,59 +0,0 @@
-Running a custom build
-======================
-
-Problem
--------
-
-You've already built a custom version of ArangoDB and want to run it, possibly in isolation from an existing installation, or you may want to re-use its data.
-
-Solution
---------
-
-First, you need to build your own version of ArangoDB. If you haven't done so
-already, have a look at any of the [Compiling](README.md) recipes.
-
-This recipe assumes you're in the root directory of the ArangoDB distribution and compiling has successfully finished.
-
-### Running in isolation
-
-This part shows how to run your custom build with an empty database directory:
-
-```bash
-# create data directory
-mkdir /tmp/arangodb
-
-# run
-bin/arangod \
- --configuration etc/relative/arangod.conf\
- --database.directory /tmp/arangodb
-```
-
-### Running with data
-
-This part shows how to run your custom build with the config and data from a pre-existing stable installation.
-
-{% hint 'danger' %}
-ArangoDB's developers may change the db file format and after running with a
-changed file format, there may be no way back. Alternatively you can run your
-build in isolation and [dump](../../Manual/Programs/Arangodump/index.html) and
-[restore](../../Manual/Programs/Arangorestore/index.html) the data from the
-stable to your custom build.
-{% endhint %}
-
-When running like this, you must run the db as the arangod user (the default
-installed by the package) in order to have write access to the log, database
-directory etc. Running as root will likely mess up the file permissions - good
-luck fixing that!
-
-```bash
-# become root first
-su
-
-# now switch to arangod and run
-su - arangod
-bin/arangod --configuration /etc/arangodb/arangod.conf
-```
-
-**Author:** [Patrick Huber](https://github.com/stackmagic)
-
-**Tags:** #build
diff --git a/Documentation/Books/Cookbook/Compiling/Windows.md b/Documentation/Books/Cookbook/Compiling/Windows.md
deleted file mode 100644
index 330e00d55a91..000000000000
--- a/Documentation/Books/Cookbook/Compiling/Windows.md
+++ /dev/null
@@ -1,197 +0,0 @@
-Compiling ArangoDB under Windows
-================================
-
-Problem
--------
-
-I want to compile ArangoDB 3.4 and onwards under Windows.
-
-**Note:** If you want to compile version 3.3 or earlier, then look at the
-[Compiling ArangoDB under Windows](https://docs.arangodb.com/3.3/Cookbook/Compiling/Windows.html)
-recipe in the 3.3 documentation.
-
-Solution
---------
-
-With ArangoDB 3.0 a complete cmake environment was introduced. This also streamlines the dependencies on Windows.
-We suggest using [chocolatey.org](https://chocolatey.org/) to install most of the dependencies. While
-most projects offer their own setup & install packages, chocolatey offers a simplified way to install them
-with less user interaction. You can even use chocolatey via
-[Ansible's 2.7 winrm facility](https://docs.ansible.com/ansible/latest/user_guide/windows.html)
-to do unattended installations of some software on Windows.
-
-### Ingredients
-
-First install the choco package manager by pasting this tiny cmdlet into a command window
-*(needs to be run with Administrator privileges; Right click start menu, **Command Prompt (Admin)**)*:
-
- @powershell -NoProfile -ExecutionPolicy Bypass -Command "iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1'))" && SET PATH=%PATH%;%ALLUSERSPROFILE%\chocolatey\bin
-
-#### Visual Studio and its Compiler
-
-Since choco currently fails to alter the environment for
-[Microsoft Visual Studio](https://www.visualstudio.com/en-us/products/visual-studio-community-vs.aspx),
-we suggest downloading and installing Visual Studio by hand.
-Currently Visual Studio 2017 is the only supported option.
-
-{% hint 'warning' %}
-You need to make sure that it installs the **Desktop development with C++** preset,
-else cmake will fail to detect it later on. Furthermore, the **Windows 8.1 SDK and UCRT SDK**
-optional component is required to be selected during Visual Studio installation, else V8
-will fail to compile later on.
-{% endhint %}
-
-After it has installed successfully, start it once so it can finish its setup.
-
-#### More Dependencies
-
-Now you can invoke the choco package manager for an unattended install of the dependencies
-*(needs to be run with Administrator privileges again)*:
-
- choco install -y cmake.portable nsis python2 procdump windbg wget
-
-Then we need to install the [OpenSSL](https://openssl.org) library from its sources or using precompiled
-[Third Party OpenSSL Related Binary Distributions](https://wiki.openssl.org/index.php/Binaries).
-
-#### Optional Dependencies
-
-If you intend to run the unit tests or compile from git, you also need
-*(needs to be run with Administrator privileges again)*:
-
- choco install -y git winflexbison ruby
-
-Close and reopen the Administrator command window in order to continue with the ruby devkit:
-
- choco install -y ruby2.devkit
-
-And manually install the requirements via the `Gemfile` fetched from the ArangoDB Git repository
-*(needs to be run with Administrator privileges)*:
-
- wget https://raw.githubusercontent.com/arangodb/arangodb/devel/tests/rb/HttpInterface/Gemfile
- setx PATH %PATH%;C:\tools\DevKit2\bin;C:\tools\DevKit2\mingw\bin
- gem install bundler
- bundler
-
-Note that the V8 build scripts and gyp aren't compatible with Python 3.x, hence you need python2!
-
-### Building ArangoDB
-
-Download and extract the release tarball from https://www.arangodb.com/download/
-
-Or clone the GitHub repository and checkout the branch or tag you need (e.g. `devel`)
-
- git clone https://github.com/arangodb/arangodb.git -b devel
- cd arangodb
-
-Generate the Visual studio project files, and check back that cmake discovered all components on your system:
-
- mkdir Build64
- cd Build64
- cmake -G "Visual Studio 15 2017 Win64" ..
-
-Note that in some cases cmake struggles to find the proper python interpreter
-(e.g. the cygwin one won't work). You can overrule it by appending:
-
- -DPYTHON_EXECUTABLE:FILEPATH=C:/Python27/python.exe
-
-You can now load these in the Visual Studio IDE or use cmake to start the build:
-
- cmake --build . --config RelWithDebInfo
-
-The binaries need the ICU datafile `icudt54l.dat`, which is automatically copied into the directory containing the
-executable.
-
-### Unit tests (Optional)
-
-The unit tests require a [cygwin](https://www.cygwin.com/) environment.
-
-#### Cygwin Installation Hints
-
-You need at least `make` from cygwin. Cygwin also offers a `cmake`. Do **not** install the cygwin cmake.
-
-You should also issue these commands to generate user information for the cygwin commands:
-
- mkpasswd > /etc/passwd
- mkgroup > /etc/group
-
-Turning ACL off (noacl) for all mounts in cygwin fixes permissions troubles that may appear in the build:
-
- # /etc/fstab
- #
- # This file is read once by the first process in a Cygwin process tree.
- # To pick up changes, restart all Cygwin processes. For a description
- # see https://cygwin.com/cygwin-ug-net/using.html#mount-table
-
- # noacl = Ignore Access Control List and let Windows handle permissions
- C:/cygwin64/bin /usr/bin ntfs binary,auto,noacl 0 0
- C:/cygwin64/lib /usr/lib ntfs binary,auto,noacl 0 0
- C:/cygwin64 / ntfs override,binary,auto,noacl 0 0
- none /cygdrive cygdrive binary,posix=0,user,noacl 0 0
-
-#### Enable native symlinks for Cygwin and git
-
-Cygwin will create proprietary files as placeholders by default instead of
-actually symlinking files. The placeholders later tell Cygwin where to resolve
-paths to. It does not intercept every access to the placeholders, however, so
-3rd-party scripts may break. Windows Vista and above support real symlinks,
-and Cygwin can be configured to make use of it:
-
- # use actual symlinks to prevent documentation build errors
- # (requires elevated rights!)
- export CYGWIN="winsymlinks:native"
-
-Note that you must run Cygwin as administrator or change the Windows group
-policies to allow user accounts to create symlinks (`gpedit.msc` if available).
-
-BTW: You can create symlinks manually on Windows like:
-
- mklink /H target/file.ext source/file.ext
- mklink /D target/path source/path
- mklink /J target/path source/path/for/junction
-
-And in Cygwin:
-
- ln -s source target
-
-#### Making the ICU database publicly available
-
-If you intend to use the machine for development purposes, it may be more practical to copy it to a common place:
-
- cd 3rdParty/V8/v*/third_party/icu/source/data/in && cp icudt*.dat /cygdrive/c/Windows/
-
-And configure your environment (yes, this instruction is reminiscent of The Hitchhiker's Guide to the Galaxy...) so that
-`ICU_DATA` points to `c:\\Windows`. You do that by opening the Explorer,
-right-clicking `This PC` in the tree on the left and choosing `Properties`. In the window that opens, choose `Advanced system settings`,
-then `Environment Variables` in the popup. Another popup opens; in the `System Variables` part click `New`
-and set the variable name `ICU_DATA` to the value `c:\\Windows`.
-
-![HowtoSetEnv](../assets/CompilingUnderWindows/SetEnvironmentVar.png)
-
-#### Running Unit tests
-
-You can then run the integration tests in the cygwin shell like this:
-
- Build64/bin/RelWithDebInfo/arangosh.exe \
- -c etc/relative/arangosh.conf \
- --log.level warning \
- --server.endpoint tcp://127.0.0.1:1024 \
- --javascript.execute UnitTests/unittest.js \
- -- \
- all \
- --build Build64 \
- --buildType RelWithDebInfo \
- --skipNondeterministic true \
- --skipTimeCritical true \
- --skipBoost true \
- --skipGeo true
-
-Additional options `--ruby c:/tools/ruby25/bin/ruby` and `--rspec c:/tools/ruby25/bin/rspec`
-should be used only if Ruby is not in the *PATH*.
-
-**Authors**:
-[Frank Celler](https://github.com/fceller),
-[Wilfried Goesgens](https://github.com/dothebart),
-[Simran Brucherseifer](https://github.com/Simran-B) and
-[Vadim Kondratyev](https://github.com/KVS85).
-
-**Tags**: #windows
diff --git a/Documentation/Books/Cookbook/Compiling/jemalloc.md b/Documentation/Books/Cookbook/Compiling/jemalloc.md
deleted file mode 100644
index dcec0774ec72..000000000000
--- a/Documentation/Books/Cookbook/Compiling/jemalloc.md
+++ /dev/null
@@ -1,40 +0,0 @@
-Jemalloc
-========
-
-**This article is only relevant if you intend to compile ArangoDB on Ubuntu 16.10 or Debian testing**
-
-On more modern Linux systems (development/floating at the time of this writing) you may get compile / link errors with ArangoDB regarding jemalloc.
-This is due to compilers switching their default behaviour regarding `PIC` - Position Independent Code.
-It seems common that jemalloc remains in a state where this change isn't followed yet, which causes ArangoDB to error out during the linking phase.
-
-From now on cmake will detect this and give you this hint:
-
- the static system jemalloc isn't suitable! Recompile with the current compiler or disable using `-DCMAKE_CXX_FLAGS=-no-pie -DCMAKE_C_FLAGS=-no-pie`
-
-Now you've got three choices.
-
-Doing without jemalloc
-----------------------
-
-This fixes the compilation issue, but you will get problems with glibc's heap fragmentation behaviour, which in the long run will lead to ever-increasing memory consumption of ArangoDB.
-
-So, while this may be suitable for development / testing systems, it's definitely not for production.
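-
-If you still want to go this route, the build without jemalloc can be configured with the cmake option also mentioned in the Debian compile recipe:
-
-    cmake .. -DUSE_JEMALLOC=Off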
-
-Disabling PIC altogether
-------------------------
-
-This will build an arangod which doesn't use this compiler feature. It may be not so nice for development builds. It can be achieved by specifying these options on cmake:
-
- -DCMAKE_CXX_FLAGS=-no-pie -DCMAKE_C_FLAGS=-no-pie
-
-Recompile jemalloc
-------------------
-
-The smartest way is to fix the jemalloc library packages on your system so they reflect the new behaviour. On Debian / Ubuntu systems this can be achieved like this:
-
- apt-get install automake debhelper docbook-xsl xsltproc dpkg-dev
- apt source jemalloc
- cd jemalloc*
- dpkg-buildpackage
- cd ..
- dpkg -i *jemalloc*deb
diff --git a/Documentation/Books/Cookbook/DocumentInheritance.md b/Documentation/Books/Cookbook/DocumentInheritance.md
deleted file mode 100644
index df56394726c2..000000000000
--- a/Documentation/Books/Cookbook/DocumentInheritance.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# Model document inheritance
-
-## Problem
-
-How do you model document inheritance given that collections do not support that feature?
-
-## Solution
-
-Let's assume you have three document collections: "subclass", "class" and "superclass". You also have two edge collections: "sub_extends_class" and "class_extends_super".
-
-You can create them via arangosh or Foxx:
-
-```js
-var graph_module = require("com/arangodb/general-graph");
-var g = graph_module._create("inheritance");
-g._extendEdgeDefinitions(graph_module. _directedRelation("sub_extends_class", ["subclass"], ["class"]));
-g._extendEdgeDefinitions(graph_module. _directedRelation("class_extends_super", ["class"], ["superclass"]));
-```
-
-This makes sure when using the graph interface that the inheritance looks like:
-
-* sub → class
-* class → super
-* super → sub
-
-To make sure everything works as expected you should use the built-in traversal in combination with Foxx. This allows you to add the inheritance security layer easily.
-To use traversals in Foxx, simply add the following lines before defining routes:
-
-```js
-var traversal = require("org/arangodb/graph/traversal");
-var Traverser = traversal.Traverser;
-```
-
-Also you can add the following endpoint in Foxx:
-
-```js
-var readerConfig = {
- datasource: traversal.graphDatasourceFactory("inheritance"),
- expander: traversal.outboundExpander, // Go upwards in the tree
- visitor: function (config, result, vertex, path) {
- for (key in vertex) {
- if (vertex.hasOwnProperty(key) && !result.hasOwnProperty(key)) {
- result[key] = vertex[key] // Store only attributes that have not yet been found
- }
- }
- }
-};
-
-controller.get("load/:collection/:key", function(req, res) {
- var result = {};
- var id = res.params("collection") + "/" + res.params("key");
- var traverser = new Traverser(readerConfig);
- traverser.traverse(result, g.getVertex(id));
- res.json(result);
-});
-```
-
-This will make sure to iterate the complete inheritance tree upwards to the root element and will return all values on the path,
-where the first instance of each value is kept.
-
-## Comment
-You should go with edges because it is much easier to query them if you have a theoretically unlimited depth in inheritance.
-If you have a fixed inheritance depth you could also go with an attribute in the document referencing the parent and execute joins in AQL.
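-
-As a rough sketch of the join-based variant (assuming each document in *subclass* stores the key of its parent in a hypothetical `parent` attribute):
-
-```
-FOR sub IN subclass
-  FOR cls IN class
-    FILTER sub.parent == cls._key
-    RETURN MERGE(cls, sub)
-```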
-
-
-**Author:** [Michael Hackstein](https://github.com/mchacki)
-
-**Tags:** #graph #document
\ No newline at end of file
diff --git a/Documentation/Books/Cookbook/FOOTER.html b/Documentation/Books/Cookbook/FOOTER.html
deleted file mode 100644
index 239869bfaf6a..000000000000
--- a/Documentation/Books/Cookbook/FOOTER.html
+++ /dev/null
@@ -1 +0,0 @@
-© ArangoDB - the native multi-model NoSQL database
\ No newline at end of file
diff --git a/Documentation/Books/Cookbook/Graph/CustomVisitorFromNodeJs.md b/Documentation/Books/Cookbook/Graph/CustomVisitorFromNodeJs.md
deleted file mode 100644
index f1a305293cac..000000000000
--- a/Documentation/Books/Cookbook/Graph/CustomVisitorFromNodeJs.md
+++ /dev/null
@@ -1,106 +0,0 @@
-Using a custom visitor from node.js
-===================================
-
-Problem
--------
-
-I want to traverse a graph using a custom visitor from node.js.
-
-
-Solution
---------
-
-Use [arangojs](https://www.npmjs.com/package/arangojs) and an AQL query with a custom
-visitor.
-
-### Installing arangojs
-
-First thing is to install *arangojs*.
-This can be done using *npm* or *bower*:
-
-```
-npm install arangojs
-```
-
-or
-
-```
-bower install arangojs
-```
-
-### Example data setup
-
-For the following example, we need the example graph and data from
-[here](https://jsteemann.github.io/downloads/code/world-graph-setup.js).
-Please download the code from the link and store it in the filesystem using a filename
-of `world-graph-setup.js`. Then start the ArangoShell and run the code from the file:
-
-```js
-require("internal").load("/path/to/file/world-graph-setup.js");
-```
-
-The script will create the following two collections and load some data into them:
-
-- `v`: a collection with vertex documents
-- `e`: an edge collection containing the connections between vertices in `v`
-
-### Registering a custom visitor function
-
-Let's register a custom visitor function now. A custom visitor function is a JavaScript
-function that is executed every time the traversal processes a vertex in the graph.
-
-To register a custom visitor function, we can execute the following commands in the
-ArangoShell:
-
-```js
-var aqlfunctions = require("org/arangodb/aql/functions");
-
-aqlfunctions.register("myfunctions::leafNodeVisitor", function (config, result, vertex, path, connected) {
- if (connected && connected.length === 0) {
- return vertex.name + " (" + vertex.type + ")";
- }
-});
-```
-
-### Invoking the custom visitor
-
-The following code can be run in node.js to execute an AQL query that will
-make use of the custom visitor:
-
-```js
-var Database = require('arangojs');
-
-/* connect to the database, change as required */
-var db = new Database('http://127.0.0.1:8529');
-
-/* the query string */
-var query = "FOR result IN TRAVERSAL(v, e, @vertex, 'inbound', @options) RETURN result";
-
-/* bind parameters */
-var bindVars = {
- vertex: "v/world", /* our start vertex */
- options: {
- order: "preorder-expander",
- visitor: "myfunctions::leafNodeVisitor",
- visitorReturnsResults: true
- }
-};
-
-db.query(query, bindVars, function (err, cursor) {
- if (err) {
- console.log('error: %j', err);
- } else {
- cursor.all(function(err2, list) {
-      if (err2) {
- console.log('error: %j', err2);
- } else {
- console.log("all document keys: %j", list);
- }
- });
- }
-});
-```
-
-**Author:** [Jan Steemann](https://github.com/jsteemann)
-
-**Tags**: #graph #traversal #aql #nodejs
diff --git a/Documentation/Books/Cookbook/Graph/ExampleActorsAndMovies.md b/Documentation/Books/Cookbook/Graph/ExampleActorsAndMovies.md
deleted file mode 100644
index 53544a8176f6..000000000000
--- a/Documentation/Books/Cookbook/Graph/ExampleActorsAndMovies.md
+++ /dev/null
@@ -1,790 +0,0 @@
-AQL Example Queries on an Actors and Movies Database
-====================================================
-
-Acknowledgments
----------------
-
-On [Stackoverflow][1] the user [Vincz][2] asked for some example queries based on graphs.
-So credits for this question go to him. The dataset and queries have been taken from the folks at [neo4j](http://neo4j.com/docs/stable/cypherdoc-movie-database.html). Credits and thanks to them.
-As I also think such examples have been missing so far, I decided to write this recipe.
-
-
-Problem
--------
-
-(Copy from Stackoverflow)
-Given a collection of **actors** and a collection of **movies**, and an **actsIn** edge collection (with a **year** property) to connect the vertices.
-
-\[Actor\] ← act in → \[Movie\]
-
- How could I get:
-
-* All actors who acted in "movie1" OR "movie2"
-* All actors who acted in both "movie1" AND "movie2" ?
-* All common movies between "actor1" and "actor2" ?
-* All actors who acted in 3 or more movies ?
-* All movies where exactly 6 actors acted in ?
-* The number of actors by movie ?
-* The number of movies by actor ?
-* The number of movies acted in between 2005 and 2010 by actor ?
-
-
-Solution
---------
-
-During this solution we will be using arangosh to create and query the data.
-All the AQL queries are strings and can simply be copied over to your favorite driver instead of arangosh.
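-
-For example, with [arangojs](https://www.npmjs.com/package/arangojs) one of the queries shown below could be run like this (connection URL and error handling are only a sketch):
-
-```js
-var Database = require('arangojs');
-var db = new Database('http://127.0.0.1:8529');
-
-/* count the actors per movie, see the corresponding arangosh query below */
-var query = "FOR x IN actsIn COLLECT movie = x._to WITH COUNT INTO counter RETURN {movie: movie, actors: counter}";
-
-db.query(query, {}, function (err, cursor) {
-  if (err) {
-    console.log('error: %j', err);
-  } else {
-    cursor.all(function (err2, rows) {
-      console.log('%j', rows);
-    });
-  }
-});
-```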
-
-Create a Test Dataset in arangosh:
-
-```js
-var actors = db._create("actors");
-var movies = db._create("movies");
-var actsIn = db._createEdgeCollection("actsIn");
-
-var TheMatrix = movies.save({_key: "TheMatrix", title:'The Matrix', released:1999, tagline:'Welcome to the Real World'})._id;
-var Keanu = actors.save({_key: "Keanu", name:'Keanu Reeves', born:1964})._id;
-var Carrie = actors.save({_key: "Carrie", name:'Carrie-Anne Moss', born:1967})._id;
-var Laurence = actors.save({_key: "Laurence", name:'Laurence Fishburne', born:1961})._id;
-var Hugo = actors.save({_key: "Hugo", name:'Hugo Weaving', born:1960})._id;
-var Emil = actors.save({_key: "Emil", name:"Emil Eifrem", born: 1978})._id;
-
-actsIn.save(Keanu, TheMatrix, {roles: ["Neo"], year: 1999});
-actsIn.save(Carrie, TheMatrix, {roles: ["Trinity"], year: 1999});
-actsIn.save(Laurence, TheMatrix, {roles: ["Morpheus"], year: 1999});
-actsIn.save(Hugo, TheMatrix, {roles: ["Agent Smith"], year: 1999});
-actsIn.save(Emil, TheMatrix, {roles: ["Emil"], year: 1999});
-
-var TheMatrixReloaded = movies.save({_key: "TheMatrixReloaded", title: "The Matrix Reloaded", released: 2003, tagline: "Free your mind"})._id;
-actsIn.save(Keanu, TheMatrixReloaded, {roles: ["Neo"], year: 2003});
-actsIn.save(Carrie, TheMatrixReloaded, {roles: ["Trinity"], year: 2003});
-actsIn.save(Laurence, TheMatrixReloaded, {roles: ["Morpheus"], year: 2003});
-actsIn.save(Hugo, TheMatrixReloaded, {roles: ["Agent Smith"], year: 2003});
-
-var TheMatrixRevolutions = movies.save({_key: "TheMatrixRevolutions", title: "The Matrix Revolutions", released: 2003, tagline: "Everything that has a beginning has an end"});
-actsIn.save(Keanu, TheMatrixRevolutions, {roles: ["Neo"], year: 2003});
-actsIn.save(Carrie, TheMatrixRevolutions, {roles: ["Trinity"], year: 2003});
-actsIn.save(Laurence, TheMatrixRevolutions, {roles: ["Morpheus"], year: 2003});
-actsIn.save(Hugo, TheMatrixRevolutions, {roles: ["Agent Smith"], year: 2003});
-
-var TheDevilsAdvocate = movies.save({_key: "TheDevilsAdvocate", title:"The Devil's Advocate", released:1997, tagline:'Evil has its winning ways'})._id;
-var Charlize = actors.save({_key: "Charlize", name:'Charlize Theron', born:1975})._id;
-var Al = actors.save({_key: "Al", name:'Al Pacino', born:1940})._id;
-actsIn.save(Keanu, TheDevilsAdvocate, {roles: ["Kevin Lomax"], year: 1997});
-actsIn.save(Charlize, TheDevilsAdvocate, {roles: ["Mary Ann Lomax"], year: 1997});
-actsIn.save(Al, TheDevilsAdvocate, {roles: ["John Milton"], year: 1997});
-
-var AFewGoodMen = movies.save({_key: "AFewGoodMen", title:"A Few Good Men", released:1992, tagline:"In the heart of the nation's capital, in a courthouse of the U.S. government, one man will stop at nothing to keep his honor, and one will stop at nothing to find the truth."})._id;
-var TomC = actors.save({_key: "TomC", name:'Tom Cruise', born:1962})._id;
-var JackN = actors.save({_key: "JackN", name:'Jack Nicholson', born:1937})._id;
-var DemiM = actors.save({_key: "DemiM", name:'Demi Moore', born:1962})._id;
-var KevinB = actors.save({_key:"KevinB", name:'Kevin Bacon', born:1958})._id;
-var KieferS = actors.save({_key:"KieferS", name:'Kiefer Sutherland', born:1966})._id;
-var NoahW = actors.save({_key:"NoahW", name:'Noah Wyle', born:1971})._id;
-var CubaG = actors.save({_key:"CubaG", name:'Cuba Gooding Jr.', born:1968})._id;
-var KevinP = actors.save({_key:"KevinP", name:'Kevin Pollak', born:1957})._id;
-var JTW = actors.save({_key:"JTW", name:'J.T. Walsh', born:1943})._id;
-var JamesM = actors.save({_key:"JamesM", name:'James Marshall', born:1967})._id;
-var ChristopherG = actors.save({_key:"ChristopherG", name:'Christopher Guest', born:1948})._id;
-actsIn.save(TomC,AFewGoodMen,{roles:['Lt. Daniel Kaffee'], year: 1992});
-actsIn.save(JackN,AFewGoodMen,{roles:['Col. Nathan R. Jessup'], year: 1992});
-actsIn.save(DemiM,AFewGoodMen,{roles:['Lt. Cdr. JoAnne Galloway'], year: 1992});
-actsIn.save(KevinB,AFewGoodMen,{roles:['Capt. Jack Ross'], year: 1992});
-actsIn.save(KieferS,AFewGoodMen,{ roles:['Lt. Jonathan Kendrick'], year: 1992});
-actsIn.save(NoahW,AFewGoodMen,{roles:['Cpl. Jeffrey Barnes'], year: 1992});
-actsIn.save(CubaG,AFewGoodMen,{ roles:['Cpl. Carl Hammaker'], year: 1992});
-actsIn.save(KevinP,AFewGoodMen,{roles:['Lt. Sam Weinberg'], year: 1992});
-actsIn.save(JTW,AFewGoodMen,{roles:['Lt. Col. Matthew Andrew Markinson'], year: 1992});
-actsIn.save(JamesM,AFewGoodMen,{roles:['Pfc. Louden Downey'], year: 1992});
-actsIn.save(ChristopherG,AFewGoodMen,{ roles:['Dr. Stone'], year: 1992});
-
-var TopGun = movies.save({_key:"TopGun", title:"Top Gun", released:1986, tagline:'I feel the need, the need for speed.'})._id;
-var KellyM = actors.save({_key:"KellyM", name:'Kelly McGillis', born:1957})._id;
-var ValK = actors.save({_key:"ValK", name:'Val Kilmer', born:1959})._id;
-var AnthonyE = actors.save({_key:"AnthonyE", name:'Anthony Edwards', born:1962})._id;
-var TomS = actors.save({_key:"TomS", name:'Tom Skerritt', born:1933})._id;
-var MegR = actors.save({_key:"MegR", name:'Meg Ryan', born:1961})._id;
-actsIn.save(TomC,TopGun,{roles:['Maverick'], year: 1986});
-actsIn.save(KellyM,TopGun,{roles:['Charlie'], year: 1986});
-actsIn.save(ValK,TopGun,{roles:['Iceman'], year: 1986});
-actsIn.save(AnthonyE,TopGun,{roles:['Goose'], year: 1986});
-actsIn.save(TomS,TopGun,{roles:['Viper'], year: 1986});
-actsIn.save(MegR,TopGun,{roles:['Carole'], year: 1986});
-
-var JerryMaguire = movies.save({_key:"JerryMaguire", title:'Jerry Maguire', released:2000, tagline:'The rest of his life begins now.'})._id;
-var ReneeZ = actors.save({_key:"ReneeZ", name:'Renee Zellweger', born:1969})._id;
-var KellyP = actors.save({_key:"KellyP", name:'Kelly Preston', born:1962})._id;
-var JerryO = actors.save({_key:"JerryO", name:"Jerry O'Connell", born:1974})._id;
-var JayM = actors.save({_key:"JayM", name:'Jay Mohr', born:1970})._id;
-var BonnieH = actors.save({_key:"BonnieH", name:'Bonnie Hunt', born:1961})._id;
-var ReginaK = actors.save({_key:"ReginaK", name:'Regina King', born:1971})._id;
-var JonathanL = actors.save({_key:"JonathanL", name:'Jonathan Lipnicki', born:1996})._id;
-actsIn.save(TomC,JerryMaguire,{roles:['Jerry Maguire'], year: 2000});
-actsIn.save(CubaG,JerryMaguire,{roles:['Rod Tidwell'], year: 2000});
-actsIn.save(ReneeZ,JerryMaguire,{roles:['Dorothy Boyd'], year: 2000});
-actsIn.save(KellyP,JerryMaguire,{roles:['Avery Bishop'], year: 2000});
-actsIn.save(JerryO,JerryMaguire,{roles:['Frank Cushman'], year: 2000});
-actsIn.save(JayM,JerryMaguire,{roles:['Bob Sugar'], year: 2000});
-actsIn.save(BonnieH,JerryMaguire,{roles:['Laurel Boyd'], year: 2000});
-actsIn.save(ReginaK,JerryMaguire,{roles:['Marcee Tidwell'], year: 2000});
-actsIn.save(JonathanL,JerryMaguire,{roles:['Ray Boyd'], year: 2000});
-
-var StandByMe = movies.save({_key:"StandByMe", title:"Stand By Me", released:1986, tagline:"For some, it's the last real taste of innocence, and the first real taste of life. But for everyone, it's the time that memories are made of."})._id;
-var RiverP = actors.save({_key:"RiverP", name:'River Phoenix', born:1970})._id;
-var CoreyF = actors.save({_key:"CoreyF", name:'Corey Feldman', born:1971})._id;
-var WilW = actors.save({_key:"WilW", name:'Wil Wheaton', born:1972})._id;
-var JohnC = actors.save({_key:"JohnC", name:'John Cusack', born:1966})._id;
-var MarshallB = actors.save({_key:"MarshallB", name:'Marshall Bell', born:1942})._id;
-actsIn.save(WilW,StandByMe,{roles:['Gordie Lachance'], year: 1986});
-actsIn.save(RiverP,StandByMe,{roles:['Chris Chambers'], year: 1986});
-actsIn.save(JerryO,StandByMe,{roles:['Vern Tessio'], year: 1986});
-actsIn.save(CoreyF,StandByMe,{roles:['Teddy Duchamp'], year: 1986});
-actsIn.save(JohnC,StandByMe,{roles:['Denny Lachance'], year: 1986});
-actsIn.save(KieferS,StandByMe,{roles:['Ace Merrill'], year: 1986});
-actsIn.save(MarshallB,StandByMe,{roles:['Mr. Lachance'], year: 1986});
-
-var AsGoodAsItGets = movies.save({_key:"AsGoodAsItGets", title:'As Good as It Gets', released:1997, tagline:'A comedy from the heart that goes for the throat.'})._id;
-var HelenH = actors.save({_key:"HelenH", name:'Helen Hunt', born:1963})._id;
-var GregK = actors.save({_key:"GregK", name:'Greg Kinnear', born:1963})._id;
-actsIn.save(JackN,AsGoodAsItGets,{roles:['Melvin Udall'], year: 1997});
-actsIn.save(HelenH,AsGoodAsItGets,{roles:['Carol Connelly'], year: 1997});
-actsIn.save(GregK,AsGoodAsItGets,{roles:['Simon Bishop'], year: 1997});
-actsIn.save(CubaG,AsGoodAsItGets,{roles:['Frank Sachs'], year: 1997});
-
-var WhatDreamsMayCome = movies.save({_key:"WhatDreamsMayCome", title:'What Dreams May Come', released:1998, tagline:'After life there is more. The end is just the beginning.'})._id;
-var AnnabellaS = actors.save({_key:"AnnabellaS", name:'Annabella Sciorra', born:1960})._id;
-var MaxS = actors.save({_key:"MaxS", name:'Max von Sydow', born:1929})._id;
-var WernerH = actors.save({_key:"WernerH", name:'Werner Herzog', born:1942})._id;
-var Robin = actors.save({_key:"Robin", name:'Robin Williams', born:1951})._id;
-actsIn.save(Robin,WhatDreamsMayCome,{roles:['Chris Nielsen'], year: 1998});
-actsIn.save(CubaG,WhatDreamsMayCome,{roles:['Albert Lewis'], year: 1998});
-actsIn.save(AnnabellaS,WhatDreamsMayCome,{roles:['Annie Collins-Nielsen'], year: 1998});
-actsIn.save(MaxS,WhatDreamsMayCome,{roles:['The Tracker'], year: 1998});
-actsIn.save(WernerH,WhatDreamsMayCome,{roles:['The Face'], year: 1998});
-
-var SnowFallingonCedars = movies.save({_key:"SnowFallingonCedars", title:'Snow Falling on Cedars', released:1999, tagline:'First loves last. Forever.'})._id;
-var EthanH = actors.save({_key:"EthanH", name:'Ethan Hawke', born:1970})._id;
-var RickY = actors.save({_key:"RickY", name:'Rick Yune', born:1971})._id;
-var JamesC = actors.save({_key:"JamesC", name:'James Cromwell', born:1940})._id;
-actsIn.save(EthanH,SnowFallingonCedars,{roles:['Ishmael Chambers'], year: 1999});
-actsIn.save(RickY,SnowFallingonCedars,{roles:['Kazuo Miyamoto'], year: 1999});
-actsIn.save(MaxS,SnowFallingonCedars,{roles:['Nels Gudmundsson'], year: 1999});
-actsIn.save(JamesC,SnowFallingonCedars,{roles:['Judge Fielding'], year: 1999});
-
-var YouveGotMail = movies.save({_key:"YouveGotMail", title:"You've Got Mail", released:1998, tagline:'At odds in life... in love on-line.'})._id;
-var ParkerP = actors.save({_key:"ParkerP", name:'Parker Posey', born:1968})._id;
-var DaveC = actors.save({_key:"DaveC", name:'Dave Chappelle', born:1973})._id;
-var SteveZ = actors.save({_key:"SteveZ", name:'Steve Zahn', born:1967})._id;
-var TomH = actors.save({_key:"TomH", name:'Tom Hanks', born:1956})._id;
-actsIn.save(TomH,YouveGotMail,{roles:['Joe Fox'], year: 1998});
-actsIn.save(MegR,YouveGotMail,{roles:['Kathleen Kelly'], year: 1998});
-actsIn.save(GregK,YouveGotMail,{roles:['Frank Navasky'], year: 1998});
-actsIn.save(ParkerP,YouveGotMail,{roles:['Patricia Eden'], year: 1998});
-actsIn.save(DaveC,YouveGotMail,{roles:['Kevin Jackson'], year: 1998});
-actsIn.save(SteveZ,YouveGotMail,{roles:['George Pappas'], year: 1998});
-
-var SleeplessInSeattle = movies.save({_key:"SleeplessInSeattle", title:'Sleepless in Seattle', released:1993, tagline:'What if someone you never met, someone you never saw, someone you never knew was the only someone for you?'})._id;
-var RitaW = actors.save({_key:"RitaW", name:'Rita Wilson', born:1956})._id;
-var BillPull = actors.save({_key:"BillPull", name:'Bill Pullman', born:1953})._id;
-var VictorG = actors.save({_key:"VictorG", name:'Victor Garber', born:1949})._id;
-var RosieO = actors.save({_key:"RosieO", name:"Rosie O'Donnell", born:1962})._id;
-actsIn.save(TomH,SleeplessInSeattle,{roles:['Sam Baldwin'], year: 1993});
-actsIn.save(MegR,SleeplessInSeattle,{roles:['Annie Reed'], year: 1993});
-actsIn.save(RitaW,SleeplessInSeattle,{roles:['Suzy'], year: 1993});
-actsIn.save(BillPull,SleeplessInSeattle,{roles:['Walter'], year: 1993});
-actsIn.save(VictorG,SleeplessInSeattle,{roles:['Greg'], year: 1993});
-actsIn.save(RosieO,SleeplessInSeattle,{roles:['Becky'], year: 1993});
-
-var JoeVersustheVolcano = movies.save({_key:"JoeVersustheVolcano", title:'Joe Versus the Volcano', released:1990, tagline:'A story of love, lava and burning desire.'})._id;
-var Nathan = actors.save({_key:"Nathan", name:'Nathan Lane', born:1956})._id;
-actsIn.save(TomH,JoeVersustheVolcano,{roles:['Joe Banks'], year: 1990});
-actsIn.save(MegR,JoeVersustheVolcano,{roles:['DeDe', 'Angelica Graynamore', 'Patricia Graynamore'], year: 1990});
-actsIn.save(Nathan,JoeVersustheVolcano,{roles:['Baw'], year: 1990});
-
-var WhenHarryMetSally = movies.save({_key:"WhenHarryMetSally", title:'When Harry Met Sally', released:1998, tagline:'At odds in life... in love on-line.'})._id;
-var BillyC = actors.save({_key:"BillyC", name:'Billy Crystal', born:1948})._id;
-var CarrieF = actors.save({_key:"CarrieF", name:'Carrie Fisher', born:1956})._id;
-var BrunoK = actors.save({_key:"BrunoK", name:'Bruno Kirby', born:1949})._id;
-actsIn.save(BillyC,WhenHarryMetSally,{roles:['Harry Burns'], year: 1998});
-actsIn.save(MegR,WhenHarryMetSally,{roles:['Sally Albright'], year: 1998});
-actsIn.save(CarrieF,WhenHarryMetSally,{roles:['Marie'], year: 1998});
-actsIn.save(BrunoK,WhenHarryMetSally,{roles:['Jess'], year: 1998});
-```
-
-
-All actors who acted in "movie1" OR "movie2"
---------------------------------------------
-
-Say we want to find all actors who acted in "TheMatrix" OR "TheDevilsAdvocate":
-
-First let's try to get all actors for one movie:
-
-```js
-db._query("FOR x IN ANY 'movies/TheMatrix' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN x._id").toArray();
-```
-
-Result:
-```json
-[
- [
- "actors/Keanu",
- "actors/Hugo",
- "actors/Emil",
- "actors/Carrie",
- "actors/Laurence"
- ]
-]
-```
-
-Now we continue by forming a UNION_DISTINCT of two of these neighbor queries, which will be the solution:
-
-```js
-db._query("FOR x IN UNION_DISTINCT ((FOR y IN ANY 'movies/TheMatrix' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id), (FOR y IN ANY 'movies/TheDevilsAdvocate' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id)) RETURN x").toArray();
-```
-
-```json
-[
- [
- "actors/Emil",
- "actors/Hugo",
- "actors/Carrie",
- "actors/Laurence",
- "actors/Keanu",
- "actors/Al",
- "actors/Charlize"
- ]
-]
-```
-
-
-All actors who acted in both "movie1" AND "movie2" ?
-----------------------------------------------------
-
-This is almost identical to the question above.
-But this time we are not interested in a UNION but in an INTERSECTION:
-
-```js
-db._query("FOR x IN INTERSECTION ((FOR y IN ANY 'movies/TheMatrix' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id), (FOR y IN ANY 'movies/TheDevilsAdvocate' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id)) RETURN x").toArray();
-```
-
-```json
-[
- [
- "actors/Keanu"
- ]
-]
-```
-
-
-All common movies between "actor1" and "actor2" ?
--------------------------------------------------
-
-This is actually identical to the question about common actors in movie1 and movie2.
-We just have to change the starting vertices.
-As an example let's find all movies where Hugo Weaving ("Hugo") and Keanu Reeves are co-starring:
-
-```js
-db._query("FOR x IN INTERSECTION ((FOR y IN ANY 'actors/Hugo' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id), (FOR y IN ANY 'actors/Keanu' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id)) RETURN x").toArray();
-```
-
-```json
-[
- [
- "movies/TheMatrixRevolutions",
- "movies/TheMatrixReloaded",
- "movies/TheMatrix"
- ]
-]
-```
-
-
-All actors who acted in 3 or more movies ?
-------------------------------------------
-
-This question is different; we cannot make use of a neighbors query here.
-Instead we will make use of the edge index and the COLLECT statement of AQL for grouping.
-The basic idea is to group all edges by their startVertex (which in this dataset is always the actor).
-Then we remove all actors with less than 3 movies from the result.
-As I am also interested in the number of movies an actor has acted in, I included the value in the result as well:
-
-```js
-db._query("FOR x IN actsIn COLLECT actor = x._from WITH COUNT INTO counter FILTER counter >= 3 RETURN {actor: actor, movies: counter}").toArray()
-```
-
-```json
-[
- {
- "actor" : "actors/Carrie",
- "movies" : 3
- },
- {
- "actor" : "actors/CubaG",
- "movies" : 4
- },
- {
- "actor" : "actors/Hugo",
- "movies" : 3
- },
- {
- "actor" : "actors/Keanu",
- "movies" : 4
- },
- {
- "actor" : "actors/Laurence",
- "movies" : 3
- },
- {
- "actor" : "actors/MegR",
- "movies" : 5
- },
- {
- "actor" : "actors/TomC",
- "movies" : 3
- },
- {
- "actor" : "actors/TomH",
- "movies" : 3
- }
-]
-```
-
-
-All movies where exactly 6 actors acted in ?
---------------------------------------------
-
-The same idea as in the query before, but with an equality filter. However, now we need the movie instead of the actor, so we return the `_to` attribute:
-
-```js
-db._query("FOR x IN actsIn COLLECT movie = x._to WITH COUNT INTO counter FILTER counter == 6 RETURN movie").toArray()
-```
-
-```json
-[
- "movies/SleeplessInSeattle",
- "movies/TopGun",
- "movies/YouveGotMail"
-]
-```
-
-
-The number of actors by movie ?
--------------------------------
-
-Remember that in our dataset `_to` on the edge corresponds to the movie, so we count how often the same `_to` appears.
-This is the number of actors.
-The query is almost identical to the ones before, but without the FILTER after the COLLECT:
-
-```js
-db._query("FOR x IN actsIn COLLECT movie = x._to WITH COUNT INTO counter RETURN {movie: movie, actors: counter}").toArray()
-```
-
-```json
-[
- {
- "movie" : "movies/AFewGoodMen",
- "actors" : 11
- },
- {
- "movie" : "movies/AsGoodAsItGets",
- "actors" : 4
- },
- {
- "movie" : "movies/JerryMaguire",
- "actors" : 9
- },
- {
- "movie" : "movies/JoeVersustheVolcano",
- "actors" : 3
- },
- {
- "movie" : "movies/SleeplessInSeattle",
- "actors" : 6
- },
- {
- "movie" : "movies/SnowFallingonCedars",
- "actors" : 4
- },
- {
- "movie" : "movies/StandByMe",
- "actors" : 7
- },
- {
- "movie" : "movies/TheDevilsAdvocate",
- "actors" : 3
- },
- {
- "movie" : "movies/TheMatrix",
- "actors" : 5
- },
- {
- "movie" : "movies/TheMatrixReloaded",
- "actors" : 4
- },
- {
- "movie" : "movies/TheMatrixRevolutions",
- "actors" : 4
- },
- {
- "movie" : "movies/TopGun",
- "actors" : 6
- },
- {
- "movie" : "movies/WhatDreamsMayCome",
- "actors" : 5
- },
- {
- "movie" : "movies/WhenHarryMetSally",
- "actors" : 4
- },
- {
- "movie" : "movies/YouveGotMail",
- "actors" : 6
- }
-]
-```
-
-
-The number of movies by actor ?
--------------------------------
-
-I think you get the picture by now ;)
-
-```js
-db._query("FOR x IN actsIn COLLECT actor = x._from WITH COUNT INTO counter RETURN {actor: actor, movies: counter}").toArray()
-```
-
-```json
-[
- {
- "actor" : "actors/Al",
- "movies" : 1
- },
- {
- "actor" : "actors/AnnabellaS",
- "movies" : 1
- },
- {
- "actor" : "actors/AnthonyE",
- "movies" : 1
- },
- {
- "actor" : "actors/BillPull",
- "movies" : 1
- },
- {
- "actor" : "actors/BillyC",
- "movies" : 1
- },
- {
- "actor" : "actors/BonnieH",
- "movies" : 1
- },
- {
- "actor" : "actors/BrunoK",
- "movies" : 1
- },
- {
- "actor" : "actors/Carrie",
- "movies" : 3
- },
- {
- "actor" : "actors/CarrieF",
- "movies" : 1
- },
- {
- "actor" : "actors/Charlize",
- "movies" : 1
- },
- {
- "actor" : "actors/ChristopherG",
- "movies" : 1
- },
- {
- "actor" : "actors/CoreyF",
- "movies" : 1
- },
- {
- "actor" : "actors/CubaG",
- "movies" : 4
- },
- {
- "actor" : "actors/DaveC",
- "movies" : 1
- },
- {
- "actor" : "actors/DemiM",
- "movies" : 1
- },
- {
- "actor" : "actors/Emil",
- "movies" : 1
- },
- {
- "actor" : "actors/EthanH",
- "movies" : 1
- },
- {
- "actor" : "actors/GregK",
- "movies" : 2
- },
- {
- "actor" : "actors/HelenH",
- "movies" : 1
- },
- {
- "actor" : "actors/Hugo",
- "movies" : 3
- },
- {
- "actor" : "actors/JackN",
- "movies" : 2
- },
- {
- "actor" : "actors/JamesC",
- "movies" : 1
- },
- {
- "actor" : "actors/JamesM",
- "movies" : 1
- },
- {
- "actor" : "actors/JayM",
- "movies" : 1
- },
- {
- "actor" : "actors/JerryO",
- "movies" : 2
- },
- {
- "actor" : "actors/JohnC",
- "movies" : 1
- },
- {
- "actor" : "actors/JonathanL",
- "movies" : 1
- },
- {
- "actor" : "actors/JTW",
- "movies" : 1
- },
- {
- "actor" : "actors/Keanu",
- "movies" : 4
- },
- {
- "actor" : "actors/KellyM",
- "movies" : 1
- },
- {
- "actor" : "actors/KellyP",
- "movies" : 1
- },
- {
- "actor" : "actors/KevinB",
- "movies" : 1
- },
- {
- "actor" : "actors/KevinP",
- "movies" : 1
- },
- {
- "actor" : "actors/KieferS",
- "movies" : 2
- },
- {
- "actor" : "actors/Laurence",
- "movies" : 3
- },
- {
- "actor" : "actors/MarshallB",
- "movies" : 1
- },
- {
- "actor" : "actors/MaxS",
- "movies" : 2
- },
- {
- "actor" : "actors/MegR",
- "movies" : 5
- },
- {
- "actor" : "actors/Nathan",
- "movies" : 1
- },
- {
- "actor" : "actors/NoahW",
- "movies" : 1
- },
- {
- "actor" : "actors/ParkerP",
- "movies" : 1
- },
- {
- "actor" : "actors/ReginaK",
- "movies" : 1
- },
- {
- "actor" : "actors/ReneeZ",
- "movies" : 1
- },
- {
- "actor" : "actors/RickY",
- "movies" : 1
- },
- {
- "actor" : "actors/RitaW",
- "movies" : 1
- },
- {
- "actor" : "actors/RiverP",
- "movies" : 1
- },
- {
- "actor" : "actors/Robin",
- "movies" : 1
- },
- {
- "actor" : "actors/RosieO",
- "movies" : 1
- },
- {
- "actor" : "actors/SteveZ",
- "movies" : 1
- },
- {
- "actor" : "actors/TomC",
- "movies" : 3
- },
- {
- "actor" : "actors/TomH",
- "movies" : 3
- },
- {
- "actor" : "actors/TomS",
- "movies" : 1
- },
- {
- "actor" : "actors/ValK",
- "movies" : 1
- },
- {
- "actor" : "actors/VictorG",
- "movies" : 1
- },
- {
- "actor" : "actors/WernerH",
- "movies" : 1
- },
- {
- "actor" : "actors/WilW",
- "movies" : 1
- }
-]
-```
-
-
-The number of movies acted in between 2005 and 2010 by actor ?
---------------------------------------------------------------
-
-This query is where a multi-model database actually shines.
-First of all we want to use it in production, so we create a skiplist index on year.
-This allows us to execute fast range queries like between 2005 and 2010.
-
-```js
-db.actsIn.ensureSkiplist("year")
-```
-
-Now we slightly modify our movies-by-actor query.
-However, my dataset contains only older movies, so I changed the year range to 1990 - 1995:
-
-```js
-db._query("FOR x IN actsIn FILTER x.year >= 1990 && x.year <= 1995 COLLECT actor = x._from WITH COUNT INTO counter RETURN {actor: actor, movies: counter}").toArray()
-```
-
-```json
-[
- {
- "actor" : "actors/BillPull",
- "movies" : 1
- },
- {
- "actor" : "actors/ChristopherG",
- "movies" : 1
- },
- {
- "actor" : "actors/CubaG",
- "movies" : 1
- },
- {
- "actor" : "actors/DemiM",
- "movies" : 1
- },
- {
- "actor" : "actors/JackN",
- "movies" : 1
- },
- {
- "actor" : "actors/JamesM",
- "movies" : 1
- },
- {
- "actor" : "actors/JTW",
- "movies" : 1
- },
- {
- "actor" : "actors/KevinB",
- "movies" : 1
- },
- {
- "actor" : "actors/KevinP",
- "movies" : 1
- },
- {
- "actor" : "actors/KieferS",
- "movies" : 1
- },
- {
- "actor" : "actors/MegR",
- "movies" : 2
- },
- {
- "actor" : "actors/Nathan",
- "movies" : 1
- },
- {
- "actor" : "actors/NoahW",
- "movies" : 1
- },
- {
- "actor" : "actors/RitaW",
- "movies" : 1
- },
- {
- "actor" : "actors/RosieO",
- "movies" : 1
- },
- {
- "actor" : "actors/TomC",
- "movies" : 1
- },
- {
- "actor" : "actors/TomH",
- "movies" : 2
- },
- {
- "actor" : "actors/VictorG",
- "movies" : 1
- }
-]
-```
-
-
-Comment
--------
-
-**Author:** [Michael Hackstein](https://github.com/mchacki)
-
-**Tags:** #graph #examples
-
-[1]: http://stackoverflow.com/questions/32729314/aql-graph-queries-examples
-[2]: http://stackoverflow.com/users/1126414/vincz
diff --git a/Documentation/Books/Cookbook/Graph/FulldepthTraversal.md b/Documentation/Books/Cookbook/Graph/FulldepthTraversal.md
deleted file mode 100644
index a75d27ed8e14..000000000000
--- a/Documentation/Books/Cookbook/Graph/FulldepthTraversal.md
+++ /dev/null
@@ -1,81 +0,0 @@
-Fulldepth Graph-Traversal
-=========================
-
-Problem
--------
-
-Let's assume you have a database with some edges and vertices. Now you need the node with the most connections in full depth.
-
-Solution
---------
-
-You need a custom traversal with the following properties:
-
-- Store all vertices you have visited already
-- If you visit an already visited vertex return the connections + 1 and do not touch the edges
-- If you visit a fresh vertex visit all its children and sum up their connections. Store this sum and return it + 1
-- Repeat for all vertices.
-
-```js
-var traversal = require("org/arangodb/graph/traversal");
-
-var knownFilter = function(config, vertex, path) {
- if (config.known[vertex._key] !== undefined) {
- return "prune";
- }
- return "";
-};
-
-var sumVisitor = function(config, result, vertex, path) {
- if (config.known[vertex._key] !== undefined) {
- result.sum += config.known[vertex._key];
- } else {
- config.known[vertex._key] = result.sum;
- }
- result.sum += 1;
- return;
-};
-
-var config = {
- datasource: traversal.collectionDatasourceFactory(db.e), // e is my edge collection
- strategy: "depthfirst",
- order: "preorder",
- filter: knownFilter,
- expander: traversal.outboundExpander,
- visitor: sumVisitor,
- known: {}
-};
-
-var traverser = new traversal.Traverser(config);
-var cursor = db.v.all(); // v is my vertex collection
-while(cursor.hasNext()) {
- var node = cursor.next();
- traverser.traverse({sum: 0}, node);
-}
-
-config.known; // Contains the result as {name: counter}. In arangosh this will print out the complete result
-```
-
-To execute this script accordingly replace db.v and db.e with your collections
-(v is vertices, e is edges) and write it to a file, e.g. traverse.js,
-then execute it in arangosh:
-
-```
-cat traverse.js | arangosh
-```
-
-If you want to use it in production you should have a look at the Foxx framework which allows
-you to store and execute this script on server side and make it accessible via your own API:
-[Foxx](../../Manual/Foxx/index.html)
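-
-A minimal sketch of such a Foxx route (mount point, route name and the legacy controller setup are assumptions) could look like this:
-
-```js
-var Foxx = require("org/arangodb/foxx");
-var traversal = require("org/arangodb/graph/traversal");
-var db = require("org/arangodb").db;
-var controller = new Foxx.Controller(applicationContext);
-
-controller.get("/connectionCounts", function (req, res) {
-  var config = {
-    datasource: traversal.collectionDatasourceFactory(db.e),
-    strategy: "depthfirst",
-    order: "preorder",
-    filter: knownFilter,   // the filter function from above
-    expander: traversal.outboundExpander,
-    visitor: sumVisitor,   // the visitor function from above
-    known: {}
-  };
-  var traverser = new traversal.Traverser(config);
-  var cursor = db.v.all();
-  while (cursor.hasNext()) {
-    traverser.traverse({sum: 0}, cursor.next());
-  }
-  res.json(config.known);  // vertex key -> counter
-});
-```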
-
-
-Comment
--------
-
-You only compute the connections of each vertex once and then cache them.
-The complexity is roughly proportional to the number of edges.
-In the code above, config.known contains the result for all vertices; you can then add the sorting on it, as sketched below.
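-
-A minimal sketch of that sorting step, run after the traversal above:
-
-```js
-var ranking = Object.keys(config.known).map(function (key) {
-  return { vertex: key, connections: config.known[key] };
-});
-ranking.sort(function (a, b) { return b.connections - a.connections; });
-ranking[0]; // the vertex with the most connections
-```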
-
-**Author:** [Michael Hackstein](https://github.com/mchacki)
-
-**Tags:** #graph
diff --git a/Documentation/Books/Cookbook/Graph/README.md b/Documentation/Books/Cookbook/Graph/README.md
deleted file mode 100644
index 9204e2d9b872..000000000000
--- a/Documentation/Books/Cookbook/Graph/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-Graph
-=====
-
-- [Fulldepth Graph-Traversal](FulldepthTraversal.md)
-
-- [Using a custom Visitor](CustomVisitorFromNodeJs.md)
-
-- [Example AQL Queries for Graphs](ExampleActorsAndMovies.md)
diff --git a/Documentation/Books/Cookbook/Monitoring/Collectd.md b/Documentation/Books/Cookbook/Monitoring/Collectd.md
deleted file mode 100644
index 778d14dab730..000000000000
--- a/Documentation/Books/Cookbook/Monitoring/Collectd.md
+++ /dev/null
@@ -1,351 +0,0 @@
-Monitoring ArangoDB using collectd
-==================================
-
-Problem
--------
-
-The ArangoDB web interface shows a nice summary of the current state. I want to see similar numbers in my monitoring system so I can analyze the system usage post mortem or send alarms on failure.
-
-Solution
---------
-
-[Collectd](http://collectd.org) is an excellent tool to gather all kinds of metrics from a system,
-and deliver it to a central monitoring like [Graphite](http://graphite.wikidot.com/screen-shots)
-and / or [Nagios](http://www.nagios.org/).
-
-### Ingredients
-
-For this recipe you need to install the following tools:
-
-- [collectd >= 5.4.2](https://collectd.org/) The aggregation Daemon
-- [kcollectd](https://www.forwiss.uni-passau.de/~berberic/Linux/kcollectd.html) for inspecting the data
-
-### Configuring collectd
-
-For aggregating the values we will use the [cURL-JSON plug-in](https://collectd.org/wiki/index.php/Plugin:cURL-JSON).
-We will store the values using the [Round-Robin-Database writer](https://collectd.org/wiki/index.php/RRD) (RRD), which `kcollectd` can later on present to you.
-
-We assume your `collectd` comes from your distribution and reads its config from `/etc/collectd/collectd.conf`. Since this file tends to become pretty unreadable quickly, we use the `include` mechanism:
-
-
-    <Include "/etc/collectd/collectd.conf.d">
-      Filter "*.conf"
-    </Include>
-
-
-This way we can keep each metric group in its own compact config file. Each of them consists of three components:
-
-* loading the plug-in
-* adding metrics to the TypesDB
-* the configuration for the plug-in itself
-
-### rrdtool
-
-We will use the [Round-Robin-Database](http://oss.oetiker.ch/rrdtool/) as storage backend for now. It creates its own database files of fixed size for each specific time range. Later you may choose more advanced writer-plug-ins, which may do network distribution of your metrics or integrate the above mentioned Graphite or your already established monitoring, etc.
-
-For the RRD we will go pretty much with defaults:
-
-    # Load the plug-in:
-    LoadPlugin rrdtool
-    <Plugin rrdtool>
-      DataDir "/var/lib/collectd/rrd"
-      # CacheTimeout 120
-      # CacheFlush 900
-      # WritesPerSecond 30
-      # CreateFilesAsync false
-      # RandomTimeout 0
-      #
-      # The following settings are rather advanced
-      # and should usually not be touched:
-      # StepSize 10
-      # HeartBeat 20
-      # RRARows 1200
-      # RRATimespan 158112000
-      # XFF 0.1
-    </Plugin>
-
-
-### cURL JSON
-
-`Collectd` comes with a wide range of metric aggregation plug-ins.
-Many tools today use [JSON](http://json.org) as data formatting grammar; so does ArangoDB.
-
-Therefore a plug-in offering to fetch JSON documents via HTTP is the perfect match to query ArangoDB's [administrative Statistics interface](../../HTTP/AdministrationAndMonitoring/index.html#read-the-statistics):
-
-    # Load the plug-in:
-    LoadPlugin curl_json
-    # we need to use our own types to generate individual names for our gauges:
-    TypesDB "/etc/collectd/arangodb_types.db"
-
-    <Plugin curl_json>
-      # Adjust the URL so collectd can reach your arangod:
-      <URL "http://localhost:8529/_admin/statistics">
-        # Set your authentication to Aardvark here:
-        User "root"
-        # Password "bar"
-
-        # Each plain figure of the statistics document gets its own key block
-        # mapped to a plain gauge:
-        #   <Key "path/of/figure/in/json">
-        #     Type "gauge"
-        #   </Key>
-
-        # The histogram-like client figures use the custom types defined in
-        # arangodb_types.db, for example:
-        <Key "client/totalTime/count">
-          Type "client_totalTime_count"
-        </Key>
-        <Key "client/totalTime/sum">
-          Type "client_totalTime_sum"
-        </Key>
-        <Key "client/totalTime/counts/0">
-          Type "client_totalTime_counts0"
-        </Key>
-        # client/bytesReceived, client/requestTime, client/connectionTime,
-        # client/queueTime, client/bytesSent and client/ioTime follow the
-        # same pattern with their matching types.
-      </URL>
-    </Plugin>
-
-
-
-
-To work around the curl_json plug-in's shortcoming of only taking the last path element as the metric name, we give the metrics names of their own using our own `types.db` file in `/etc/collectd/arangodb_types.db`:
-
- client_totalTime_count value:GAUGE:0:9223372036854775807
- client_totalTime_sum value:GAUGE:U:U
- client_totalTime_counts0 value:GAUGE:U:U
-
- client_bytesReceived_count value:GAUGE:0:9223372036854775807
- client_bytesReceived_sum value:GAUGE:U:U
- client_bytesReceived_counts0 value:GAUGE:U:U
-
- client_requestTime_count value:GAUGE:0:9223372036854775807
- client_requestTime_sum value:GAUGE:U:U
- client_requestTime_counts0 value:GAUGE:U:U
-
- client_connectionTime_count value:GAUGE:0:9223372036854775807
- client_connectionTime_sum value:GAUGE:U:U
- client_connectionTime_counts0 value:GAUGE:U:U
-
- client_queueTime_count value:GAUGE:0:9223372036854775807
- client_queueTime_sum value:GAUGE:U:U
- client_queueTime_counts0 value:GAUGE:U:U
-
- client_bytesSent_count value:GAUGE:0:9223372036854775807
- client_bytesSent_sum value:GAUGE:U:U
- client_bytesSent_counts0 value:GAUGE:U:U
-
- client_ioTime_count value:GAUGE:0:9223372036854775807
- client_ioTime_sum value:GAUGE:U:U
- client_ioTime_counts0 value:GAUGE:U:U
-
-Please note that you probably need to uncomment this line from the main collectd.conf:
-
- # TypesDB "/usr/share/collectd/types.db" "/etc/collectd/my_types.db"
-
-in order to make it still load its main types definition file.
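-
-With the custom types file from this recipe added, the uncommented line could for example look like this (paths may differ on your distribution):
-
-    TypesDB "/usr/share/collectd/types.db" "/etc/collectd/arangodb_types.db"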
-
-### Rolling your own
-
-You may want to monitor your own metrics from ArangoDB. Here is a simple example how to use the `config`:
-
- {
- "testArray":[1,2],
- "testArrayInbetween":[{"blarg":3},{"blub":4}],
- "testDirectHit":5,
- "testSubLevelHit":{"oneMoreLevel":6}
- }
-
-This `config` snippet will parse the JSON above:
-
-
-    <Key "testArray/0">
-      Type "gauge"
-      # Expect: 1
-    </Key>
-    <Key "testArray/1">
-      Type "gauge"
-      # Expect: 2
-    </Key>
-    <Key "testArrayInbetween/0/blarg">
-      Type "gauge"
-      # Expect: 3
-    </Key>
-    <Key "testArrayInbetween/1/blub">
-      Type "gauge"
-      # Expect: 4
-    </Key>
-    <Key "testDirectHit">
-      Type "gauge"
-      # Expect: 5
-    </Key>
-    <Key "testSubLevelHit/oneMoreLevel">
-      Type "gauge"
-      # Expect: 6
-    </Key>
-```js
-var searchQuery = 'FOR x IN @@collection FILTER x.time /* attribute name assumed */ >= @until COLLECT value = x.value WITH COUNT INTO counter RETURN {[CONCAT("choice", value)]: counter}';
-controller.get('/firstCollection/lastSeconds/:nSeconds', function (req, res) {
- var until = Date.now() - req.params('nSeconds') * 1000;
- res.json(
- db._query(searchQuery, {
- '@collection': FirstCollection_repo.collection.name(),
- 'until': until
- }).toArray()
- );
-}).pathParam('nSeconds', {
- description: 'look up to n Seconds into the past',
- type: joi.string().required()
-});
-```
-
-We inspect the return document using curl and [jq](http://stedolan.github.io/jq/) for nice formatting:
-
-```javascript
-curl 'http://localhost:8529/_db/_system/collectable_foxx/data/firstCollection/firstCollection/lastSeconds/10' |jq "."
-
-[
- {
-    "1": 3,
- "3": 7
- }
-]
-```
-
-We have to design the return values in a way that collectd's config syntax can simply grab them. This route returns an object with flat key/value pairs where the keys may range from 0 to 5.
-We create a simple collectd configuration in `/etc/collectd/collectd.conf.d/foxx_simple.conf` that matches our API:
-
-```javascript
-# Load the plug-in:
-LoadPlugin curl_json
-# we need to use our own types to generate individual names for our gauges:
-TypesDB "/etc/collectd/collectd.conf.d/foxx_simple_types.db"
-
-<Plugin curl_json>
-  # Adjust the URL so collectd can reach your arangod:
-  <URL "http://localhost:8529/_db/_system/collectable_foxx/data/firstCollection/firstCollection/lastSeconds/10">
-    # Set your authentication to Aardvark here:
-    # User "foo"
-    # Password "bar"
-    <Key "0">
-      Type "the_values"
-    </Key>
-    <Key "1">
-      Type "first_values"
-    </Key>
-    <Key "2">
-      Type "second_values"
-    </Key>
-    <Key "3">
-      Type "third_values"
-    </Key>
-    <Key "4">
-      Type "fourth_values"
-    </Key>
-    <Key "5">
-      Type "fifth_values"
-    </Key>
-  </URL>
-</Plugin>
-```
-
-To get nice metric names, we specify our own `types.db` file in `/etc/collectd/collectd.conf.d/foxx_simple_types.db`:
-
-```
-the_values value:GAUGE:U:U
-first_values value:GAUGE:U:U
-second_values value:GAUGE:U:U
-third_values value:GAUGE:U:U
-fourth_values value:GAUGE:U:U
-fifth_values value:GAUGE:U:U
-```
-
-**Author:** [Wilfried Goesgens](https://github.com/dothebart)
-
-**Tags:** #monitoring #foxx #json
diff --git a/Documentation/Books/Cookbook/Monitoring/OtherRelevantMetrics.md b/Documentation/Books/Cookbook/Monitoring/OtherRelevantMetrics.md
deleted file mode 100644
index 806d57685d09..000000000000
--- a/Documentation/Books/Cookbook/Monitoring/OtherRelevantMetrics.md
+++ /dev/null
@@ -1,137 +0,0 @@
-Monitoring other relevant metrics of ArangoDB
-=============================================
-
-Problem
--------
-
-Aside from the values which ArangoDB already offers for monitoring, other system metrics may be relevant for continuously operating ArangoDB, be it a single instance or a cluster setup. [Collectd offers a plethora of plugins](https://collectd.org/wiki/index.php/Table_of_Plugins) - let's have a look at some of them which may be useful for us.
-
-Solution
---------
-
-### Ingredients
-
-For this recipe you need to install the following tools:
-
-- [collectd](https://collectd.org/): The metrics aggregation Daemon
-- we build on the [Monitoring with Collectd recipe](Collectd.md) for understanding the basics about collectd
-
-### Disk usage
-You may want to monitor that ArangoDB doesn't run out of disk space. The [df Plugin](https://collectd.org/wiki/index.php/Plugin:DF) can aggregate these values for you.
-
-First we need to find out which disks are used by your ArangoDB. By default you need to find **/var/lib/arango** in the mount points. Since nowadays many virtual file systems are also mounted on a typical \*nix system we want to sort the output of mount:
-
- mount | sort
- /dev/sda3 on /local/home type ext4 (rw,relatime,data=ordered)
- /dev/sda4 on / type ext4 (rw,relatime,data=ordered)
- /dev/sdb1 on /mnt type vfat (rw,relatime,fmask=0022,dmask=0022,codepage=437,iocharset=utf8,shortname=mixed,errors=remount-ro)
- binfmt_misc on /proc/sys/fs/binfmt_misc type binfmt_misc (rw,relatime)
- cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
- ....
- udev on /dev type devtmpfs (rw,relatime,size=10240k,nr_inodes=1022123,mode=755)
-
-So here we can see that the mount points are `/`, `/local/home` and `/mnt`, so `/var/lib/` can be found on the root partition (`/`), which is `/dev/sda4` here. A production setup may be different, so the OS doesn't interfere with the services.
-
-The collectd configuration `/etc/collectd/collectd.conf.d/diskusage.conf` looks like this:
-
-    LoadPlugin df
-    <Plugin df>
-      Device "/dev/sda4"
-      # Device "192.168.0.2:/mnt/nfs"
-      # MountPoint "/home"
-      # FSType "ext4"
-      # ignore rootfs; else, the root file-system would appear twice, causing
-      # one of the updates to fail and spam the log
-      FSType rootfs
-      # ignore the usual virtual / temporary file-systems
-      FSType sysfs
-      FSType proc
-      FSType devtmpfs
-      FSType devpts
-      FSType tmpfs
-      FSType fusectl
-      FSType cgroup
-      IgnoreSelected true
-      # ReportByDevice false
-      # ReportReserved false
-      # ReportInodes false
-      # ValuesAbsolute true
-      # ValuesPercentage false
-    </Plugin>
-
-
-### Disk I/O Usage
-
-Another interesting metric is the amount of data read/written to disk - it's an estimate of how busy your ArangoDB or the whole system currently is.
-The [Disk plugin](https://collectd.org/wiki/index.php/Plugin:Disk) aggregates these values.
-
-According to the mount points above our configuration `/etc/collectd/collectd.conf.d/disk_io.conf` looks like this:
-
-    LoadPlugin disk
-    <Plugin disk>
-      Disk "hda"
-      Disk "/sda[23]/"
-      IgnoreSelected false
-    </Plugin>
-
-
-
-### CPU Usage
-
-While ArangoDB's self-monitoring already offers some overview of the running threads etc., you can get a deeper view using the [Processes Plugin](https://collectd.org/wiki/index.php/Plugin:Processes).
-
-If you're running a single Arango instance, a simple match by process name is sufficient, `/etc/collectd/collectd.conf.d/arango_process.conf` looks like this:
-
-    LoadPlugin processes
-    <Plugin processes>
-      Process "arangod"
-    </Plugin>
-
-
-If you're running a cluster, you can match the specific instances by command-line parameters, `/etc/collectd/collectd.conf.d/arango_cluster.conf` looks like this:
-
-    LoadPlugin processes
-    <Plugin processes>
-      ProcessMatch "Claus" "/usr/bin/arangod .*--cluster.my-address *:8530"
-      ProcessMatch "Pavel" "/usr/bin/arangod .*--cluster.my-address *:8629"
-      ProcessMatch "Perry" "/usr/bin/arangod .*--cluster.my-address *:8630"
-      Process "etcd-arango"
-    </Plugin>
-
-
-### More Plugins
-
-As mentioned above, the list of available plugins is huge; here are some more you could be interested in:
-- use the [CPU Plugin](https://collectd.org/wiki/index.php/CPU) to monitor the overall CPU utilization
-- use the [Memory Plugin](https://collectd.org/wiki/index.php/Plugin:Memory) to monitor main memory availability
-- use the [Swap Plugin](https://collectd.org/documentation/manpages/collectd.conf.5.shtml#plugin_swap)
- to see whether excess RAM usage forces the system to page and thus slow down
-- use [Ethernet Statistics](https://collectd.org/wiki/index.php/Plugin:Ethstat)
-  to see what's going on at your network cards and get a broader overview of network traffic
-- you may [Tail logfiles](https://collectd.org/wiki/index.php/Plugin:Tail)
- like an apache request log and pick specific requests by regular expressions
-- [Parse tabular files](https://collectd.org/wiki/index.php/Plugin:Table) in the `/proc` file system
-- you can use [filters](https://collectd.org/documentation/manpages/collectd.conf.5.shtml#filter_configuration)
- to reduce the amount of data created by plugins (i.e. if you have many CPU cores, you may want the combined result).
- It can also decide where to route data and to which writer plugin
-- while you may have seen that metrics are stored at a fixed rate or frequency,
- your metrics (i.e. the durations of web requests) may come in a random & higher frequency.
- Thus you want to burn them down to a fixed frequency, and know Min/Max/Average/Median.
- So you want to [Aggregate values using the statsd pattern](https://collectd.org/wiki/index.php/Plugin:StatsD).
-- You may start rolling your own in [Python](https://collectd.org/wiki/index.php/Plugin:Python),
- [java](https://collectd.org/wiki/index.php/Plugin:Java),
- [Perl](https://collectd.org/wiki/index.php/Plugin:Perl) or for sure in
- [C](https://collectd.org/wiki/index.php/Plugin_architecture), the language collectd is implemented in
-
-Finally, while kcollectd is nice for getting a quick look at your collected metrics while working your way into collectd,
-it's not sufficient for operating a production site. Since collectd's default storage RRD is already widespread in system monitoring,
-there are [many web frontends](https://collectd.org/wiki/index.php/List_of_front-ends) to choose from for the visualization.
-Some of them replace the RRD storage by simply adding a writer plugin,
-most prominently the [Graphite graphing framework](http://graphite.wikidot.com/screen-shots) with the
-[Graphite writer](https://collectd.org/wiki/index.php/Plugin:Write_Graphite), which allows you to combine arbitrary metrics in single graphs
-- to find coincidences in your data [you never dreamed of](http://metrics20.org/media/).
-
-If you already run [Nagios](http://www.nagios.org) you can use the
-[Nagios tool](https://collectd.org/documentation/manpages/collectd-nagios.1.shtml) to submit values.
-
-We hope you now have a good overview of what's possible, but as usual it's a good idea to browse the [Fine Manual](https://collectd.org/documentation.shtml).
-
-**Author:** [Wilfried Goesgens](https://github.com/dothebart)
-
-**Tags:** #monitoring
diff --git a/Documentation/Books/Cookbook/Monitoring/SlaveStatus.md b/Documentation/Books/Cookbook/Monitoring/SlaveStatus.md
deleted file mode 100644
index 264a4e3f565e..000000000000
--- a/Documentation/Books/Cookbook/Monitoring/SlaveStatus.md
+++ /dev/null
@@ -1,132 +0,0 @@
-Monitoring replication slave
-============================
-
-**Note**: this recipe works with ArangoDB 2.5; you need a collectd curl_json plugin with correct boolean type mapping.
-
-Problem
--------
-
-How to monitor the slave status using the `collectd curl_JSON` plugin.
-
-Solution
---------
-
-Since arangodb [reports the replication status in JSON](../../HTTP/Replications/ReplicationApplier.html#state-of-the-replication-applier),
-integrating it with the [collectd curl_JSON plugin](Collectd.md)
-should be an easy exercise. However, only very recent versions of collectd will handle boolean flags correctly.
-
-Our test master/slave setup runs with the master listening on `tcp://127.0.0.1:8529` and the slave (which we query) listening on `tcp://127.0.0.1:8530`.
-They replicate a database by the name `testDatabase`.
-
-Since replication appliers are active per database and our example doesn't use the default `_system`, we need to specify its name in the URL like this: `_db/testDatabase`.
-
-We need to parse a document from a request like this:
-
- curl --dump - http://localhost:8530/_db/testDatabase/_api/replication/applier-state
-
-If the replication is not running, the document will look like this:
-
-```javascript
-{
- "state": {
- "running": false,
- "lastAppliedContinuousTick": null,
- "lastProcessedContinuousTick": null,
- "lastAvailableContinuousTick": null,
- "safeResumeTick": null,
- "progress": {
- "time": "2015-11-02T13:24:07Z",
- "message": "applier shut down",
- "failedConnects": 0
- },
- "totalRequests": 1,
- "totalFailedConnects": 0,
- "totalEvents": 0,
- "totalOperationsExcluded": 0,
- "lastError": {
- "time": "2015-11-02T13:24:07Z",
- "errorMessage": "no start tick",
- "errorNum": 1413
- },
- "time": "2015-11-02T13:31:53Z"
- },
- "server": {
- "version": "2.7.0",
- "serverId": "175584498800385"
- },
- "endpoint": "tcp://127.0.0.1:8529",
- "database": "testDatabase"
-}
-```
-
-A running replication will return something like this:
-
-```javascript
-{
- "state": {
- "running": true,
- "lastAppliedContinuousTick": "1150610894145",
- "lastProcessedContinuousTick": "1150610894145",
- "lastAvailableContinuousTick": "1151639153985",
- "safeResumeTick": "1150610894145",
- "progress": {
- "time": "2015-11-02T13:49:56Z",
- "message": "fetching master log from tick 1150610894145",
- "failedConnects": 0
- },
- "totalRequests": 12,
- "totalFailedConnects": 0,
- "totalEvents": 2,
- "totalOperationsExcluded": 0,
- "lastError": {
- "errorNum": 0
- },
- "time": "2015-11-02T13:49:57Z"
- },
- "server": {
- "version": "2.7.0",
- "serverId": "175584498800385"
- },
- "endpoint": "tcp://127.0.0.1:8529",
- "database": "testDatabase"
-}
-```
-
-We create a simple collectd configuration in `/etc/collectd/collectd.conf.d/slave_testDatabase.conf` that matches our API:
-
-```javascript
-TypesDB "/etc/collectd/collectd.conf.d/slavestate_types.db"
-
-<Plugin curl_json>
-  # Adjust the URL so collectd can reach your arangod slave instance:
-  <URL "http://localhost:8530/_db/testDatabase/_api/replication/applier-state">
-    # Set your authentication to that database here:
-    # User "foo"
-    # Password "bar"
-    <Key "state/running">
-      Type "boolean"
-    </Key>
-    <Key "state/totalRequests">
-      Type "counter"
-    </Key>
-    <Key "state/totalFailedConnects">
-      Type "counter"
-    </Key>
-    <Key "state/totalEvents">
-      Type "counter"
-    </Key>
-  </URL>
-</Plugin>
-```
-
-To get nice metric names, we specify our own `types.db` file in `/etc/collectd/collectd.conf.d/slavestate_types.db`:
-
-```
-boolean value:ABSOLUTE:0:1
-```
-
-So, basically `state/running` will give you `1` if the replication is running and `0` if it is not, through the collectd monitor.
-
-
-**Author:** [Wilfried Goesgens](https://github.com/dothebart)
-
-**Tags:** #monitoring #foxx #json
diff --git a/Documentation/Books/Cookbook/Monitoring/TrafficWithIPAccounting.md b/Documentation/Books/Cookbook/Monitoring/TrafficWithIPAccounting.md
deleted file mode 100644
index 7b601f70e931..000000000000
--- a/Documentation/Books/Cookbook/Monitoring/TrafficWithIPAccounting.md
+++ /dev/null
@@ -1,206 +0,0 @@
-Monitoring ArangoDB Cluster network usage
-=========================================
-
-Problem
--------
-
-We run a cluster and want to know whether the traffic is unbalanced or otherwise anomalous. We want a cheap estimate of which host handles how much traffic.
-
-Solution
---------
-
-As we already run [Collectd](http://collectd.org) as our metric-hub, we want to utilize it to also give us these figures. A very cheap way to generate these values is to use the counters of the IPTables firewall of our system.
-
-### Ingredients
-
-For this recipe you need to install the following tools:
-
-- [collectd](https://collectd.org/): the aggregation Daemon
-- [kcollectd](https://www.forwiss.uni-passau.de/~berberic/Linux/kcollectd.html) for inspecting the data
-- [iptables](http://en.wikipedia.org/wiki/Iptables) - should come with your Linux distribution
-- [ferm](http://ferm.foo-projects.org/download/2.2/ferm.html#basic_iptables_match_keywords) for compact firewall code
-- we build on the [Monitoring with Collectd recipe](Collectd.md) for understanding the basics about collectd
-
-### Getting the state and the Ports of your cluster
-
-Now we need to find out the current configuration of our cluster. For the time being we assume you simply issued
-
- ./scripts/startLocalCluster.sh
-
-to get you set up. So you know you've got two DB-Servers - one Coordinator, one agent:
-
- ps -eaf |grep arango
- arangod 21406 1 1 16:59 pts/14 00:00:00 bin/etcd-arango --data-dir /var/tmp/tmp-21550-1347489353/shell_server/agentarango4001 --name agentarango4001 --bind-addr 127.0.0.1:4001 --addr 127.0.0.1:4001 --peer-bind-addr 127.0.0.1:7001 --peer-addr 127.0.0.1:7001 --initial-cluster-state new --initial-cluster agentarango4001=http://127.0.0.1:7001
- arangod 21408 1 4 16:56 pts/14 00:00:01 bin/arangod --database.directory cluster/data8629 --cluster.agency-endpoint tcp://localhost:4001 --cluster.my-address tcp://localhost:8629 --server.endpoint tcp://localhost:8629 --log.file cluster/8629.log
- arangod 21410 1 5 16:56 pts/14 00:00:02 bin/arangod --database.directory cluster/data8630 --cluster.agency-endpoint tcp://localhost:4001 --cluster.my-address tcp://localhost:8630 --server.endpoint tcp://localhost:8630 --log.file cluster/8630.log
- arangod 21416 1 5 16:56 pts/14 00:00:02 bin/arangod --database.directory cluster/data8530 --cluster.agency-endpoint tcp://localhost:4001 --cluster.my-address tcp://localhost:8530 --server.endpoint tcp://localhost:8530 --log.file cluster/8530.log
-
-We can now check which ports they occupied:
-
- netstat -aplnt |grep arango
- tcp 0 0 127.0.0.1:7001 0.0.0.0:* LISTEN 21406/etcd-arango
- tcp 0 0 127.0.0.1:4001 0.0.0.0:* LISTEN 21406/etcd-arango
- tcp 0 0 127.0.0.1:8530 0.0.0.0:* LISTEN 21416/arangod
- tcp 0 0 127.0.0.1:8629 0.0.0.0:* LISTEN 21408/arangod
- tcp 0 0 127.0.0.1:8630 0.0.0.0:* LISTEN 21410/arangod
-
-- The agent has 7001 and 4001. Since it's running in single server mode its cluster port (7001) should not show any traffic, port 4001 is the interesting one.
-- Claus - This is the coordinator. Your Application will talk to it on port 8530
-- Pavel - This is the first DB-Server; Claus will talk to it on port 8629
-- Perry - This is the second DB-Server; Claus will talk to it on port 8630
-
-### Configuring IPTables / ferm
-
-Since the usual solution of shell scripts calling iptables
-brings the [DRY principle](http://en.wikipedia.org/wiki/Don%27t_repeat_yourself) to a grinding halt,
-we need something better. Here [ferm](http://ferm.foo-projects.org/download/2.2/ferm.html#basic_iptables_match_keywords) comes to the rescue -
-it enables you to produce very compact and well readable firewall configurations.
-
-According to the ports we found in the last section, we will configure our firewall in `/etc/ferm/ferm.conf`, and put the identities into the comments so we have a persistent naming scheme:
-
- # blindly forward these to the accounting chain:
- @def $ARANGO_RANGE=4000:9000;
-
- @def &TCP_ACCOUNTING($PORT, $COMMENT, $SRCCHAIN) = {
- @def $FULLCOMMENT=@cat($COMMENT, "_", $SRCCHAIN);
- dport $PORT mod comment comment $FULLCOMMENT NOP;
- }
-
- @def &ARANGO_ACCOUNTING($CHAINNAME) = {
- # The coordinators:
- &TCP_ACCOUNTING(8530, "Claus", $CHAINNAME);
- # The db-servers:
- &TCP_ACCOUNTING(8629, "Pavel", $CHAINNAME);
- &TCP_ACCOUNTING(8630, "Perry", $CHAINNAME);
- # The agency:
- &TCP_ACCOUNTING(4001, "etcd_client", $CHAINNAME);
- # it shouldn't talk to itself if it is only running with a single instance:
- &TCP_ACCOUNTING(7007, "etcd_cluster", $CHAINNAME);
- }
-
- table filter {
- chain INPUT {
- proto tcp dport $ARANGO_RANGE @subchain "Accounting" {
- &ARANGO_ACCOUNTING("input");
- }
- policy DROP;
-
- # connection tracking
- mod state state INVALID DROP;
- mod state state (ESTABLISHED RELATED) ACCEPT;
-
- # allow local packet
- interface lo ACCEPT;
-
- # respond to ping
- proto icmp ACCEPT;
-
- # allow IPsec
- proto udp dport 500 ACCEPT;
- proto (esp ah) ACCEPT;
-
- # allow SSH connections
- proto tcp dport ssh ACCEPT;
- }
- chain OUTPUT {
- policy ACCEPT;
-
- proto tcp dport $ARANGO_RANGE @subchain "Accounting" {
- &ARANGO_ACCOUNTING("output");
- }
-
- # connection tracking
- #mod state state INVALID DROP;
- mod state state (ESTABLISHED RELATED) ACCEPT;
- }
- chain FORWARD {
- policy DROP;
-
- # connection tracking
- mod state state INVALID DROP;
- mod state state (ESTABLISHED RELATED) ACCEPT;
- }
- }
-
-**Note**: This is a very basic configuration, mainly intended to demonstrate the accounting feature - so don't run this in production.
-
-After activating it interactively with
-
- ferm -i /etc/ferm/ferm.conf
-
-We now use the iptables command-line utility directly to review the status of our current setting:
-
- iptables -L -nvx
- Chain INPUT (policy DROP 85 packets, 6046 bytes)
- pkts bytes target prot opt in out source destination
- 7636 1821798 Accounting tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpts:4000:9000
- 0 0 DROP all -- * * 0.0.0.0/0 0.0.0.0/0 state INVALID
- 14700 14857709 ACCEPT all -- * * 0.0.0.0/0 0.0.0.0/0 state RELATED,ESTABLISHED
- 130 7800 ACCEPT all -- lo * 0.0.0.0/0 0.0.0.0/0
- 0 0 ACCEPT icmp -- * * 0.0.0.0/0 0.0.0.0/0
- 0 0 ACCEPT udp -- * * 0.0.0.0/0 0.0.0.0/0 udp dpt:500
- 0 0 ACCEPT esp -- * * 0.0.0.0/0 0.0.0.0/0
- 0 0 ACCEPT ah -- * * 0.0.0.0/0 0.0.0.0/0
- 0 0 ACCEPT tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:22
-
- Chain FORWARD (policy DROP 0 packets, 0 bytes)
- pkts bytes target prot opt in out source destination
- 0 0 DROP all -- * * 0.0.0.0/0 0.0.0.0/0 state INVALID
- 0 0 ACCEPT all -- * * 0.0.0.0/0 0.0.0.0/0 state RELATED,ESTABLISHED
-
- Chain OUTPUT (policy ACCEPT 296 packets, 19404 bytes)
- pkts bytes target prot opt in out source destination
- 7720 1882404 Accounting tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpts:4000:9000
- 14575 14884356 ACCEPT all -- * * 0.0.0.0/0 0.0.0.0/0 state RELATED,ESTABLISHED
-
- Chain Accounting (2 references)
- pkts bytes target prot opt in out source destination
- 204 57750 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:8530 /* Claus_input */
- 20 17890 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:8629 /* Pavel_input */
- 262 97352 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:8630 /* Perry_input */
- 2604 336184 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:4001 /* etcd_client_input */
- 0 0 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:7007 /* etcd_cluster_input */
- 204 57750 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:8530 /* Claus_output */
- 20 17890 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:8629 /* Pavel_output */
- 262 97352 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:8630 /* Perry_output */
- 2604 336184 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:4001 /* etcd_client_output */
- 0 0 tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:7007 /* etcd_cluster_output */
-
-
-You can clearly see the Accounting sub-chain with our comments, which should be straightforward to match.
-We also see the **pkts** and **bytes** columns. They contain the current values of these counters on your system.
-
-Read more about [linux firewalling](http://lartc.org) and
-[ferm configuration](http://ferm.foo-projects.org/download/2.2/ferm.html) to be sure you do the right thing.
-
-### Configuring Collectd to pick up these values
-
-Since your system now generates these numbers, we want to configure collectd with its [iptables plugin](https://collectd.org/wiki/index.php/Plugin:IPTables) to aggregate them.
-
-We do so in the `/etc/collectd/collectd.conf.d/iptables.conf`:
-
- LoadPlugin iptables
-
- Chain filter "Accounting" "Claus_input"
- Chain filter "Accounting" "Pavel_input"
- Chain filter "Accounting" "Perry_input"
- Chain filter "Accounting" "etcd_client_input"
- Chain filter "Accounting" "etcd_cluster_input"
- Chain filter "Accounting" "Claus_output"
- Chain filter "Accounting" "Pavel_output"
- Chain filter "Accounting" "Perry_output"
- Chain filter "Accounting" "etcd_client_output"
- Chain filter "Accounting" "etcd_cluster_output"
-
-
-Now we restart collectd with `/etc/init.d/collectd restart` and watch the syslog for errors. If everything is OK, our values should show up in:
-
- /var/lib/collectd/rrd/localhost/iptables-filter-Accounting/ipt_packets-Claus_output.rrd
-
-We can inspect our values with kcollectd:
-
-![Kcollectd screenshot](../assets/MonitoringWithCollectd/KCollectdIPtablesAccounting.png)
-
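-If you prefer the command line, you can also peek at the raw values with `rrdtool` (this assumes the rrdtool package is installed; it is a sketch, not part of the original setup):
-
-    rrdtool fetch \
-      /var/lib/collectd/rrd/localhost/iptables-filter-Accounting/ipt_packets-Claus_output.rrd \
-      AVERAGE --start -1h
-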
-**Author:** [Wilfried Goesgens](https://github.com/dothebart)
-
-**Tags:** #monitoring
diff --git a/Documentation/Books/Cookbook/README.md b/Documentation/Books/Cookbook/README.md
deleted file mode 100644
index a5ec1219c79c..000000000000
--- a/Documentation/Books/Cookbook/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Cookbook
-
-This cookbook is filled with recipes to help you understand the [multi-model database ArangoDB](https://www.arangodb.com/) better
-and to help you with specific problems.
-
-You can participate and [write your own recipes][2].
-You only need to write a recipe in markdown and make a [pull request to our repository][2].
-
-**Recipes**
-
-There are simple recipes to bring you closer to ArangoDB and to show you the range of possibilities
-of our database.
-There are also more complex recipes that present solutions to specific problems and show the depth of ArangoDB.
-
-Every recipe is divided into three parts:
-
-1. **Problem**: A description of the problem
-2. **Solution**: A detailed solution of the given problem with code if any is needed
-3. **Comment**: Explanation of the solution. This part is optional depending on the complexity of the problem
-
-Every recipe has tags for a better overview:
-
-*#api*, *#aql*, *#arangosh*, *#collection*, *#database*, *#debian*, *#docker*, *#document*, *#driver*, *#foxx*, *#giantswarm*, *#graph*, *#howto*, *#java*, *#javascript*, *#join*, *#nodejs*, *#windows*
-
-[2]: https://github.com/arangodb/arangodb/tree/devel/Documentation/Books/Cookbook
diff --git a/Documentation/Books/Cookbook/SUMMARY.md b/Documentation/Books/Cookbook/SUMMARY.md
deleted file mode 100644
index 321a3ac0b518..000000000000
--- a/Documentation/Books/Cookbook/SUMMARY.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Summary
-* [Introduction](README.md)
-* [Modelling Document Inheritance](DocumentInheritance.md)
-* [Accessing Shapes Data](AccessingShapesData.md)
-* [AQL](AQL/README.md)
- * [Using Joins in AQL](AQL/Joins.md)
- * [Using Dynamic Attribute Names](AQL/DynamicAttributeNames.md)
- * [Creating Test-data using AQL](AQL/CreatingTestData.md)
- * [Diffing Documents](AQL/DiffingDocuments.md)
- * [Avoiding Parameter Injection](AQL/AvoidingInjection.md)
- * [Multiline Query Strings](AQL/MultilineQueryStrings.md)
- * [Migrating named graph functions to 3.0](AQL/MigratingGraphFunctionsTo3.md)
- * [Migrating anonymous graph functions to 3.0](AQL/MigratingEdgeFunctionsTo3.md)
- * [Migrating graph measurements to 3.0](AQL/MigratingMeasurementsTo3.md)
-* [Graph](Graph/README.md)
- * [Fulldepth Graph-Traversal](Graph/FulldepthTraversal.md)
- * [Using a custom Visitor](Graph/CustomVisitorFromNodeJs.md)
- * [Example AQL Queries for Graphs](Graph/ExampleActorsAndMovies.md)
-* [Use Cases / Examples](UseCases/README.md)
- * [Monetary data without precision loss](UseCases/MonetaryDataWithoutPrecisionLoss.md)
- * [Populating a Textbox](UseCases/PopulatingAnAutocompleteTextbox.md)
- * [Exporting Data](UseCases/ExportingData.md)
- * [Accessing base documents with Java](UseCases/JavaDriverBaseDocument.md)
- * [Add XML data to ArangoDB with Java](UseCases/JavaDriverXmlData.md)
-* [Administration](Administration/README.md)
- * [Using Authentication](Administration/Authentication.md)
- * [Importing Data](Administration/ImportingData.md)
- * [Replication](Administration/Replication/README.md)
- * [Replicating Data](Administration/ReplicatingData.md)
- * [Slave Initialization](Administration/Replication/ReplicationFromBackup.md)
- * [Silent installation on Windows](Administration/NSISSilentMode.md)
- * [Migrating 2.8 to 3.0](Administration/Migrate2.8to3.0.md)
- * [Show grants function](Administration/ShowUsersGrants.md)
-* [Compiling / Build](Compiling/README.md)
- * [Compile on Debian](Compiling/Debian.md)
- * [Compile on Windows](Compiling/Windows.md)
- * [Running Custom Build](Compiling/RunningCustomBuild.md)
- * [Recompiling jemalloc](Compiling/jemalloc.md)
-* [Docker](Cloud/README.md)
- * [Docker ArangoDB](Cloud/DockerContainer.md)
- * [Docker with NodeJS App](Cloud/NodeJsDocker.md)
-* [Monitoring](Monitoring/Collectd.md)
- * [Collectd - Replication Slaves](Monitoring/SlaveStatus.md)
- * [Collectd - Network usage](Monitoring/TrafficWithIPAccounting.md)
- * [Collectd - more Metrics](Monitoring/OtherRelevantMetrics.md)
- * [Collectd - Monitoring Foxx](Monitoring/FoxxApps.md)
diff --git a/Documentation/Books/Cookbook/UseCases/ExportingData.md b/Documentation/Books/Cookbook/UseCases/ExportingData.md
deleted file mode 100644
index 03d8ac8bcf86..000000000000
--- a/Documentation/Books/Cookbook/UseCases/ExportingData.md
+++ /dev/null
@@ -1,310 +0,0 @@
-Exporting Data for Offline Processing
-=====================================
-
-In this recipe we will learn how to use the [export API][1] to extract data and process it with PHP. At the end of the recipe you can download the complete PHP script.
-
-**Note**: The following recipe is written using an ArangoDB server with version 2.6 or higher. You can also use the `devel` branch, since version 2.6 hasn't been officially released yet.
-
-Howto
------
-
-### Importing example data
-
-First of all we need some data in an ArangoDB collection. For this example we will use a collection named `users`, which we will populate with 100,000 [example documents][2]. This is how you can get the data into ArangoDB:
-
-```bash
-# download data file
-wget https://jsteemann.github.io/downloads/code/users-100000.json.tar.gz
-# uncompress it
-tar xvfz users-100000.json.tar.gz
-# import into ArangoDB
-arangoimport --file users-100000.json --collection users --create-collection true
-```
-
-### Setting up ArangoDB-PHP
-
-For this recipe we will use the [ArangoDB PHP driver][3]:
-
-```bash
-git clone -b devel "https://github.com/arangodb/arangodb-php.git"
-```
-
-We will now write a simple PHP script that establishes a connection to ArangoDB on localhost:
-
-```php
-<?php
-
-namespace ArangoDBClient;
-
-// use the driver's autoloader to load the classes
-// (older versions of the driver use the triagens\ArangoDb namespace instead)
-require 'arangodb-php/autoload.php';
-Autoloader::init();
-
-// set up connection options
-$connectionOptions = array(
-  // server endpoint to connect to
-  ConnectionOptions::OPTION_ENDPOINT => 'tcp://localhost:8529',
- // can use Keep-Alive connection
- ConnectionOptions::OPTION_CONNECTION => 'Keep-Alive',
- // use basic authorization
- ConnectionOptions::OPTION_AUTH_TYPE => 'Basic',
- // user for basic authorization
- ConnectionOptions::OPTION_AUTH_USER => 'root',
- // password for basic authorization
- ConnectionOptions::OPTION_AUTH_PASSWD => '',
- // timeout in seconds
- ConnectionOptions::OPTION_TIMEOUT => 30,
- // database name
- ConnectionOptions::OPTION_DATABASE => '_system'
- );
-
-try {
- // establish connection
- $connection = new Connection($connectionOptions);
-
- echo 'Connected!' . PHP_EOL;
-
- // TODO: now do something useful with the connection!
-
-} catch (ConnectException $e) {
- print $e . PHP_EOL;
-} catch (ServerException $e) {
- print $e . PHP_EOL;
-} catch (ClientException $e) {
- print $e . PHP_EOL;
-}
-```
-
-After running the script you should see `Connected!` printed in your shell if everything went well.
-
-### Extracting the data
-
-Now we can run an export of the data in the collection `users`. Place the following code into the `TODO` part of the first script:
-
-```php
-function export($collection, Connection $connection) {
- $fp = fopen('output.json', 'w');
-
- if (! $fp) {
- throw new Exception('could not open output file!');
- }
-
- // settings to use for the export
- $settings = array(
- 'batchSize' => 5000, // export in chunks of 5K documents
- '_flat' => true // use simple PHP arrays
- );
-
- $export = new Export($connection, $collection, $settings);
-
- // execute the export. this will return an export cursor
- $cursor = $export->execute();
-
- // statistics
- $count = 0;
- $batches = 0;
- $bytes = 0;
-
- // now we can fetch the documents from the collection in batches
- while ($docs = $cursor->getNextBatch()) {
- $output = '';
- foreach ($docs as $doc) {
- $output .= json_encode($doc) . PHP_EOL;
- }
-
- // write out chunk
- fwrite($fp, $output);
-
- // update statistics
- $count += count($docs);
- $bytes += strlen($output);
- ++$batches;
- }
-
- fclose($fp);
-
- echo sprintf('written %d documents in %d batches with %d total bytes',
- $count,
- $batches,
- $bytes) . PHP_EOL;
-}
-
-// run the export
-export('users', $connection);
-```
-
-The function extracts all documents from the collection and writes them into an output file `output.json`. In addition it will print some statistics about the number of documents and the total data size:
-
-```
-written 100000 documents in 20 batches with 40890013 total bytes
-```
-
-### Applying some transformations
-
-We will now use PHP to transform the data as we extract it:
-
-```php
-function transformDate($value) {
- return preg_replace('/^(\\d+)-(\\d+)-(\\d+)$/', '\\2/\\3/\\1', $value);
-}
-
-function transform(array $document) {
- static $genders = array('male' => 'm', 'female' => 'f');
-
- $transformed = array(
- 'gender' => $genders[$document['gender']],
- 'dob' => transformDate($document['birthday']),
- 'memberSince' => transformDate($document['memberSince']),
- 'fullName' => $document['name']['first'] . ' ' . $document['name']['last'],
- 'email' => $document['contact']['email'][0]
- );
-
- return $transformed;
-}
-
-function export($collection, Connection $connection) {
- $fp = fopen('output-transformed.json', 'w');
-
- if (! $fp) {
- throw new Exception('could not open output file!');
- }
-
- // settings to use for the export
- $settings = array(
- 'batchSize' => 5000, // export in chunks of 5K documents
- '_flat' => true // use simple PHP arrays
- );
-
- $export = new Export($connection, $collection, $settings);
-
- // execute the export. this will return an export cursor
- $cursor = $export->execute();
-
- // now we can fetch the documents from the collection in batches
- while ($docs = $cursor->getNextBatch()) {
- $output = '';
- foreach ($docs as $doc) {
- $output .= json_encode(transform($doc)) . PHP_EOL;
- }
-
- // write out chunk
- fwrite($fp, $output);
- }
-
- fclose($fp);
-}
-
-// run the export
-export('users', $connection);
-```
-
-With this script the following changes will be made to the data (see the example below):
-- rewrite the contents of the `gender` attribute: `female` becomes `f` and `male` becomes `m`
-- `birthday` now becomes `dob`
-- the date format will be changed from YYYY-MM-DD to MM/DD/YYYY
-- concatenate the contents of `name.first` and `name.last`
-- `contact.email` will be transformed from an array to a flat string
-- every other attribute will be removed
-
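-For illustration, an input document along the lines of the following (the concrete values are made up) ...
-
-```json
-{
-  "gender": "female",
-  "birthday": "1983-05-27",
-  "memberSince": "2011-11-04",
-  "name": { "first": "Jane", "last": "Doe" },
-  "contact": { "email": [ "jane@example.com" ] }
-}
-```
-
-... would be rewritten to:
-
-```json
-{
-  "gender": "f",
-  "dob": "05/27/1983",
-  "memberSince": "11/04/2011",
-  "fullName": "Jane Doe",
-  "email": "jane@example.com"
-}
-```
-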
-**Note**: The output will be in a file named `output-transformed.json`.
-
-### Filtering attributes
-
-#### Exclude certain attributes
-
-Instead of filtering attributes out in the application as done in the previous example, we can easily configure the export to exclude these attributes server-side:
-
-```php
-// settings to use for the export
-$settings = array(
- 'batchSize' => 5000, // export in chunks of 5K documents
- '_flat' => true, // use simple PHP arrays
- 'restrict' => array(
- 'type' => 'exclude',
- 'fields' => array('_id', '_rev', '_key', 'likes')
- )
-);
-```
-
-This configuration will exclude the attributes `_id`, `_rev`, `_key` and `likes`.
-
-#### Include certain attributes
-
-We can also include attributes with the following script:
-
-```php
-function export($collection, Connection $connection) {
- // settings to use for the export
- $settings = array(
- 'batchSize' => 5000, // export in chunks of 5K documents
- '_flat' => true, // use simple PHP arrays
- 'restrict' => array(
- 'type' => 'include',
- 'fields' => array('_key', 'name')
- )
- );
-
- $export = new Export($connection, $collection, $settings);
-
- // execute the export. this will return an export cursor
- $cursor = $export->execute();
-
- // now we can fetch the documents from the collection in batches
- while ($docs = $cursor->getNextBatch()) {
- $output = '';
-
- foreach ($docs as $doc) {
- $values = array(
- $doc['_key'],
- $doc['name']['first'] . ' ' . $doc['name']['last']
- );
-
- $output .= '"' . implode('","', $values) . '"' . PHP_EOL;
- }
-
- // print out the data directly
- print $output;
- }
-}
-
-// run the export
-export('users', $connection);
-```
-
-In this script only the `_key` and `name` attributes are extracted, and the printed output contains the `_key`/`name` pairs in CSV format.
-
-**Note**: The whole script [can be downloaded][4].
-
-### Using the API without PHP
-
-The export API REST interface can be used with any client that speaks HTTP, such as curl. With the following command you can fetch the documents from the `users` collection:
-
-```bash
-curl \
-  -X POST \
-  --data '{"batchSize":5000}' \
-  'http://localhost:8529/_api/export?collection=users'
-```
-
-The HTTP response will contain a `result` attribute that contains the actual documents. The attribute `hasMore` will indicate if there are more documents for the client to fetch.
-The HTTP response will also contain an attribute `id` if `hasMore` is set to _true_.
-
-With the `id` you can send follow-up requests like this:
-
-```bash
-curl \
-  -X PUT \
-  'http://localhost:8529/_api/export/13979338067709'
-```
-
-
-**Authors:** [Thomas Schmidts](https://github.com/13abylon)
- and [Jan Steemann](https://github.com/jsteemann)
-
-**Tags**: #howto #php
-
-
-[1]: https://jsteemann.github.io/blog/2015/04/04/more-efficient-data-exports/
-[2]: https://jsteemann.github.io/downloads/code/users-100000.json.tar.gz
-[3]: https://github.com/arangodb/arangodb-php
-[4]: https://jsteemann.github.io/downloads/code/export-csv.php
diff --git a/Documentation/Books/Cookbook/UseCases/JavaDriverBaseDocument.md b/Documentation/Books/Cookbook/UseCases/JavaDriverBaseDocument.md
deleted file mode 100644
index 9a0ac66ad1dc..000000000000
--- a/Documentation/Books/Cookbook/UseCases/JavaDriverBaseDocument.md
+++ /dev/null
@@ -1,66 +0,0 @@
-How to retrieve documents from ArangoDB without knowing the structure?
-======================================================================
-
-Problem
--------
-
-If you use a NoSQL database it's common to retrieve documents with an unknown attribute structure. Furthermore, the amount and types of attributes may differ in documents resulting from a single query. Another problem is that you may want to add one or more attributes to a document.
-
-In Java you are used to working with objects. Given the above requirements, it is possible to directly retrieve objects with the same attribute structure as the document from the database. Adding attributes to an object at runtime could be very messy.
-
-**Note**: ArangoDB 3.1 and the corresponding [Java driver](https://github.com/arangodb/arangodb-java-driver#supported-versions) is needed.
-
-
-Solution
---------
-
-The latest version of the ArangoDB Java driver provides a class called `BaseDocument`.
-
-Its structure is very simple: it only has four attributes:
-
-```java
-public class BaseDocument {
-
- String id;
- String key;
- String revision;
- Map<String, Object> properties;
-
-}
-```
-
-The first three attributes are the system attributes `_id`, `_key` and `_rev`. The fourth attribute is a `HashMap`. The key is always a String, the value an Object. These properties contain all non-system attributes of the document.
-
-The map can contain values of the following types:
-
-* Map
-* List
-* Boolean
-* Number
-* String
-* null
-
-**Note**: `Map` and `List` contain objects, which are of the same types as listed above.
-
-Retrieving a document works as usual, except that you use `BaseDocument` as the type:
-
-```java
-ArangoDB arango = new ArangoDB.Builder().build();
-BaseDocument myObject = arango.db().collection("myCollection").getDocument("myDocumentKey", BaseDocument.class);
-```
-
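-Once retrieved, the system attributes and the dynamic properties of a `BaseDocument` can be read and extended at runtime. A small sketch (`myObject` is the document from the example above; the attribute names are made up):
-
-```java
-String id = myObject.getId();                  // system attribute _id
-Object vip = myObject.getAttribute("vip");     // read a dynamic attribute
-myObject.addAttribute("points", 42);           // add a new attribute at runtime
-Map<String, Object> properties = myObject.getProperties();
-```
-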
-
-Other resources
----------------
-
-More documentation about the ArangoDB Java driver is available:
-
-- [Tutorial: Java in ten minutes](https://www.arangodb.com/tutorials/tutorial-sync-java-driver/)
-- [Java driver at Github](https://github.com/arangodb/arangodb-java-driver)
-- [Example source code](https://github.com/arangodb/arangodb-java-driver/tree/master/src/test/java/com/arangodb/example)
-- [JavaDoc](http://arangodb.github.io/arangodb-java-driver/javadoc-4_1/index.html)
-
-**Author**: [gschwab](https://github.com/gschwab),
- [Mark Vollmary](https://github.com/mpv1989)
-
-**Tags**: #java #driver
diff --git a/Documentation/Books/Cookbook/UseCases/JavaDriverXmlData.md b/Documentation/Books/Cookbook/UseCases/JavaDriverXmlData.md
deleted file mode 100644
index 356169ad8cb4..000000000000
--- a/Documentation/Books/Cookbook/UseCases/JavaDriverXmlData.md
+++ /dev/null
@@ -1,208 +0,0 @@
-How to add XML data to ArangoDB?
-================================
-
-Problem
--------
-
-You want to store XML data in the database to be able to run queries on it.
-
-**Note**: ArangoDB 3.1 and the corresponding Java driver is needed.
-
-Solution
---------
-
-Since version 3.1.0 the arangodb-java-driver supports writing, reading and querying of raw strings containing JSON documents.
-
-With [JsonML](http://www.jsonml.org/) you can convert an XML string into a JSON string and back to XML again.
-
-Converting XML into JSON with JsonML example:
-
-```java
-String string = "<recipe name=\"bread\" prep_time=\"5 mins\" cook_time=\"3 hours\"> "
-    + "<title>Basic bread</title> "
-    + "<ingredient amount=\"8\" unit=\"dL\">Flour</ingredient> "
-    + "<ingredient amount=\"10\" unit=\"grams\">Yeast</ingredient> "
-    + "<ingredient amount=\"4\" unit=\"dL\" state=\"warm\">Water</ingredient> "
-    + "<ingredient amount=\"1\" unit=\"teaspoon\">Salt</ingredient> "
-    + "<instructions> "
-    + "<step>Mix all ingredients together.</step> "
-    + "<step>Knead thoroughly.</step> "
-    + "<step>Cover with a cloth, and leave for one hour in warm room.</step> "
-    + "<step>Knead again.</step> "
-    + "<step>Place in a bread baking tin.</step> "
-    + "<step>Cover with a cloth, and leave for one hour in warm room.</step> "
-    + "<step>Bake in the oven at 180(degrees)C for 30 minutes.</step> "
-    + "</instructions> "
-    + "</recipe>";
-
-JSONObject jsonObject = JSONML.toJSONObject(string);
-System.out.println(jsonObject.toString());
-```
-
-The converted JSON string:
-
-```json
-{
- "prep_time" : "5 mins",
- "name" : "bread",
- "cook_time" : "3 hours",
- "tagName" : "recipe",
- "childNodes" : [
- {
- "childNodes" : [
- "Basic bread"
- ],
- "tagName" : "title"
- },
- {
- "childNodes" : [
- "Flour"
- ],
- "tagName" : "ingredient",
- "amount" : 8,
- "unit" : "dL"
- },
- {
- "unit" : "grams",
- "amount" : 10,
- "tagName" : "ingredient",
- "childNodes" : [
- "Yeast"
- ]
- },
- {
- "childNodes" : [
- "Water"
- ],
- "tagName" : "ingredient",
- "amount" : 4,
- "unit" : "dL",
- "state" : "warm"
- },
- {
- "childNodes" : [
- "Salt"
- ],
- "tagName" : "ingredient",
- "unit" : "teaspoon",
- "amount" : 1
- },
- {
- "childNodes" : [
- {
- "tagName" : "step",
- "childNodes" : [
- "Mix all ingredients together."
- ]
- },
- {
- "tagName" : "step",
- "childNodes" : [
- "Knead thoroughly."
- ]
- },
- {
- "childNodes" : [
- "Cover with a cloth, and leave for one hour in warm room."
- ],
- "tagName" : "step"
- },
- {
- "tagName" : "step",
- "childNodes" : [
- "Knead again."
- ]
- },
- {
- "childNodes" : [
- "Place in a bread baking tin."
- ],
- "tagName" : "step"
- },
- {
- "tagName" : "step",
- "childNodes" : [
- "Cover with a cloth, and leave for one hour in warm room."
- ]
- },
- {
- "tagName" : "step",
- "childNodes" : [
- "Bake in the oven at 180(degrees)C for 30 minutes."
- ]
- }
- ],
- "tagName" : "instructions"
- }
- ]
-}
-```
-
-Saving the converted JSON to ArangoDB example:
-
-```java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoCollection collection = arango.db().collection("testCollection");
-DocumentCreateEntity<String> entity = collection.insertDocument(
- jsonObject.toString());
-String key = entity.getKey();
-```
-
-Reading the stored JSON as a string and convert it back to XML example:
-
-```java
-String rawJsonString = collection.getDocument(key, String.class);
-String xml = JSONML.toString(rawJsonString);
-System.out.println(xml);
-```
-
-Example output:
-
-```xml
-<recipe name="bread" prep_time="5 mins" cook_time="3 hours">
-    <title>Basic bread</title>
-    <ingredient amount="8" unit="dL">Flour</ingredient>
-    <ingredient amount="10" unit="grams">Yeast</ingredient>
-    <ingredient amount="4" unit="dL" state="warm">Water</ingredient>
-    <ingredient amount="1" unit="teaspoon">Salt</ingredient>
-    <instructions>
-        <step>Mix all ingredients together.</step>
-        <step>Knead thoroughly.</step>
-        <step>Cover with a cloth, and leave for one hour in warm room.</step>
-        <step>Knead again.</step>
-        <step>Place in a bread baking tin.</step>
-        <step>Cover with a cloth, and leave for one hour in warm room.</step>
-        <step>Bake in the oven at 180(degrees)C for 30 minutes.</step>
-    </instructions>
-</recipe>
-```
-
-**Note:** The [fields mandatory to ArangoDB documents](../../Manual/DataModeling/Documents/DocumentAddress.html) are added; if they break your XML schema you have to remove them.
-
-Query raw data example:
-
-```java
-String queryString = "FOR t IN testCollection FILTER t.cook_time == '3 hours' RETURN t";
-ArangoCursor<String> cursor = arango.db().query(queryString, null, null, String.class);
-while (cursor.hasNext()) {
- JSONObject jsonObject = new JSONObject(cursor.next());
- String xml = JSONML.toString(jsonObject);
- System.out.println("XML value: " + xml);
-}
-```
-
-Other resources
----------------
-
-More documentation about the ArangoDB Java driver is available:
-
-- [Tutorial: Java in ten minutes](https://www.arangodb.com/tutorials/tutorial-sync-java-driver/)
-- [Java driver at Github](https://github.com/arangodb/arangodb-java-driver)
-- [Example source code](https://github.com/arangodb/arangodb-java-driver/tree/master/src/test/java/com/arangodb/example)
-- [JavaDoc](http://arangodb.github.io/arangodb-java-driver/javadoc-4_1/index.html)
-
-**Author**: [Achim Brandt](https://github.com/a-brandt),
- [Mark Vollmary](https://github.com/mpv1989)
-
-**Tags**: #java #driver
diff --git a/Documentation/Books/Cookbook/UseCases/MonetaryDataWithoutPrecisionLoss.md b/Documentation/Books/Cookbook/UseCases/MonetaryDataWithoutPrecisionLoss.md
deleted file mode 100644
index 41abd402d5bf..000000000000
--- a/Documentation/Books/Cookbook/UseCases/MonetaryDataWithoutPrecisionLoss.md
+++ /dev/null
@@ -1,37 +0,0 @@
-Working with monetary data without precision loss in ArangoDB
-=============================================================
-
-Problem
--------
-
-Applications that handle monetary data often need to capture fractional units
-of currency and have to emulate decimal rounding without precision loss.
-Unlike relational databases, JSON does not support arbitrary precision
-out of the box, but there are suitable workarounds.
-
-Solution
---------
-
-In ArangoDB there are two ways to handle monetary data:
-
-1. Monetary data **as integer**:
-
-   If you store the data as integers, decimals can be avoided by using a general
-   scale factor, e.g. `100`, turning `19.99` into `1999`. This approach works
-   without precision loss for values of up to 2^53 (exclusive). Calculations
-   can then be done on the server side (see the sketch below this list).
-
-2. Monetary data **as string**:
-
- If you only want to store and retrieve monetary data you can do so without
- any precision loss by storing this data as string. However, when using
- strings for monetary data values it will not be possible to do calculations
- on them on the server. Calculations have to happen in application logic
- that is capable of doing arithmetic on string-encoded integers.
-
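-A minimal arangosh sketch of the integer approach (the collection name `orders` and the attribute `totalCents` are made up for illustration):
-
-```js
-// create the collection (skip if it already exists)
-db._create("orders");
-
-// store 19.99 as the integer 1999 (cents)
-db.orders.save({ _key: "order1", totalCents: 1999 });
-
-// aggregate server-side; convert back to a decimal in the application
-db._query("FOR o IN orders COLLECT AGGREGATE total = SUM(o.totalCents) RETURN total").toArray();
-```
-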
-**Authors:**
-[Jan Stücke](https://github.com/MrPieces),
-[Jan Steemann](https://github.com/jsteemann)
-
-**Tags**: #howto #datamodel #numbers
-
diff --git a/Documentation/Books/Cookbook/UseCases/PopulatingAnAutocompleteTextbox.md b/Documentation/Books/Cookbook/UseCases/PopulatingAnAutocompleteTextbox.md
deleted file mode 100644
index 618e3ddb4b79..000000000000
--- a/Documentation/Books/Cookbook/UseCases/PopulatingAnAutocompleteTextbox.md
+++ /dev/null
@@ -1,123 +0,0 @@
-Populating an autocomplete textbox
-==================================
-
-Problem
--------
-
-I want to populate an autocomplete textbox with values from a collection. The completions
-should adjust dynamically based on user input.
-
-Solution
---------
-
-Use a web framework for the client-side autocomplete rendering and event processing. Use
-a collection with a (sorted) skiplist index and a range query on it to efficiently fetch
-the completion values dynamically. Connect the two using a simple Foxx route.
-
-### Install an example app
-
-[This app](https://github.com/jsteemann/autocomplete) contains a jquery-powered web page
-with an autocomplete textbox. It uses [jquery autocomplete](http://jqueryui.com/autocomplete/),
-but every other web framework will also do.
-
-The app can be installed as follows:
-
-* in the ArangoDB web interface, switch into the **Applications** tab
-* there, click *Add Application*
-* switch on the *Github* tab
-* for *Repository*, enter `jsteemann/autocomplete`
-* for *Version*, enter `master`
-* click *Install*
-
-Now enter a mountpoint for the application. This is the URL path under which the
-application will become available. For the example app, the mount point does not matter.
-The web page in the example app assumes it is served by ArangoDB, too. So it uses a
-relative URL `autocomplete`. This is easiest to set up, but in reality you might want
-to have your web page served by a different server. In this case, your web page will
-have to call the app mount point you just entered.
-
-To see the example app in action, click on **Open**. The autocomplete textbox should be
-populated with server data when at least two letters are entered.
-
-### Backend code, setup script
-
-The app also contains a backend route `/autocomplete` which is called by the web page to
-fetch completions based on user input. The HTML code for the web page is
-[here](https://github.com/jsteemann/autocomplete/blob/master/assets/index.html).
-
-Contained in the app is a [setup script](https://github.com/jsteemann/autocomplete/blob/master/scripts/setup.js)
-that will create a collection named `completions` and load some initial data into it. The
-example app provides autocompletion for US city names, and the setup script populates the
-collection with about 10K city names.
-
-The setup script also [creates a skiplist index on the lookup attribute](https://github.com/jsteemann/autocomplete/blob/master/scripts/setup.js#L10561),
-so this attribute can be used for efficient filtering and sorting later.
-The `lookup` attribute contains the city names already lower-cased, and the original
-(*pretty*) names are stored in attribute `pretty`. This attribute will be returned to
-users.
-
-### Backend code, Foxx route controller
-
-The app contains a [controller](https://github.com/jsteemann/autocomplete/blob/master/demo.js).
-The backend action `/autocomplete` that is called by the web page is also contained herein:
-
-```js
-controller.get("/autocomplete", function (req, res) {
- // search phrase entered by user
- var searchString = (req.params("q") || "").trim();
- // lower bound for search range
- var begin = searchString.replace(/[^a-zA-Z]/g, " ").toLowerCase();
- if (begin.length === 0) {
- // search phrase is empty - no need to perform a search at all
- res.json([]);
- return;
- }
- // upper bound for search range
- var end = begin.substr(0, begin.length - 1) + String.fromCharCode(begin.charCodeAt(begin.length - 1) + 1);
- // bind parameters for query
- var queryParams = {
- "@collection" : "completions",
- "begin" : begin,
- "end" : end
- };
- // the search query
- var query = "FOR doc IN @@collection FILTER doc.lookup >= @begin && doc.lookup < @end SORT doc.lookup RETURN { label: doc.pretty, value: doc.pretty, id: doc._key }";
- res.json(db._query(query, queryParams).toArray());
-});
-```
-
-The backend code first fetches the search string from the URL parameter `q`. This is what the
-web page will send us.
-
-Based on the search string, a lookup range is calculated. First of all, the search string is
-lower-cased and all non-letter characters are removed from it. The resulting string is the
-lower bound for the lookup. For the upper bound, we can use the lower bound with its last
-letter character code increased by one.
-
-For example, if the user entered `Los A` into the textbox, the web page will send us the string
-`Los A` in URL parameter `q`. Lower-casing and removing non-letter characters from the string,
-we'll get `losa`. This is the lower bound. The upper bound is `losa`, with its last letter adjusted
-to `b` (i.e. `losb`).
-
-Finally, the lower and upper bounds are inserted into the following query using bind parameters
-`@begin` and `@end`:
-
-```
-FOR doc IN @@collection
- FILTER doc.lookup >= @begin && doc.lookup < @end
- SORT doc.lookup
- RETURN {
- label: doc.pretty,
- value: doc.pretty,
- id: doc._key
- }
-```
-
-The city names in the lookup range will be returned sorted. For each city, three values are
-returned (the `id` contains the document key, the other two values are for display purposes).
-Other frameworks may require a different return format, but that can easily be done by
-adjusting the AQL query.
-
-**Author:** [Jan Steemann](https://github.com/jsteemann)
-
-**Tags**: #aql #autocomplete #jquery
diff --git a/Documentation/Books/Cookbook/UseCases/README.md b/Documentation/Books/Cookbook/UseCases/README.md
deleted file mode 100644
index 2acffb3d9fc1..000000000000
--- a/Documentation/Books/Cookbook/UseCases/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-Use Cases / Examples
-====================
-
-- [Working with monetary data without precision loss](MonetaryDataWithoutPrecisionLoss.md)
-
-- [Populating a Textbox](PopulatingAnAutocompleteTextbox.md)
-
-- [Exporting Data](ExportingData.md)
-
-- [Accessing base documents with Java](JavaDriverBaseDocument.md)
-
-- [Add XML data to ArangoDB with Java](JavaDriverXmlData.md)
diff --git a/Documentation/Books/Cookbook/assets/CompilingUnderWindows/SetEnvironmentVar.png b/Documentation/Books/Cookbook/assets/CompilingUnderWindows/SetEnvironmentVar.png
deleted file mode 100644
index 603f0b885237..000000000000
Binary files a/Documentation/Books/Cookbook/assets/CompilingUnderWindows/SetEnvironmentVar.png and /dev/null differ
diff --git a/Documentation/Books/Cookbook/assets/MonitoringWithCollectd/KCollectdIPtablesAccounting.png b/Documentation/Books/Cookbook/assets/MonitoringWithCollectd/KCollectdIPtablesAccounting.png
deleted file mode 100644
index d08add50def4..000000000000
Binary files a/Documentation/Books/Cookbook/assets/MonitoringWithCollectd/KCollectdIPtablesAccounting.png and /dev/null differ
diff --git a/Documentation/Books/Cookbook/assets/MonitoringWithCollectd/KCollectdJson.png b/Documentation/Books/Cookbook/assets/MonitoringWithCollectd/KCollectdJson.png
deleted file mode 100644
index b65604a5f2bc..000000000000
Binary files a/Documentation/Books/Cookbook/assets/MonitoringWithCollectd/KCollectdJson.png and /dev/null differ
diff --git a/Documentation/Books/Cookbook/book.json b/Documentation/Books/Cookbook/book.json
deleted file mode 100644
index b58ba398dea8..000000000000
--- a/Documentation/Books/Cookbook/book.json
+++ /dev/null
@@ -1,56 +0,0 @@
-{
- "gitbook": "^3.2.2",
- "title": "ArangoDB VERSION_NUMBER Cookbook",
- "version": "VERSION_NUMBER",
- "author": "ArangoDB GmbH",
- "description": "Recipes for ArangoDB - the native multi-model NoSQL database",
- "language": "en",
- "plugins": [
- "-search",
- "-lunr",
- "-sharing",
- "toggle-chapters",
- "addcssjs",
- "anchorjs",
- "sitemap-general@git+https://github.com/Simran-B/gitbook-plugin-sitemap-general.git",
- "ga",
- "callouts@git+https://github.com/Simran-B/gitbook-plugin-callouts.git",
- "edit-link",
- "page-toc@git+https://github.com/Simran-B/gitbook-plugin-page-toc.git",
- "localized-footer"
- ],
- "pdf": {
- "fontSize": 12,
- "toc": true,
- "margin": {
- "right": 60,
- "left": 60,
- "top": 35,
- "bottom": 35
- }
- },
- "styles": {
- "website": "styles/website.css"
- },
- "pluginsConfig": {
- "addcssjs": {
- "js": ["styles/header.js", "styles/hs.js"],
- "css": ["styles/header.css"]
- },
- "sitemap-general": {
- "prefix": "https://docs.arangodb.com/devel/Cookbook/",
- "changefreq": "@GCHANGE_FREQ@",
- "priority": @GPRIORITY@
- },
- "ga": {
- "token": "UA-81053435-2"
- },
- "edit-link": {
- "base": "https://github.com/arangodb/arangodb/edit/devel/Documentation/Books/Cookbook",
- "label": "Edit Page"
- },
- "localized-footer": {
- "filename": "FOOTER.html"
- }
- }
-}
diff --git a/Documentation/Books/Cookbook/styles/header.css b/Documentation/Books/Cookbook/styles/header.css
deleted file mode 100644
index 4ec87c77b0e5..000000000000
--- a/Documentation/Books/Cookbook/styles/header.css
+++ /dev/null
@@ -1,305 +0,0 @@
-/* Design fix because of the header */
-@import url(https://fonts.googleapis.com/css?family=Roboto:400,500,300,700);
-
-body {
- overflow: hidden;
- font-family: Roboto, Helvetica, sans-serif;
- background: #444444;
-}
-
-.book .book-header h1 a, .book .book-header h1 a:hover {
- display: none;
-}
-
-/* GOOGLE START */
-
-.google-search #gsc-iw-id1{
- border: none !important;
-}
-
-.google-search .gsst_b {
- position: relative;
- top: 10px;
- left: -25px;
- width: 1px;
-}
-
-.gsst_a .gscb_a {
- color: #c01a07 !important;
-}
-
-.google-search input {
- background-color: #fff !important;
- font-family: Roboto, Helvetica, sans-serif;
- font-size: 10pt !important;
- padding-left: 5px !important;
- float: right;
- position: relative;
- top: 8px;
- width: 100% !important;
- height: 30px !important;
-}
-
-.google-search input:active {
-}
-
-.google-search {
- margin-right: 10px;
- margin-left: 10px !important;
- float: right !important;
-}
-
-.google-search td,
-.google-search table,
-.google-search tr,
-.google-search th {
- background-color: #444444 !important;
-}
-
-.google-search .gsc-input-box,
-.google-search .gsc-input-box input {
- border-radius: 3px !important;
- width: 200px;
-}
-
-.gsc-branding-text,
-.gsc-branding-img,
-.gsc-user-defined-text {
- display: none !important;
-}
-
-.google-search .gsc-input-box input {
- font-size: 16px !important;
-}
-
-.google-search .gsc-search-button {
- display: none !important;
-}
-
-.google-search .gsc-control-cse {
- padding: 10px !important;
-}
-
-.google-search > div {
- float: left !important;
- width: 200px !important;
-}
-
-/* GOOGLE END */
-
-.book-summary,
-.book-body {
- margin-top: 48px;
-}
-
-.arangodb-logo, .arangodb-logo-small {
- display: inline;
- float: left;
- padding-top: 12px;
- margin-left: 10px;
-}
-
-.arangodb-logo img {
- height: 23px;
-}
-
-.arangodb-logo-small {
- display: none;
-}
-
-.arangodb-version-switcher {
- width: 65px;
- height: 44px;
- margin-left: 16px;
- float: left;
- display: inline;
- font-weight: bold;
- color: #fff;
- background-color: inherit;
- border: 0;
-}
-
-.arangodb-version-switcher option {
- background-color: white;
- color: black;
-}
-
-
-.arangodb-header {
- position: fixed;
- width: 100%;
- height: 48px;
- z-index: 1;
-}
-
-.arangodb-header .socialIcons-googlegroups a img {
- position: relative;
- height: 14px;
- top: 3px;
-}
-
-.arangodb-navmenu {
- display: block;
- float: right;
- margin: 0;
- padding: 0;
-}
-
-.arangodb-navmenu li {
- display: block;
- float: left;
-}
-
-.arangodb-navmenu li a {
- display: block;
- float: left;
- padding: 0 10px;
- line-height: 48px;
- font-size: 16px;
- font-weight: 400;
- color: #fff;
- text-decoration: none;
- font-family: Roboto, Helvetica, sans-serif;
-}
-
-.arangodb-navmenu li.active-tab a, .arangodb-navmenu li a:hover {
- background-color: #88A049 !important;
-}
-
-.downloadIcon {
- margin-right: 10px;
-}
-
-/** simple responsive updates **/
-
-@media screen and (max-width: 1000px) {
- .arangodb-navmenu li a {
- padding: 0 6px;
- }
-
- .arangodb-logo {
- margin-left: 10px;
- }
-
- .google-search {
- margin-right: 5px !important;
- }
-
- .downloadIcon {
- margin-right: 0;
- }
-
- .socialIcons {
- display: none !important;
- }
-}
-
-
-@media screen and (max-width: 800px) {
-
- .google-search,
- .google-search .gsc-input-box,
- .google-search .gsc-input-box input {
- width: 130px !important;
- }
-
- .arangodb-navmenu li a {
- font-size: 15px;
- padding: 0 7px;
- }
-
- .arangodb-logo {
- display: none;
- }
-
- .arangodb-logo-small {
- display: inline;
- margin-left: 10px;
- }
-
- .arangodb-logo-small img {
- height: 20px;
- }
-
- .arangodb-version-switcher {
- margin: 0;
- }
-
-}
-
-@media screen and (max-width: 600px) {
- .arangodb-navmenu li a {
- font-size: 15px;
- padding: 0 7px;
- }
-
- .arangodb-version-switcher,
- .downloadIcon {
- display: none !important;
- }
-
- .google-search,
- .google-search .gsc-input-box,
- .google-search .gsc-input-box input {
- width: 24px !important;
- }
-
- .google-search .gsc-input-box input[style] {
- background: url(https://docs.arangodb.com/assets/searchIcon.png) left center no-repeat rgb(255, 255, 255) !important;
- }
-
- .google-search .gsc-input-box input:focus {
- width: 200px !important;
- position: relative;
- left: -176px;
- background-position: -9999px -9999px !important;
- }
-
-}
-
-@media screen and (max-width: 400px) {
- .arangodb-navmenu li a {
- font-size: 13px;
- padding: 0 5px;
- }
- .google-search {
- display: none;
- }
-}
-
-/*Hubspot Cookie notice */
-
-body div#hs-eu-cookie-confirmation {
- bottom: 0;
- top: auto;
- position: fixed;
- text-align: center !important;
-}
-
-body div#hs-eu-cookie-confirmation.can-use-gradients {
- background-image: linear-gradient(to bottom, rgba(255,255,255,0.9),rgba(255,255,255,0.75));
-}
-
-body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner {
- display: inline-block;
- padding: 15px 18px 0;
-}
-
-body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner #hs-en-cookie-confirmation-buttons-area {
- float: left;
-}
-
-body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner a#hs-eu-confirmation-button {
- background-color: #577138 !important;
- border: none !important;
- text-shadow: none !important;
- box-shadow: none;
- padding: 5px 15px !important;
- margin-left: 10px;
-}
-
-body div#hs-eu-cookie-confirmation div#hs-eu-cookie-confirmation-inner > p {
- float: left;
- color: #000 !important;
- text-shadow: none;
-}
diff --git a/Documentation/Books/Cookbook/styles/header.js b/Documentation/Books/Cookbook/styles/header.js
deleted file mode 100644
index 8c34d6bdb6ae..000000000000
--- a/Documentation/Books/Cookbook/styles/header.js
+++ /dev/null
@@ -1,161 +0,0 @@
-// Try to set the version number early, jQuery not available yet
-var searcheable_versions = [@BROWSEABLE_VERSIONS@];
-var cx = '@GSEARCH_ID@';
-
-document.addEventListener("DOMContentLoaded", function(event) {
- if (!gitbook.state.root) return;
- var bookVersion = gitbook.state.root.match(/\/(\d\.\d(\.\d)?|devel)\//);
- var switcher = document.getElementsByClassName("arangodb-version-switcher")[0];
- if (bookVersion) {
- switcher.value = bookVersion[1];
- } else {
- switcher.style.display = "none";
- }
-});
-
-window.onload = function(){
-window.localStorage.removeItem(":keyword");
-
-$(document).ready(function() {
-
-function appendHeader() {
- var VERSION_SELECTOR = ""
- var i = 0;
- var prefix;
- for (i = 0; i < searcheable_versions.length; i++ ) {
- if (searcheable_versions[i] === 'devel') {
- prefix = '';
- } else {
- prefix = 'v';
- }
- VERSION_SELECTOR += '' + prefix +
- searcheable_versions[i] +
- ' \n';
- }
-
- var div = document.createElement('div');
- div.innerHTML = '\n';
-
- $('.book').before(div.innerHTML);
-
- };
-
-
- function rerenderNavbar() {
- $('.arangodb-header').remove();
- appendHeader();
- };
-
- //render header
- rerenderNavbar();
- function addGoogleSrc() {
- var gcse = document.createElement('script');
- gcse.type = 'text/javascript';
- gcse.async = true;
- gcse.src = (document.location.protocol == 'https:' ? 'https:' : 'http:') +
- '//cse.google.com/cse.js?cx=' + cx;
- var s = document.getElementsByTagName('script')[0];
- s.parentNode.insertBefore(gcse, s);
- };
- addGoogleSrc();
-
- $(".arangodb-navmenu a[data-book]").on("click", function(e) {
- e.preventDefault();
- var urlSplit = gitbook.state.root.split("/");
- urlSplit.pop(); // ""
- urlSplit.pop(); // e.g. "Manual"
- window.location.href = urlSplit.join("/") + "/" + e.target.getAttribute("data-book") + "/index.html";
- });
-
- // set again using jQuery to accommodate non-standard browsers (*cough* IE *cough*)
- var bookVersion = gitbook.state.root.match(/\/(\d\.\d(\.\d)?|devel)\//);
- var switcher = $(".arangodb-version-switcher");
- if (bookVersion) {
- switcher.val(bookVersion[1]);
- } else {
- switcher.hide();
- }
-
- $(".arangodb-version-switcher").on("change", function(e) {
- var urlSplit = gitbook.state.root.split("/");
- urlSplit.pop(); // ""
- var currentBook = urlSplit.pop(); // e.g. "Manual"
- urlSplit.pop() // e.g. "3.0"
- if (e.target.value == "2.8") {
- var legacyMap = {
- "Manual": "",
- "AQL": "/Aql",
- "HTTP": "/HttpApi",
- "Cookbook": "/Cookbook"
- };
- currentBook = legacyMap[currentBook];
- } else {
- currentBook = "/" + currentBook;
- }
- window.location.href = urlSplit.join("/") + "/" + e.target.value + currentBook + "/index.html";
- });
-
-});
-
-};
diff --git a/Documentation/Books/Cookbook/styles/hs.js b/Documentation/Books/Cookbook/styles/hs.js
deleted file mode 100644
index 9a8ae18a61d2..000000000000
--- a/Documentation/Books/Cookbook/styles/hs.js
+++ /dev/null
@@ -1,33 +0,0 @@
-// HubSpot Script Loader. Please do not block this resource. See more: http://hubs.ly/H0702_H0
-
-(function (id, src, attrs) {
- if (document.getElementById(id)) {
- try { console.warn('duplicate hubspot script with id: "' + id + '" included on page'); }
- finally { return; }
- }
- var js = document.createElement('script');
- js.src = src;
- js.type = 'text/javascript';
- js.id = id;
- for (var name in attrs) { if(attrs.hasOwnProperty(name)) { js.setAttribute(name, attrs[name]); } }
- var e = document.getElementsByTagName('script')[0];
- e.parentNode.insertBefore(js, e);
-})('hubspot-messages-loader', 'https://js.usemessages.com/messageswidgetshell.js', {"data-loader":"hs-scriptloader","data-hsjs-portal":2482448,"data-hsjs-env":"prod"});
-
-(function (id, src) {
- if (document.getElementById(id)) { return; }
- var js = document.createElement('script');
- js.src = src;
- js.type = 'text/javascript';
- js.id = id;
- var e = document.getElementsByTagName('script')[0];
- e.parentNode.insertBefore(js, e);
-})('hs-analytics', '//js.hs-analytics.net/analytics/1508760300000/2482448.js');
-
-window.setTimeout(function () {
- $('body').on('click', 'a', function () {
- var _hsq = window._hsq = window._hsq || [];
- _hsq.push(['setPath', window.location.pathname]);
- _hsq.push(['trackPageView']);
- });
-}, 1000);
diff --git a/Documentation/Books/Cookbook/styles/website.css b/Documentation/Books/Cookbook/styles/website.css
deleted file mode 100755
index 0bbc2f1eff37..000000000000
--- a/Documentation/Books/Cookbook/styles/website.css
+++ /dev/null
@@ -1,84 +0,0 @@
-.markdown-section small {
- font-size: 80%;
-}
-.markdown-section sub, .markdown-section sup {
- font-size: 75%;
- line-height: 0;
- position: relative;
- vertical-align: baseline;
-}
-.markdown-section sup {
- top: -.5em;
-}
-.markdown-section sub {
- bottom: -.25em;
-}
-
-div.example_show_button {
- border: medium solid lightgray;
- text-align: center;
- position: relative;
- top: -10px;
- display: flex;
- justify-content: center;
-}
-
-.book .book-body .navigation.navigation-next {
- right: 10px !important;
-}
-
-.book .book-summary ul.summary li.active>a,.book .book-summary ul.summary li a:hover {
- color: #fff !important;
- background: #80A54D !important;
- text-decoration: none;
-}
-
-.book .book-body .page-wrapper .page-inner section.normal .deprecated{
- background-color: rgba(240,240,0,0.4);
-}
-
-.book .book-body section > ul li:last-child {
- margin-bottom: 0.85em;
-}
-
-.book .book-body .alert p:last-child {
- margin-bottom: 0;
-}
-
-.columns-3 {
- -webkit-column-count: 3;
- -moz-column-count: 3;
- -ms-column-count: 3;
- -o-column-count: 3;
- column-count: 3;
- columns: 3;
-}
-
-.localized-footer {
- opacity: 0.5;
-}
-
-.example-container {
- position: relative;
-}
-
-.example-container a.anchorjs-link {
- position: absolute;
- top: 10px;
- right: 10px;
- font: 1em/1 anchorjs-icons;
-}
-
-.gsib_a {
-padding: 0px !important;
-}
-
-.gsc-control-cse {
-border: 0px !important;
-background-color: transparent !important;
-}
-
-
-.gsc-input {
-margin: 0px !important;
-}
diff --git a/Documentation/Books/Drivers/.gitkeep b/Documentation/Books/Drivers/.gitkeep
new file mode 100644
index 000000000000..936ca3adc4e3
--- /dev/null
+++ b/Documentation/Books/Drivers/.gitkeep
@@ -0,0 +1,5 @@
+Git cannot track empty directories.
+This file ensures that the directory is kept.
+
+Some of the old documentation build scripts are still
+used by the new system, which copies files into this folder.
\ No newline at end of file
diff --git a/Documentation/Books/Drivers/FOOTER.html b/Documentation/Books/Drivers/FOOTER.html
deleted file mode 100644
index 239869bfaf6a..000000000000
--- a/Documentation/Books/Drivers/FOOTER.html
+++ /dev/null
@@ -1 +0,0 @@
-© ArangoDB - the native multi-model NoSQL database
\ No newline at end of file
diff --git a/Documentation/Books/Drivers/GO/ConnectionManagement/README.md b/Documentation/Books/Drivers/GO/ConnectionManagement/README.md
deleted file mode 100644
index 67de1a7218c7..000000000000
--- a/Documentation/Books/Drivers/GO/ConnectionManagement/README.md
+++ /dev/null
@@ -1,85 +0,0 @@
-
-# ArangoDB GO Driver - Connection Management
-## Failover
-
-The driver supports multiple endpoints to connect to. In principle, all requests are
-sent to the same endpoint until that endpoint fails to respond.
-In that case a new endpoint is chosen and the operation is retried.
-
-The following example shows how to connect to a cluster of 3 servers.
-
-```go
-conn, err := http.NewConnection(http.ConnectionConfig{
- Endpoints: []string{"http://server1:8529", "http://server2:8529", "http://server3:8529"},
-})
-if err != nil {
- // Handle error
-}
-c, err := driver.NewClient(driver.ClientConfig{
- Connection: conn,
-})
-if err != nil {
- // Handle error
-}
-```
-
-Note that a valid endpoint is a URL to either a standalone server or to a coordinator
-in a cluster.
-
-## Failover: Exact behavior
-
-The driver monitors the request being sent to a specific server (endpoint).
-As soon as the request has been completely written, failover will no longer happen.
-The reason is that several operations cannot be (safely) retried.
-E.g. when a request to create a document has been sent to a server and a timeout
-occurs, the driver has no way of knowing whether the server did or did not create
-the document in the database.
-
-If the driver detects that a request has been completely written, but still gets
-an error (other than an error response from Arango itself), it will wrap the
-error in a `ResponseError`. The client can test for such an error using `IsResponseError`.
-
-If a client received a `ResponseError`, it can do one of the following:
-- Retry the operation and be prepared for some kind of duplicate record / unique constraint violation.
-- Perform a test operation to see if the "failed" operation did succeed after all.
-- Simply consider the operation failed. This is risky, since it can still be the case that the operation did succeed.
-
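-As a sketch of the first option, retrying a document creation after a `ResponseError` could look like this (it assumes a collection `col`, a document `doc` and a context `ctx` as used in the other examples of this driver documentation):
-
-```go
-meta, err := col.CreateDocument(ctx, doc)
-if driver.IsResponseError(err) {
-	// The request was fully written, but the outcome is unknown.
-	// Retry and be prepared for a possible duplicate / unique constraint violation.
-	meta, err = col.CreateDocument(ctx, doc)
-}
-if err != nil {
-	// Handle error
-}
-```
-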
-## Failover: Timeouts
-
-To control the timeout of any function in the driver, you must pass it a context
-configured with `context.WithTimeout` (or `context.WithDeadline`).
-
-In the case of multiple endpoints, the actual timeout used for requests will be shorter than
-the timeout given in the context.
-The driver will divide the timeout by the number of endpoints with a maximum of 3.
-This ensures that the driver can try up to 3 different endpoints (in case of failover) without
-being canceled due to the timeout given by the client.
-E.g.
-- With 1 endpoint and a given timeout of 1 minute, the actual request timeout will be 1 minute.
-- With 3 endpoints and a given timeout of 1 minute, the actual request timeout will be 20 seconds.
-- With 8 endpoints and a given timeout of 1 minute, the actual request timeout will be 20 seconds.
-
-For most requests you want an actual request timeout of at least 30 seconds.
-
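-For example, a deadline for a single call could be set like this (a sketch assuming an existing `db` database handle as in the other examples):
-
-```go
-// Allow up to 1 minute in total. With 3 endpoints the driver will use
-// roughly 20 seconds per endpoint it tries.
-ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
-defer cancel()
-
-col, err := db.Collection(ctx, "myCollection")
-if err != nil {
-	// Handle error
-}
-```
-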
-## Secure connections (SSL)
-
-The driver supports endpoints that use SSL using the `https` URL scheme.
-
-The following example shows how to connect to a server that has a secure endpoint using
-a self-signed certificate.
-
-```go
-conn, err := http.NewConnection(http.ConnectionConfig{
- Endpoints: []string{"https://localhost:8529"},
- TLSConfig: &tls.Config{InsecureSkipVerify: true},
-})
-if err != nil {
- // Handle error
-}
-c, err := driver.NewClient(driver.ClientConfig{
- Connection: conn,
-})
-if err != nil {
- // Handle error
-}
-```
diff --git a/Documentation/Books/Drivers/GO/ExampleRequests/README.md b/Documentation/Books/Drivers/GO/ExampleRequests/README.md
deleted file mode 100644
index 9bc9909658dc..000000000000
--- a/Documentation/Books/Drivers/GO/ExampleRequests/README.md
+++ /dev/null
@@ -1,183 +0,0 @@
-
-# ArangoDB GO Driver - Example requests
-
-## Connecting to ArangoDB
-
-```go
-conn, err := http.NewConnection(http.ConnectionConfig{
- Endpoints: []string{"http://localhost:8529"},
- TLSConfig: &tls.Config{ /*...*/ },
-})
-if err != nil {
- // Handle error
-}
-c, err := driver.NewClient(driver.ClientConfig{
- Connection: conn,
- Authentication: driver.BasicAuthentication("user", "password"),
-})
-if err != nil {
- // Handle error
-}
-```
-
-## Opening a database
-
-```go
-ctx := context.Background()
-db, err := client.Database(ctx, "myDB")
-if err != nil {
- // handle error
-}
-```
-
-## Opening a collection
-
-```go
-ctx := context.Background()
-col, err := db.Collection(ctx, "myCollection")
-if err != nil {
- // handle error
-}
-```
-
-## Checking if a collection exists
-
-```go
-ctx := context.Background()
-found, err := db.CollectionExists(ctx, "myCollection")
-if err != nil {
- // handle error
-}
-```
-
-## Creating a collection
-
-```go
-ctx := context.Background()
-options := &driver.CreateCollectionOptions{ /* ... */ }
-col, err := db.CreateCollection(ctx, "myCollection", options)
-if err != nil {
- // handle error
-}
-```
-
-## Reading a document from a collection
-
-```go
-var doc MyDocument
-ctx := context.Background()
-meta, err := col.ReadDocument(ctx, myDocumentKey, &doc)
-if err != nil {
- // handle error
-}
-```
-
-## Reading a document from a collection with an explicit revision
-
-```go
-var doc MyDocument
-revCtx := driver.WithRevision(ctx, "mySpecificRevision")
-meta, err := col.ReadDocument(revCtx, myDocumentKey, &doc)
-if err != nil {
- // handle error
-}
-```
-
-## Creating a document
-
-```go
-doc := MyDocument{
- Name: "jan",
- Counter: 23,
-}
-ctx := context.Background()
-meta, err := col.CreateDocument(ctx, doc)
-if err != nil {
- // handle error
-}
-fmt.Printf("Created document with key '%s', revision '%s'\n", meta.Key, meta.Rev)
-```
-
-## Removing a document
-
-```go
-ctx := context.Background()
-err := col.RemoveDocument(ctx, myDocumentKey)
-if err != nil {
- // handle error
-}
-```
-
-## Removing a document with an explicit revision
-
-```go
-revCtx := driver.WithRevision(ctx, "mySpecificRevision")
-err := col.RemoveDocument(revCtx, myDocumentKey)
-if err != nil {
- // handle error
-}
-```
-
-## Updating a document
-
-```go
-ctx := context.Background()
-patch := map[string]interface{}{
- "Name": "Frank",
-}
-meta, err := col.UpdateDocument(ctx, myDocumentKey, patch)
-if err != nil {
- // handle error
-}
-```
-
-## Querying documents, one document at a time
-
-```go
-ctx := context.Background()
-query := "FOR d IN myCollection LIMIT 10 RETURN d"
-cursor, err := db.Query(ctx, query, nil)
-if err != nil {
- // handle error
-}
-defer cursor.Close()
-for {
- var doc MyDocument
- meta, err := cursor.ReadDocument(ctx, &doc)
- if driver.IsNoMoreDocuments(err) {
- break
- } else if err != nil {
- // handle other errors
- }
- fmt.Printf("Got doc with key '%s' from query\n", meta.Key)
-}
-```
-
-## Querying documents, fetching total count
-
-```go
-ctx := driver.WithQueryCount(context.Background())
-query := "FOR d IN myCollection RETURN d"
-cursor, err := db.Query(ctx, query, nil)
-if err != nil {
- // handle error
-}
-defer cursor.Close()
-fmt.Printf("Query yields %d documents\n", cursor.Count())
-```
-
-## Querying documents, with bind variables
-
-```go
-ctx := context.Background()
-query := "FOR d IN myCollection FILTER d.Name == @name RETURN d"
-bindVars := map[string]interface{}{
- "name": "Some name",
-}
-cursor, err := db.Query(ctx, query, bindVars)
-if err != nil {
- // handle error
-}
-defer cursor.Close()
-...
-```
diff --git a/Documentation/Books/Drivers/GO/GettingStarted/README.md b/Documentation/Books/Drivers/GO/GettingStarted/README.md
deleted file mode 100644
index cfc6ec990d65..000000000000
--- a/Documentation/Books/Drivers/GO/GettingStarted/README.md
+++ /dev/null
@@ -1,142 +0,0 @@
-
-# ArangoDB GO Driver - Getting Started
-
-## Supported versions
-
-- ArangoDB versions 3.1 and up.
- - Single server & cluster setups
- - With or without authentication
-- Go 1.7 and up.
-
-## Go dependencies
-
-- None (Additional error libraries are supported).
-
-## Configuration
-
-To use the driver, first fetch the sources into your GOPATH.
-
-```sh
-go get github.com/arangodb/go-driver
-```
-
-Using the driver, you always need to create a `Client`.
-The following example shows how to create a `Client` for a single server
-running on localhost.
-
-```go
-import (
- "fmt"
-
- driver "github.com/arangodb/go-driver"
- "github.com/arangodb/go-driver/http"
-)
-
-...
-
-conn, err := http.NewConnection(http.ConnectionConfig{
- Endpoints: []string{"http://localhost:8529"},
-})
-if err != nil {
- // Handle error
-}
-c, err := driver.NewClient(driver.ClientConfig{
- Connection: conn,
-})
-if err != nil {
- // Handle error
-}
-```
-
-Once you have a `Client` you can access/create databases on the server,
-access/create collections, graphs, documents and so on.
-
-The following example shows how to open an existing collection in an existing database
-and create a new document in that collection.
-
-```go
-// Open "examples_books" database
-db, err := c.Database(nil, "examples_books")
-if err != nil {
- // Handle error
-}
-
-// Open "books" collection
-col, err := db.Collection(nil, "books")
-if err != nil {
- // Handle error
-}
-
-// Create document
-book := Book{
- Title: "ArangoDB Cookbook",
- NoPages: 257,
-}
-meta, err := col.CreateDocument(nil, book)
-if err != nil {
- // Handle error
-}
-fmt.Printf("Created document in collection '%s' in database '%s'\n", col.Name(), db.Name())
-```
-
-## API design
-
-### Concurrency
-
-All functions of the driver are strictly synchronous. They operate and only return a value (or error)
-when they're done.
-
-If you want to run operations concurrently, use a goroutine. All objects in the driver are designed
-to be used from multiple concurrent goroutines, except `Cursor`.
-
-All database objects (except `Cursor`) are considered static. After their creation they won't change.
-E.g. after creating a `Collection` instance you can remove the collection, but the (Go) instance
-will still be there. Calling functions on such a removed collection will of course fail.
-
-### Structured error handling & wrapping
-
-All functions of the driver that can fail return an `error` value. If that value is not `nil`, the
-function call is considered to be failed. In that case all other return values are set to their `zero`
-values.
-
-All errors are structured and can be inspected using error-checking functions whose names start with `Is`.
-E.g. `IsNotFound(error)` returns true if the given error is of the category "not found".
-There can be multiple internal error codes that all map onto the same category.
-
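-A minimal sketch of such a check (reusing the `db` and `ctx` values from the earlier examples):
-
-```go
-col, err := db.Collection(ctx, "myCollection")
-if driver.IsNotFound(err) {
-	// the collection does not exist
-} else if err != nil {
-	// handle other errors
-}
-```
-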
-All errors returned from any function of the driver (either internal or exposed) wrap errors
-using the `WithStack` function. This can be used to provide detailed stack traces in case of an error.
-All error checking functions use the `Cause` function to get the cause of an error instead of the error wrapper.
-
-Note that `WithStack` and `Cause` are actually variables, so you can implement them using your own error
-wrapper library.
-
-If you, for example, use https://github.com/pkg/errors, you can initialize the Go driver like this:
-```go
-import (
- driver "github.com/arangodb/go-driver"
- "github.com/pkg/errors"
-)
-
-func init() {
- driver.WithStack = errors.WithStack
- driver.Cause = errors.Cause
-}
-```
-
-### Context aware
-
-All functions of the driver that involve some kind of long-running operation or
-support additional options not given as function arguments have a `context.Context` argument.
-This enables you to cancel running requests, pass timeouts/deadlines and pass additional options.
-
-In all methods that take a `context.Context` argument you can pass `nil` as value.
-This is equivalent to passing `context.Background()`.
-
-Many functions support one or more optional (and infrequently used) additional options.
-These can be set with a `With...` function.
-E.g. to force a create-document call to wait until the data is synchronized to disk,
-use a prepared context like this:
-```go
-ctx := driver.WithWaitForSync(parentContext)
-collection.CreateDocument(ctx, yourDocument)
-```
diff --git a/Documentation/Books/Drivers/GO/README.md b/Documentation/Books/Drivers/GO/README.md
deleted file mode 100644
index 4701bd6c6e29..000000000000
--- a/Documentation/Books/Drivers/GO/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-# ArangoDB GO Driver
-
-The official [ArangoDB](https://arangodb.com) GO Driver
-
-- [Getting Started](GettingStarted/README.md)
-- [Example Requests](ExampleRequests/README.md)
-- [Connection Management](ConnectionManagement/README.md)
-- [Reference](https://godoc.org/github.com/arangodb/go-driver)
diff --git a/Documentation/Books/Drivers/JS/GettingStarted/README.md b/Documentation/Books/Drivers/JS/GettingStarted/README.md
deleted file mode 100644
index d8fb21cef3ba..000000000000
--- a/Documentation/Books/Drivers/JS/GettingStarted/README.md
+++ /dev/null
@@ -1,234 +0,0 @@
-
-# ArangoDB JavaScript Driver - Getting Started
-
-## Compatibility
-
-ArangoJS is compatible with the latest stable version of ArangoDB available at
-the time of the driver release.
-
-The [_arangoVersion_ option](../Reference/Database/README.md)
-can be used to tell arangojs to target a specific
-ArangoDB version. Depending on the version this will enable or disable certain
-methods and change behavior to maintain compatibility with the given version.
-The oldest version of ArangoDB supported by arangojs when using this option
-is 2.8.0 (using `arangoVersion: 20800`).
-
-The yarn/npm distribution of ArangoJS is compatible with Node.js versions 9.x
-(latest), 8.x (LTS) and 6.x (LTS). Node.js version support follows
-[the official Node.js long-term support schedule](https://github.com/nodejs/LTS).
-
-The included browser build is compatible with Internet Explorer 11 and recent
-versions of all modern browsers (Edge, Chrome, Firefox and Safari).
-
-Versions outside this range may be compatible but are not actively supported.
-
-**Note**: Starting with arangojs 6.0.0, all asynchronous functions return
-promises. If you are using a version of Node.js older than Node.js 6.x LTS
-("Boron") make sure you replace the native `Promise` implementation with a
-substitute like [bluebird](https://github.com/petkaantonov/bluebird)
-to avoid a known memory leak in older versions of the V8 JavaScript engine.
-
-## Versions
-
-The version number of this driver does not indicate supported ArangoDB versions!
-
-This driver uses semantic versioning:
-
-- A change in the bugfix version (e.g. X.Y.0 -> X.Y.1) indicates internal
- changes and should always be safe to upgrade.
-- A change in the minor version (e.g. X.1.Z -> X.2.0) indicates additions and
- backwards-compatible changes that should not affect your code.
-- A change in the major version (e.g. 1.Y.Z -> 2.0.0) indicates _breaking_
- changes that require changes in your code to upgrade.
-
-If you are getting weird errors or functions seem to be missing, make sure you
-are using the latest version of the driver and following documentation written
-for a compatible version. If you are following a tutorial written for an older
-version of arangojs, you can install that version using the `@`
-syntax:
-
-```sh
-# for version 4.x.x
-yarn add arangojs@4
-# - or -
-npm install --save arangojs@4
-```
-
-You can find the documentation for each version by clicking on the corresponding
-date on the left in
-[the list of version tags](https://github.com/arangodb/arangojs/tags).
-
-## Install
-
-### With Yarn or NPM
-
-```sh
-yarn add arangojs
-# - or -
-npm install --save arangojs
-```
-
-### With Bower
-
-Starting with arangojs 6.0.0 Bower is no longer supported and the browser
-build is now included in the NPM release (see below).
-
-### From source
-
-```sh
-git clone https://github.com/arangodb/arangojs.git
-cd arangojs
-npm install
-npm run dist
-```
-
-### For browsers
-
-For production use, arangojs can be installed with Yarn or NPM like any
-other dependency. Just use arangojs like you would in your server code:
-
-```js
-import { Database } from "arangojs";
-// -- or --
-var arangojs = require("arangojs");
-```
-
-Additionally the NPM release comes with a precompiled browser build:
-
-```js
-var arangojs = require("arangojs/lib/web");
-```
-
-You can also use [unpkg](https://unpkg.com) during development:
-
-```html
-<!-- note the path includes the version number (e.g. 6.0.0) -->
-
-
-```
-
-If you are targeting browsers older than Internet Explorer 11 you may want to
-use [babel](https://babeljs.io) with a
-[polyfill](https://babeljs.io/docs/usage/polyfill) to provide missing
-functionality needed to use arangojs.
-
-When loading the browser build with a script tag, make sure to load the polyfill first:
-
-```html
-
-
-```
-
-## Basic usage example
-
-```js
-// Modern JavaScript
-import { Database, aql } from "arangojs";
-const db = new Database();
-(async function() {
- const now = Date.now();
- try {
- const cursor = await db.query(aql`
- RETURN ${now}
- `);
- const result = await cursor.next();
- // ...
- } catch (err) {
- // ...
- }
-})();
-
-// or plain old Node-style
-var arangojs = require("arangojs");
-var db = new arangojs.Database();
-var now = Date.now();
-db.query({
- query: "RETURN @value",
- bindVars: { value: now }
-})
- .then(function(cursor) {
- return cursor.next().then(function(result) {
- // ...
- });
- })
- .catch(function(err) {
- // ...
- });
-
-// Using different databases
-const db = new Database({
- url: "http://localhost:8529"
-});
-db.useDatabase("pancakes");
-db.useBasicAuth("root", "");
-// The database can be swapped at any time
-db.useDatabase("waffles");
-db.useBasicAuth("admin", "maplesyrup");
-
-// Using ArangoDB behind a reverse proxy
-const db = new Database({
- url: "http://myproxy.local:8000",
- isAbsolute: true // don't automatically append database path to URL
-});
-
-// Trigger ArangoDB 2.8 compatibility mode
-const db = new Database({
- arangoVersion: 20800
-});
-```
-
-For AQL please check out the [aql template tag](../Reference/Database/Queries.md#aql) for writing parametrized
-AQL queries without making your code vulnerable to injection attacks.
-
-## Error responses
-
-If arangojs encounters an API error, it will throw an _ArangoError_ with an
-[_errorNum_ error code](../../..//Manual/Appendix/ErrorCodes.html)
-as well as a _code_ and _statusCode_ property indicating the intended and
-actual HTTP status code of the response.
-
-For any other error responses (4xx/5xx status code), it will throw an
-_HttpError_ error with the status code indicated by the _code_ and _statusCode_ properties.
-
-If the server response did not indicate an error but the response body could
-not be parsed, a _SyntaxError_ may be thrown instead.
-
-In all of these cases the error object will additionally have a _response_
-property containing the server response object.
-
-If the request failed at a network level or the connection was closed without
-receiving a response, the underlying error will be thrown instead.
-
-**Examples**
-
-```js
-// Using async/await
-try {
- const info = await db.createDatabase("mydb");
- // database created
-} catch (err) {
- console.error(err.stack);
-}
-
-// Using promises with arrow functions
-db.createDatabase("mydb").then(
- info => {
- // database created
- },
- err => console.error(err.stack)
-);
-```
-
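-As a hedged illustration of the error properties described above (not part of the
-official examples), the different failure modes could be told apart like this; the
-`isArangoError` property is assumed here to be present on _ArangoError_ instances:
-
-```js
-try {
-  await db.createDatabase("mydb");
-} catch (err) {
-  if (err.isArangoError) {
-    // the server responded with an ArangoDB API error
-    console.error("ArangoDB error", err.errorNum, err.message);
-  } else if (err.statusCode) {
-    // non-API error response (HttpError), e.g. from a proxy
-    console.error("HTTP error", err.statusCode);
-  } else {
-    // network-level failure or a response that could not be parsed
-    console.error(err);
-  }
-}
-```
-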
-{% hint 'tip' %}
-The examples in the remainder of this documentation use `async`/`await`
-and other modern language features like multi-line strings and template tags.
-When developing for an environment without support for these language features,
-substitute promises for `await` syntax as in the above example.
-{% endhint %}
diff --git a/Documentation/Books/Drivers/JS/README.md b/Documentation/Books/Drivers/JS/README.md
deleted file mode 100644
index bb12b7aa50be..000000000000
--- a/Documentation/Books/Drivers/JS/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-
-# ArangoDB JavaScript Driver
-
-The official ArangoDB low-level JavaScript client.
-
-**Note:** if you are looking for the ArangoDB JavaScript API in
-[Foxx](https://foxx.arangodb.com) (or the `arangosh` interactive shell) please
-refer to the documentation about the
-[`@arangodb` module](../..//Manual/Foxx/Reference/Modules/index.html#the-arangodb-module)
-instead; specifically the `db` object exported by the `@arangodb` module. The
-JavaScript driver is **only** meant to be used when accessing ArangoDB from
-**outside** the database.
-
-- [Getting Started](GettingStarted/README.md)
-- [Reference](Reference/README.md)
-- [Changelog](https://github.com/arangodb/arangojs/blob/master/CHANGELOG.md#readme)
diff --git a/Documentation/Books/Drivers/JS/Reference/Aql.md b/Documentation/Books/Drivers/JS/Reference/Aql.md
deleted file mode 100644
index 313c8c4ee088..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Aql.md
+++ /dev/null
@@ -1,151 +0,0 @@
-
-# AQL Helpers
-
-These helpers are available via the `aql` export from the arangojs module:
-
-```js
-import arangojs, { aql } from "arangojs";
-
-// or CommonJS:
-
-const arangojs = require("arangojs");
-const aql = arangojs.aql;
-```
-
-## aql
-
-`aql: AqlQuery`
-
-The `aql` function is a JavaScript template string handler (or template tag).
-It can be used to write complex AQL queries as multi-line strings without
-having to worry about bindVars and the distinction between collections
-and regular parameters.
-
-To use it just prefix a JavaScript template string (the ones with backticks
-instead of quotes) with its import name (e.g. `aql`) and pass in variables
-like you would with a regular template string. The string will automatically
-be converted into an object with `query` and `bindVars` attributes which you
-can pass directly to `db.query` to execute. If you pass in a collection it
-will be automatically recognized as a collection reference
-and handled accordingly.
-
-The `aql` template tag can also be used inside other `aql` template strings,
-allowing arbitrary nesting. Bind parameters of nested queries will be merged
-automatically.
-
-**Examples**
-
-```js
-const filterValue = 23;
-const mydata = db.collection("mydata");
-const result = await db.query(aql`
- FOR d IN ${mydata}
- FILTER d.num > ${filterValue}
- RETURN d
-`);
-
-// nested queries
-
-const color = "green";
-const filterByColor = aql`FILTER d.color == ${color}`;
-const result2 = await db.query(aql`
- FOR d IN ${mydata}
- ${filterByColor}
- RETURN d
-`);
-```
-
-## aql.literal
-
-`aql.literal(value): AqlLiteral`
-
-The `aql.literal` helper can be used to mark strings to be inlined into an AQL
-query when using the `aql` template tag, rather than being treated as a bind
-parameter.
-
-{% hint 'danger' %}
-Any value passed to `aql.literal` will be treated as part of the AQL query.
-To avoid becoming vulnerable to AQL injection attacks you should always prefer
-nested `aql` queries if possible.
-{% endhint %}
-
-**Arguments**
-
-- **value**: `string`
-
- An arbitrary string that will be treated as a literal AQL fragment when used
- in an `aql` template.
-
-**Examples**
-
-```js
-const filterGreen = aql.literal('FILTER d.color == "green"');
-const result = await db.query(aql`
- FOR d IN ${mydata}
- ${filterGreen}
- RETURN d
-`);
-```
-
-## aql.join
-
-`aql.join(values, [sep])`
-
-The `aql.join` helper takes an array of queries generated using the `aql` tag
-and combines them into a single query. The optional second argument will be
-used as a literal string to combine the queries.
-
-**Arguments**
-
-- **values**: `Array`
-
- An array of arbitrary values, typically AQL query objects or AQL literals.
-
-- **sep**: `string` (Default: `" "`)
-
- String that will be used in between the values.
-
-**Examples**
-
-```js
-// Basic usage
-const parts = [aql`FILTER`, aql`x`, aql`%`, aql`2`];
-const joined = aql.join(parts); // aql`FILTER x % 2`
-
-// Merge without the extra space
-const parts = [aql`FIL`, aql`TER`];
-const joined = aql.join(parts, ""); // aql`FILTER`;
-
-// Real world example: translate keys into document lookups
-const users = db.collection("users");
-const keys = ["abc123", "def456"];
-const docs = keys.map(key => aql`DOCUMENT(${users}, ${key})`);
-const aqlArray = aql`[${aql.join(docs, ", ")}]`;
-const result = await db.query(aql`
- FOR d IN ${aqlArray}
- RETURN d
-`);
-// Query:
-// FOR d IN [DOCUMENT(@@value0, @value1), DOCUMENT(@@value0, @value2)]
-// RETURN d
-// Bind parameters:
-// @value0: "users"
-// value1: "abc123"
-// value2: "def456"
-
-// Alternative without `aql.join`
-const users = db.collection("users");
-const keys = ["abc123", "def456"];
-const result = await db.query(aql`
- FOR key IN ${keys}
- LET d = DOCUMENT(${users}, key)
- RETURN d
-`);
-// Query:
-// FOR key IN @value0
-// LET d = DOCUMENT(@@value1, key)
-// RETURN d
-// Bind parameters:
-// value0: ["abc123", "def456"]
-// @value1: "users"
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/BulkImport.md b/Documentation/Books/Drivers/JS/Reference/Collection/BulkImport.md
deleted file mode 100644
index 460ad269fc46..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Collection/BulkImport.md
+++ /dev/null
@@ -1,152 +0,0 @@
-
-# Bulk importing documents
-
-This function implements the
-[HTTP API for bulk imports](../../../..//HTTP/BulkImports/index.html).
-
-## collection.import
-
-`async collection.import(data, [opts]): Object`
-
-Bulk imports the given _data_ into the collection.
-
-**Arguments**
-
-- **data**: `Array | Buffer | string`
-
- The data to import. Depending on the _type_ option this can be any of the
- following:
-
- For type `"documents"` or `"auto"`:
-
- - an array of documents, e.g.
-
- ```json
- [
- { "_key": "banana", "color": "yellow" },
- { "_key": "peach", "color": "pink" }
- ]
- ```
-
- - a string or buffer containing one JSON document per line, e.g.
-
- ```
- {"_key":"banana","color":"yellow"}
- {"_key":"peach","color":"pink"}
- ```
-
- For type `"array"` or `"auto"`:
-
- - a string or buffer containing a JSON array of documents, e.g.
-
- ```json
- [
- { "_key": "banana", "color": "yellow" },
- { "_key": "peach", "color": "pink" }
- ]
- ```
-
- For type `null`:
-
- - an array containing an array of keys followed by arrays of values, e.g.
-
- ```
- [
- ["_key", "color"],
- ["banana", "yellow"],
- ["peach", "pink"]
- ]
- ```
-
- - a string or buffer containing a JSON array of keys followed by
- one JSON array of values per line, e.g.
-
- ```
- ["_key", "color"]
- ["banana", "yellow"]
- ["peach", "pink"]
- ```
-
-- **opts**: `Object` (optional)
-
- If _opts_ is set, it must be an object with any of the following properties:
-
- - **type**: `string | null` (Default: `"auto"`)
-
- Indicates which format the data uses.
- Can be `"documents"`, `"array"` or `"auto"`.
- Use `null` to explicitly set no type.
-
- - **fromPrefix**: `string` (optional)
-
- Prefix to prepend to `_from` attributes.
-
- - **toPrefix**: `string` (optional)
-
- Prefix to prepend to `_to` attributes.
-
- - **overwrite**: `boolean` (Default: `false`)
-
- If set to `true`, the collection is truncated before the data is imported.
-
- - **waitForSync**: `boolean` (Default: `false`)
-
- Wait until the documents have been synced to disk.
-
- - **onDuplicate**: `string` (Default: `"error"`)
-
- Controls behavior when a unique constraint is violated.
- Can be `"error"`, `"update"`, `"replace"` or `"ignore"`.
-
- - **complete**: `boolean` (Default: `false`)
-
- If set to `true`, the import will abort if any error occurs.
-
- - **details**: `boolean` (Default: `false`)
-
- Whether the response should contain additional details about documents that
- could not be imported.
-
-For more information on the _opts_ object, see the
-[HTTP API documentation for bulk imports](../../../..//HTTP/BulkImports/index.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("users");
-
-const result = await collection.import(
- [
- { username: "jcd", password: "bionicman" },
- { username: "jreyes", password: "amigo" },
- { username: "ghermann", password: "zeitgeist" }
- ],
- { type: "documents" } // optional
-);
-
-// -- or --
-
-const buf = fs.readFileSync("dx_users.json");
-// [
-// {"username": "jcd", "password": "bionicman"},
-// {"username": "jreyes", "password": "amigo"},
-// {"username": "ghermann", "password": "zeitgeist"}
-// ]
-const result = await collection.import(
- buf,
- { type: "array" } // optional
-);
-
-// -- or --
-
-const result = await collection.import(
- [
- ["username", "password"],
- ["jcd", "bionicman"],
- ["jreyes", "amigo"],
- ["ghermann", "zeitgeist"]
- ],
- { type: null } // required
-);
-```
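-
-As a hedged sketch of the _onDuplicate_ option described above (not one of the
-original examples), an import can update existing documents instead of reporting
-them as errors:
-
-```js
-// assumes the "users" collection and documents from the examples above
-const result = await collection.import(
-  [{ _key: "jcd", active: true }],
-  { type: "documents", onDuplicate: "update" }
-);
-// documents whose _key already exists are updated rather than counted as errors
-```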
diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/CollectionManipulation.md b/Documentation/Books/Drivers/JS/Reference/Collection/CollectionManipulation.md
deleted file mode 100644
index eb6d4aa1c159..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Collection/CollectionManipulation.md
+++ /dev/null
@@ -1,175 +0,0 @@
-
-# Manipulating the collection
-
-These functions implement the
-[HTTP API for modifying collections](../../../..//HTTP/Collection/Modifying.html).
-
-## collection.create
-
-`async collection.create([properties]): Object`
-
-Creates a collection with the given _properties_ for this collection's name,
-then returns the server response.
-
-**Arguments**
-
-- **properties**: `Object` (optional)
-
- For more information on the _properties_ object, see the
- [HTTP API documentation for creating collections](../../../..//HTTP/Collection/Creating.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection('potatoes');
-await collection.create()
-// the document collection "potatoes" now exists
-
-// -- or --
-
-const collection = db.edgeCollection('friends');
-await collection.create({
- waitForSync: true // always sync document changes to disk
-});
-// the edge collection "friends" now exists
-```
-
-## collection.load
-
-`async collection.load([count]): Object`
-
-Tells the server to load the collection into memory.
-
-**Arguments**
-
-- **count**: `boolean` (Default: `true`)
-
- If set to `false`, the return value will not include the number of documents
- in the collection (which may speed up the process).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection('some-collection');
-await collection.load(false)
-// the collection has now been loaded into memory
-```
-
-## collection.unload
-
-`async collection.unload(): Object`
-
-Tells the server to remove the collection from memory.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection('some-collection');
-await collection.unload()
-// the collection has now been unloaded from memory
-```
-
-## collection.setProperties
-
-`async collection.setProperties(properties): Object`
-
-Replaces the properties of the collection.
-
-**Arguments**
-
-- **properties**: `Object`
-
- For information on the _properties_ argument see the
- [HTTP API for modifying collections](../../../..//HTTP/Collection/Modifying.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection('some-collection');
-const result = await collection.setProperties({waitForSync: true})
-assert.equal(result.waitForSync, true);
-// the collection will now wait for data being written to disk
-// whenever a document is changed
-```
-
-## collection.rename
-
-`async collection.rename(name): Object`
-
-Renames the collection. The _Collection_ instance will automatically update its
-name when the rename succeeds.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection('some-collection');
-const result = await collection.rename('new-collection-name')
-assert.equal(result.name, 'new-collection-name');
-assert.equal(collection.name, result.name);
-// result contains additional information about the collection
-```
-
-## collection.rotate
-
-`async collection.rotate(): Object`
-
-Rotates the journal of the collection.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection('some-collection');
-const data = await collection.rotate();
-// data.result will be true if rotation succeeded
-```
-
-## collection.truncate
-
-`async collection.truncate(): Object`
-
-Deletes **all documents** in the collection.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection('some-collection');
-await collection.truncate();
-// the collection "some-collection" is now empty
-```
-
-## collection.drop
-
-`async collection.drop([properties]): Object`
-
-Deletes the collection from the database.
-
-**Arguments**
-
-- **properties**: `Object` (optional)
-
- An object with the following properties:
-
- - **isSystem**: `Boolean` (Default: `false`)
-
- Whether the collection should be dropped even if it is a system collection.
-
- This parameter must be set to `true` when dropping a system collection.
-
- For more information on the _properties_ object, see the
- [HTTP API documentation for dropping collections](../../../..//HTTP/Collection/Creating.html#drops-a-collection).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection('some-collection');
-await collection.drop();
-// the collection "some-collection" no longer exists
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/DocumentCollection.md b/Documentation/Books/Drivers/JS/Reference/Collection/DocumentCollection.md
deleted file mode 100644
index 2d690f5a7f6a..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Collection/DocumentCollection.md
+++ /dev/null
@@ -1,185 +0,0 @@
-
-# DocumentCollection API
-
-The _DocumentCollection API_ extends the
-[_Collection API_](README.md) with the following methods.
-
-## documentCollection.document
-
-`async documentCollection.document(documentHandle, [opts]): Document`
-
-Retrieves the document with the given _documentHandle_ from the collection.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
- The handle of the document to retrieve. This can be either the `_id` or the
- `_key` of a document in the collection, or a document (i.e. an object with an
- `_id` or `_key` property).
-
-- **opts**: `Object` (optional)
-
- If _opts_ is set, it must be an object with any of the following properties:
-
- - **graceful**: `boolean` (Default: `false`)
-
- If set to `true`, the method will return `null` instead of throwing an
- error if the document does not exist.
-
- - **allowDirtyRead**: `boolean` (Default: `false`)
-
- {% hint 'info' %}
- This option is only available when targeting ArangoDB 3.4 or later,
- see [Compatibility](../../GettingStarted/README.md#compatibility).
- {% endhint %}
-
- If set to `true`, the request will explicitly permit ArangoDB to return a
- potentially dirty or stale result and arangojs will load balance the
- request without distinguishing between leaders and followers.
-
-If a boolean is passed instead of an options object, it will be interpreted as
-the _graceful_ option.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("my-docs");
-
-try {
- const doc = await collection.document("some-key");
- // the document exists
- assert.equal(doc._key, "some-key");
- assert.equal(doc._id, "my-docs/some-key");
-} catch (err) {
- // something went wrong or
- // the document does not exist
-}
-
-// -- or --
-
-try {
- const doc = await collection.document("my-docs/some-key");
- // the document exists
- assert.equal(doc._key, "some-key");
- assert.equal(doc._id, "my-docs/some-key");
-} catch (err) {
- // something went wrong or
- // the document does not exist
-}
-
-// -- or --
-
-const doc = await collection.document("some-key", true);
-if (doc === null) {
- // the document does not exist
-}
-```
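-
-The _allowDirtyRead_ option described above is passed the same way as _graceful_;
-the following sketch is an illustration based on that description (it requires
-ArangoDB 3.4 or later and a load-balanced setup to have any effect):
-
-```js
-const doc = await collection.document("some-key", { allowDirtyRead: true });
-// the document may have been read from a follower and could be slightly stale
-```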
-
-## documentCollection.documentExists
-
-`async documentCollection.documentExists(documentHandle): boolean`
-
-Checks whether the document with the given _documentHandle_ exists.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
- The handle of the document to retrieve. This can be either the `_id` or the
- `_key` of a document in the collection, or a document (i.e. an object with an
- `_id` or `_key` property).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("my-docs");
-
-const exists = await collection.documentExists("some-key");
-if (exists === false) {
- // the document does not exist
-}
-```
-
-## documentCollection.save
-
-`async documentCollection.save(data, [opts]): Object`
-
-Creates a new document with the given _data_ and returns an object containing
-the document's metadata (`_id`, `_key` and `_rev` attributes).
-
-Multiple documents can be created in a single call by passing an array of
-objects as argument for _data_. The result will be an array too, of which
-some elements can be error objects if the documents couldn't be saved.
-
-**Arguments**
-
-- **data**: `Object | Object[]`
-
- The data of the new document, may include a `_key`.
-
-- **opts**: `Object` (optional)
-
- If _opts_ is set, it must be an object with any of the following properties:
-
- - **waitForSync**: `boolean` (Default: `false`)
-
- Wait until document has been synced to disk.
-
- - **returnNew**: `boolean` (Default: `false`)
-
- If set to `true`, return additionally the complete new document(s) under the
- attribute `new` in the result.
-
- - **returnOld**: `boolean` (Default: `false`)
-
- If set to `true`, return additionally the complete old document(s) under the
- attribute `old` in the result.
-
- - **silent**: `boolean` (Default: `false`)
-
- If set to true, an empty object will be returned as response. No meta-data
- will be returned for the created document. This option can be used to save
- some network traffic.
-
- - **overwrite**: `boolean` (Default: `false`)
-
- {% hint 'warning' %}
- This option is only available when targeting ArangoDB v3.4.0 and later.
- {% endhint %}
-
- If set to true, the insert becomes a replace-insert. If a document with the
- same \_key already exists, the new document is not rejected with a unique
- constraint violation but will replace the old document.
-
-If a boolean is passed instead of an options object, it will be interpreted as
-the _returnNew_ option.
-
-For more information on the _opts_ object, see the
-[HTTP API documentation for working with documents](../../../..//HTTP/Document/WorkingWithDocuments.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("my-docs");
-const data = { some: "data" };
-const info = await collection.save(data);
-assert.equal(info._id, "my-docs/" + info._key);
-const doc2 = await collection.document(info);
-assert.equal(doc2._id, info._id);
-assert.equal(doc2._rev, info._rev);
-assert.equal(doc2.some, data.some);
-
-// -- or --
-
-const db = new Database();
-const collection = db.collection("my-docs");
-const data = { some: "data" };
-const opts = { returnNew: true };
-const doc1 = await collection.save(data, opts);
-assert.equal(doc1._id, "my-docs/" + doc1._key);
-assert.equal(doc1.new.some, data.some);
-```
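-
-Since _data_ may also be an array of objects, several documents can be created in
-a single call. The following sketch illustrates this based on the description
-above; the exact shape of the per-document error objects is an assumption:
-
-```js
-const db = new Database();
-const collection = db.collection("my-docs");
-const results = await collection.save([{ some: "data" }, { more: "data" }]);
-// results is an array with one entry per document
-for (const result of results) {
-  if (result.error) {
-    // this document could not be saved (assumed error object shape)
-  }
-}
-```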
diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/DocumentManipulation.md b/Documentation/Books/Drivers/JS/Reference/Collection/DocumentManipulation.md
deleted file mode 100644
index d12220792cbc..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Collection/DocumentManipulation.md
+++ /dev/null
@@ -1,318 +0,0 @@
-
-# Manipulating documents
-
-These functions implement the
-[HTTP API for manipulating documents](../../../..//HTTP/Document/index.html).
-
-## collection.replace
-
-`async collection.replace(documentHandle, newValue, [opts]): Object`
-
-Replaces the content of the document with the given _documentHandle_ with the
-given _newValue_ and returns an object containing the document's metadata.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
- The handle of the document to replace. This can either be the `_id` or the
- `_key` of a document in the collection, or a document (i.e. an object with an
- `_id` or `_key` property).
-
-- **newValue**: `Object`
-
- The new data of the document.
-
-- **opts**: `Object` (optional)
-
- If _opts_ is set, it must be an object with any of the following properties:
-
- - **waitForSync**: `boolean` (Default: `false`)
-
- Wait until the document has been synced to disk. Default: `false`.
-
- - **rev**: `string` (optional)
-
- Only replace the document if it matches this revision.
-
- - **policy**: `string` (optional)
-
- {% hint 'warning' %}
- This option has no effect in ArangoDB 3.0 and later.
- {% endhint %}
-
- Determines the behavior when the revision is not matched:
-
- - if _policy_ is set to `"last"`, the document will be replaced regardless
- of the revision.
- - if _policy_ is set to `"error"` or not set, the replacement will fail with
- an error.
-
-If a string is passed instead of an options object, it will be interpreted as
-the _rev_ option.
-
-For more information on the _opts_ object, see the
-[HTTP API documentation for working with documents](../../../..//HTTP/Document/WorkingWithDocuments.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-const data = { number: 1, hello: "world" };
-const info1 = await collection.save(data);
-const info2 = await collection.replace(info1, { number: 2 });
-assert.equal(info2._id, info1._id);
-assert.notEqual(info2._rev, info1._rev);
-const doc = await collection.document(info1);
-assert.equal(doc._id, info1._id);
-assert.equal(doc._rev, info2._rev);
-assert.equal(doc.number, 2);
-assert.equal(doc.hello, undefined);
-```
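-
-To use the _rev_ precondition described above, pass a revision via the options
-(or as a plain string). This is a hedged sketch building on the previous example
-rather than an official snippet:
-
-```js
-try {
-  // info1._rev is outdated after the replace above, so this call is rejected
-  await collection.replace(info1._id, { number: 3 }, { rev: info1._rev });
-} catch (err) {
-  // the revision precondition failed; the document was not replaced
-}
-```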
-
-## collection.update
-
-`async collection.update(documentHandle, newValue, [opts]): Object`
-
-Updates (merges) the content of the document with the given _documentHandle_
-with the given _newValue_ and returns an object containing the document's
-metadata.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
- Handle of the document to update. This can be either the `_id` or the `_key`
- of a document in the collection, or a document (i.e. an object with an `_id`
- or `_key` property).
-
-- **newValue**: `Object`
-
- The new data of the document.
-
-- **opts**: `Object` (optional)
-
- If _opts_ is set, it must be an object with any of the following properties:
-
- - **waitForSync**: `boolean` (Default: `false`)
-
- Wait until document has been synced to disk.
-
- - **keepNull**: `boolean` (Default: `true`)
-
- If set to `false`, properties with a value of `null` indicate that a
- property should be deleted.
-
- - **mergeObjects**: `boolean` (Default: `true`)
-
- If set to `false`, object properties that already exist in the old document
- will be overwritten rather than merged. This does not affect arrays.
-
- - **returnOld**: `boolean` (Default: `false`)
-
- If set to `true`, return additionally the complete previous revision of the
- changed documents under the attribute `old` in the result.
-
- - **returnNew**: `boolean` (Default: `false`)
-
- If set to `true`, return additionally the complete new documents under the
- attribute `new` in the result.
-
- - **ignoreRevs**: `boolean` (Default: `true`)
-
- By default, or if this is set to true, the `_rev` attributes in the given
- documents are ignored. If this is set to false, then any `_rev` attribute
- given in a body document is taken as a precondition. The document is only
- updated if the current revision is the one specified.
-
- - **rev**: `string` (optional)
-
- Only update the document if it matches this revision.
-
- - **policy**: `string` (optional)
-
- {% hint 'warning' %}
- This option has no effect in ArangoDB 3.0 and later.
- {% endhint %}
-
- Determines the behavior when the revision is not matched:
-
- - if _policy_ is set to `"last"`, the document will be updated regardless
- of the revision.
- - if _policy_ is set to `"error"` or not set, the update will fail with
- an error.
-
-If a string is passed instead of an options object, it will be interpreted as
-the _rev_ option.
-
-For more information on the _opts_ object, see the
-[HTTP API documentation for working with documents](../../../..//HTTP/Document/WorkingWithDocuments.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-const doc = { number: 1, hello: "world" };
-const doc1 = await collection.save(doc);
-const doc2 = await collection.update(doc1, { number: 2 });
-assert.equal(doc2._id, doc1._id);
-assert.notEqual(doc2._rev, doc1._rev);
-const doc3 = await collection.document(doc2);
-assert.equal(doc3._id, doc2._id);
-assert.equal(doc3._rev, doc2._rev);
-assert.equal(doc3.number, 2);
-assert.equal(doc3.hello, doc.hello);
-```
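-
-As a hedged illustration of the _keepNull_ option described above (building on the
-previous example, not an official snippet), setting a property to `null` together
-with `keepNull: false` removes it from the document:
-
-```js
-await collection.update(doc1, { hello: null }, { keepNull: false });
-const doc4 = await collection.document(doc1);
-// doc4 no longer has a "hello" property
-```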
-
-## collection.bulkUpdate
-
-`async collection.bulkUpdate(documents, [opts]): Object`
-
-Updates (merges) the content of the documents with the given _documents_ and
-returns an array containing the documents' metadata.
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.0 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-**Arguments**
-
-- **documents**: `Array`
-
- Documents to update. Each object must have either the `_id` or the `_key`
- property.
-
-- **opts**: `Object` (optional)
-
- If _opts_ is set, it must be an object with any of the following properties:
-
- - **waitForSync**: `boolean` (Default: `false`)
-
- Wait until document has been synced to disk.
-
- - **keepNull**: `boolean` (Default: `true`)
-
- If set to `false`, properties with a value of `null` indicate that a
- property should be deleted.
-
- - **mergeObjects**: `boolean` (Default: `true`)
-
- If set to `false`, object properties that already exist in the old document
- will be overwritten rather than merged. This does not affect arrays.
-
- - **returnOld**: `boolean` (Default: `false`)
-
- If set to `true`, return additionally the complete previous revision of the
- changed documents under the attribute `old` in the result.
-
- - **returnNew**: `boolean` (Default: `false`)
-
- If set to `true`, return additionally the complete new documents under the
- attribute `new` in the result.
-
- - **ignoreRevs**: `boolean` (Default: `true`)
-
- By default, or if this is set to true, the `_rev` attributes in the given
- documents are ignored. If this is set to false, then any `_rev` attribute
- given in a body document is taken as a precondition. The document is only
- updated if the current revision is the one specified.
-
-For more information on the _opts_ object, see the
-[HTTP API documentation for working with documents](../../../..//HTTP/Document/WorkingWithDocuments.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-const doc1 = { number: 1, hello: "world1" };
-const info1 = await collection.save(doc1);
-const doc2 = { number: 2, hello: "world2" };
-const info2 = await collection.save(doc2);
-const result = await collection.bulkUpdate(
- [{ _key: info1._key, number: 3 }, { _key: info2._key, number: 4 }],
- { returnNew: true }
-);
-```
-
-## collection.remove
-
-`async collection.remove(documentHandle, [opts]): Object`
-
-Deletes the document with the given _documentHandle_ from the collection.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
- The handle of the document to delete. This can be either the `_id` or the
- `_key` of a document in the collection, or a document (i.e. an object with an
- `_id` or `_key` property).
-
-- **opts**: `Object` (optional)
-
- If _opts_ is set, it must be an object with any of the following properties:
-
- - **waitForSync**: `boolean` (Default: `false`)
-
- Wait until document has been synced to disk.
-
- - **rev**: `string` (optional)
-
- Only remove the document if it matches this revision.
-
- - **policy**: `string` (optional)
-
- {% hint 'warning' %}
- This option has no effect in ArangoDB 3.0 and later.
- {% endhint %}
-
- Determines the behavior when the revision is not matched:
-
- - if _policy_ is set to `"last"`, the document will be removed regardless
- of the revision.
- - if _policy_ is set to `"error"` or not set, the removal will fail with
- an error.
-
-If a string is passed instead of an options object, it will be interpreted as
-the _rev_ option.
-
-For more information on the _opts_ object, see the
-[HTTP API documentation for working with documents](../../../..//HTTP/Document/WorkingWithDocuments.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-
-await collection.remove("some-doc");
-// document 'some-collection/some-doc' no longer exists
-
-// -- or --
-
-await collection.remove("some-collection/some-doc");
-// document 'some-collection/some-doc' no longer exists
-```
-
-## collection.list
-
-`async collection.list([type]): Array`
-
-Retrieves a list of references for all documents in the collection.
-
-**Arguments**
-
-- **type**: `string` (Default: `"id"`)
-
- The format of the document references:
-
- - if _type_ is set to `"id"`, each reference will be the `_id` of the
- document.
- - if _type_ is set to `"key"`, each reference will be the `_key` of the
- document.
- - if _type_ is set to `"path"`, each reference will be the URI path of the
- document.
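-
-**Examples**
-
-The following sketch is a non-official illustration of the _type_ values described
-above, assuming the collection contains a few documents:
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-
-const ids = await collection.list();
-// e.g. ["some-collection/a", "some-collection/b"]
-
-const keys = await collection.list("key");
-// e.g. ["a", "b"]
-```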
diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/EdgeCollection.md b/Documentation/Books/Drivers/JS/Reference/Collection/EdgeCollection.md
deleted file mode 100644
index 26dc648ca5cd..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Collection/EdgeCollection.md
+++ /dev/null
@@ -1,323 +0,0 @@
-
-# EdgeCollection API
-
-The _EdgeCollection API_ extends the
-[_Collection API_](README.md) with the following methods.
-
-## edgeCollection.document
-
-`async edgeCollection.document(documentHandle, [opts]): Edge`
-
-Alias: `edgeCollection.edge`.
-
-Retrieves the edge with the given _documentHandle_ from the collection.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
- The handle of the edge to retrieve. This can be either the `_id` or the `_key`
- of an edge in the collection, or an edge (i.e. an object with an `_id` or
- `_key` property).
-
-- **opts**: `Object` (optional)
-
- If _opts_ is set, it must be an object with any of the following properties:
-
- - **graceful**: `boolean` (Default: `false`)
-
- If set to `true`, the method will return `null` instead of throwing an
- error if the edge does not exist.
-
- - **allowDirtyRead**: `boolean` (Default: `false`)
-
- {% hint 'info' %}
- This option is only available when targeting ArangoDB 3.4 or later,
- see [Compatibility](../../GettingStarted/README.md#compatibility).
- {% endhint %}
-
- If set to `true`, the request will explicitly permit ArangoDB to return a
- potentially dirty or stale result and arangojs will load balance the
- request without distinguishing between leaders and followers.
-
-If a boolean is passed instead of an options object, it will be interpreted as
-the _graceful_ option.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.edgeCollection("edges");
-
-const edge = await collection.document("some-key");
-// the edge exists
-assert.equal(edge._key, "some-key");
-assert.equal(edge._id, "edges/some-key");
-
-// -- or --
-
-const edge = await collection.document("edges/some-key");
-// the edge exists
-assert.equal(edge._key, "some-key");
-assert.equal(edge._id, "edges/some-key");
-
-// -- or --
-
-const edge = await collection.document("some-key", true);
-if (edge === null) {
- // the edge does not exist
-}
-```
-
-## edgeCollection.documentExists
-
-`async edgeCollection.documentExists(documentHandle): boolean`
-
-Checks whether the edge with the given _documentHandle_ exists.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
- The handle of the edge to retrieve. This can be either the `_id` or the
- `_key` of an edge in the collection, or an edge (i.e. an object with an
- `_id` or `_key` property).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.edgeCollection("my-docs");
-
-const exists = await collection.documentExists("some-key");
-if (exists === false) {
- // the edge does not exist
-}
-```
-
-## edgeCollection.save
-
-`async edgeCollection.save(data, [fromId, toId], [opts]): Object`
-
-Creates a new edge between the documents _fromId_ and _toId_ with the given
-_data_ and returns an object containing the edge's metadata.
-
-**Arguments**
-
-- **data**: `Object`
-
- The data of the new edge. If _fromId_ and _toId_ are not specified, the _data_
- needs to contain the properties `_from` and `_to`.
-
-- **fromId**: `string` (optional)
-
- The handle of the start vertex of this edge. This can be either the `_id` of a
- document in the database, the `_key` of an edge in the collection, or a
- document (i.e. an object with an `_id` or `_key` property).
-
-- **toId**: `string` (optional)
-
- The handle of the end vertex of this edge. This can be either the `_id` of a
- document in the database, the `_key` of an edge in the collection, or a
- document (i.e. an object with an `_id` or `_key` property).
-
-- **opts**: `Object` (optional)
-
- If _opts_ is set, it must be an object with any of the following properties:
-
- - **waitForSync**: `boolean` (Default: `false`)
-
- Wait until document has been synced to disk.
-
- - **returnNew**: `boolean` (Default: `false`)
-
- If set to `true`, return additionally the complete new documents under the
- attribute `new` in the result.
-
- - **returnOld**: `boolean` (Default: `false`)
-
- If set to `true`, return additionally the complete old documents under the
- attribute `old` in the result.
-
- - **silent**: `boolean` (Default: `false`)
-
- If set to true, an empty object will be returned as response. No meta-data
- will be returned for the created document. This option can be used to save
- some network traffic.
-
- - **overwrite**: `boolean` (Default: `false`)
-
- If set to true, the insert becomes a replace-insert. If a document with the
- same \_key already exists, the new document is not rejected with a unique
- constraint violation but will replace the old document.
-
-If a boolean is passed instead of an options object, it will be interpreted as
-the _returnNew_ option.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.edgeCollection("edges");
-const data = { some: "data" };
-
-const info = await collection.save(
- data,
- "vertices/start-vertex",
- "vertices/end-vertex"
-);
-assert.equal(info._id, "edges/" + info._key);
-const edge = await collection.edge(info);
-assert.equal(edge._key, info._key);
-assert.equal(edge._rev, info._rev);
-assert.equal(edge.some, data.some);
-assert.equal(edge._from, "vertices/start-vertex");
-assert.equal(edge._to, "vertices/end-vertex");
-
-// -- or --
-
-const info = await collection.save({
- some: "data",
- _from: "vertices/start-vertex",
- _to: "vertices/end-vertex"
-});
-// ...
-```
-
-## edgeCollection.edges
-
-`async edgeCollection.edges(documentHandle): Array`
-
-Retrieves a list of all edges of the document with the given _documentHandle_.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
- The handle of the document to retrieve the edges of. This can be either the
- `_id` of a document in the database, the `_key` of an edge in the collection,
- or a document (i.e. an object with an `_id` or `_key` property).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.edgeCollection("edges");
-await collection.import([
- ["_key", "_from", "_to"],
- ["x", "vertices/a", "vertices/b"],
- ["y", "vertices/a", "vertices/c"],
- ["z", "vertices/d", "vertices/a"]
-]);
-const edges = await collection.edges("vertices/a");
-assert.equal(edges.length, 3);
-assert.deepEqual(edges.map(edge => edge._key), ["x", "y", "z"]);
-```
-
-## edgeCollection.inEdges
-
-`async edgeCollection.inEdges(documentHandle): Array`
-
-Retrieves a list of all incoming edges of the document with the given
-_documentHandle_.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
- The handle of the document to retrieve the edges of. This can be either the
- `_id` of a document in the database, the `_key` of an edge in the collection,
- or a document (i.e. an object with an `_id` or `_key` property).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.edgeCollection("edges");
-await collection.import([
- ["_key", "_from", "_to"],
- ["x", "vertices/a", "vertices/b"],
- ["y", "vertices/a", "vertices/c"],
- ["z", "vertices/d", "vertices/a"]
-]);
-const edges = await collection.inEdges("vertices/a");
-assert.equal(edges.length, 1);
-assert.equal(edges[0]._key, "z");
-```
-
-## edgeCollection.outEdges
-
-`async edgeCollection.outEdges(documentHandle): Array`
-
-Retrieves a list of all outgoing edges of the document with the given
-_documentHandle_.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
- The handle of the document to retrieve the edges of. This can be either the
- `_id` of a document in the database, the `_key` of an edge in the collection,
- or a document (i.e. an object with an `_id` or `_key` property).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.edgeCollection("edges");
-await collection.import([
- ["_key", "_from", "_to"],
- ["x", "vertices/a", "vertices/b"],
- ["y", "vertices/a", "vertices/c"],
- ["z", "vertices/d", "vertices/a"]
-]);
-const edges = await collection.outEdges("vertices/a");
-assert.equal(edges.length, 2);
-assert.deepEqual(edges.map(edge => edge._key), ["x", "y"]);
-```
-
-## edgeCollection.traversal
-
-`async edgeCollection.traversal(startVertex, opts): Object`
-
-Performs a traversal starting from the given _startVertex_ and following edges
-contained in this edge collection.
-
-**Arguments**
-
-- **startVertex**: `string`
-
- The handle of the start vertex. This can be either the `_id` of a document in
- the database, the `_key` of an edge in the collection, or a document (i.e. an
- object with an `_id` or `_key` property).
-
-- **opts**: `Object`
-
- See the
- [HTTP API documentation](../../../..//HTTP/Traversal/index.html)
- for details on the additional arguments.
-
- Please note that while _opts.filter_, _opts.visitor_, _opts.init_,
- _opts.expander_ and _opts.sort_ should be strings evaluating to well-formed
- JavaScript code, it's not possible to pass in JavaScript functions directly
- because the code needs to be evaluated on the server and will be transmitted
- in plain text.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.edgeCollection("edges");
-await collection.import([
- ["_key", "_from", "_to"],
- ["x", "vertices/a", "vertices/b"],
- ["y", "vertices/b", "vertices/c"],
- ["z", "vertices/c", "vertices/d"]
-]);
-const result = await collection.traversal("vertices/a", {
- direction: "outbound",
- visitor: "result.vertices.push(vertex._key);",
- init: "result.vertices = [];"
-});
-assert.deepEqual(result.vertices, ["a", "b", "c", "d"]);
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/Indexes.md b/Documentation/Books/Drivers/JS/Reference/Collection/Indexes.md
deleted file mode 100644
index a7f0c52bb1ba..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Collection/Indexes.md
+++ /dev/null
@@ -1,347 +0,0 @@
-
-# Manipulating indexes
-
-These functions implement the
-[HTTP API for manipulating indexes](../../../..//HTTP/Indexes/index.html).
-
-## collection.createIndex
-
-`async collection.createIndex(details): Object`
-
-Creates an arbitrary index on the collection.
-
-**Arguments**
-
-- **details**: `Object`
-
- For information on the possible properties of the _details_ object, see the
- [HTTP API for manipulating indexes](../../../..//HTTP/Indexes/WorkingWith.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-const index = await collection.createIndex({
- type: "hash",
- fields: ["a", "a.b"]
-});
-// the index has been created with the handle `index.id`
-```
-
-## collection.createHashIndex
-
-`async collection.createHashIndex(fields, [opts]): Object`
-
-Creates a hash index on the collection.
-
-**Arguments**
-
-- **fields**: `Array`
-
- An array of names of document fields on which to create the index. If the
- value is a string, it will be wrapped in an array automatically.
-
-- **opts**: `Object` (optional)
-
- Additional options for this index. If the value is a boolean, it will be
- interpreted as _opts.unique_.
-
-For more information on hash indexes, see the
-[HTTP API for hash indexes](../../../..//HTTP/Indexes/Hash.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-
-const index = await collection.createHashIndex("favorite-color");
-// the index has been created with the handle `index.id`
-assert.deepEqual(index.fields, ["favorite-color"]);
-
-// -- or --
-
-const index = await collection.createHashIndex(["favorite-color"]);
-// the index has been created with the handle `index.id`
-assert.deepEqual(index.fields, ["favorite-color"]);
-```
-
-## collection.createSkipList
-
-`async collection.createSkipList(fields, [opts]): Object`
-
-Creates a skiplist index on the collection.
-
-**Arguments**
-
-- **fields**: `Array`
-
- An array of names of document fields on which to create the index. If the
- value is a string, it will be wrapped in an array automatically.
-
-- **opts**: `Object` (optional)
-
- Additional options for this index. If the value is a boolean, it will be
- interpreted as _opts.unique_.
-
-For more information on skiplist indexes, see the
-[HTTP API for skiplist indexes](../../../..//HTTP/Indexes/Skiplist.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-
-const index = await collection.createSkipList("favorite-color");
-// the index has been created with the handle `index.id`
-assert.deepEqual(index.fields, ["favorite-color"]);
-
-// -- or --
-
-const index = await collection.createSkipList(["favorite-color"]);
-// the index has been created with the handle `index.id`
-assert.deepEqual(index.fields, ["favorite-color"]);
-```
-
-## collection.createGeoIndex
-
-`async collection.createGeoIndex(fields, [opts]): Object`
-
-Creates a geo-spatial index on the collection.
-
-**Arguments**
-
-- **fields**: `Array`
-
- An array of names of document fields on which to create the index. Currently,
- geo indexes can cover either exactly one field (e.g. holding a coordinate pair or
- GeoJSON value) or exactly two fields (latitude and longitude). If the value is a
- string, it will be wrapped in an array automatically.
-
-- **opts**: `Object` (optional)
-
- An object containing additional properties of the index.
-
-For more information on the properties of the _opts_ object see the
-[HTTP API for manipulating geo indexes](../../../..//HTTP/Indexes/Geo.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-
-const index = await collection.createGeoIndex(["latitude", "longitude"]);
-// the index has been created with the handle `index.id`
-assert.deepEqual(index.fields, ["longitude", "latitude"]);
-
-// -- or --
-
-const index = await collection.createGeoIndex("location", { geoJson: true });
-// the index has been created with the handle `index.id`
-assert.deepEqual(index.fields, ["location"]);
-```
-
-## collection.createFulltextIndex
-
-`async collection.createFulltextIndex(fields, [minLength]): Object`
-
-Creates a fulltext index on the collection.
-
-**Arguments**
-
-- **fields**: `Array`
-
- An array of names of document fields on which to create the index. Currently,
- fulltext indexes must cover exactly one field. If the value is a string, it
- will be wrapped in an array automatically.
-
-- **minLength** (optional):
-
- Minimum character length of words to index. Uses a server-specific default
- value if not specified.
-
-For more information on fulltext indexes, see
-[the HTTP API for fulltext indexes](../../../..//HTTP/Indexes/Fulltext.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-
-const index = await collection.createFulltextIndex("description");
-// the index has been created with the handle `index.id`
-assert.deepEqual(index.fields, ["description"]);
-
-// -- or --
-
-const index = await collection.createFulltextIndex(["description"]);
-// the index has been created with the handle `index.id`
-assert.deepEqual(index.fields, ["description"]);
-```
-
-## collection.createPersistentIndex
-
-`async collection.createPersistentIndex(fields, [opts]): Object`
-
-Creates a persistent index on the collection. Persistent indexes are similar
-in operation to skiplist indexes, except that they are stored on disk as opposed
-to in memory. This reduces memory usage and DB startup time, with the trade-off
-being that they will always be orders of magnitude slower than in-memory indexes.
-
-**Arguments**
-
-- **fields**: `Array`
-
- An array of names of document fields on which to create the index.
-
-- **opts**: `Object` (optional)
-
- An object containing additional properties of the index.
-
-For more information on the properties of the _opts_ object see
-[the HTTP API for manipulating Persistent indexes](../../../..//HTTP/Indexes/Persistent.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-
-const index = await collection.createPersistentIndex(["name", "email"]);
-// the index has been created with the handle `index.id`
-assert.deepEqual(index.fields, ["name", "email"]);
-```
-
-## collection.index
-
-`async collection.index(indexHandle): Object`
-
-Fetches information about the index with the given _indexHandle_ and returns it.
-
-**Arguments**
-
-- **indexHandle**: `string`
-
- The handle of the index to look up. This can either be a fully-qualified
- identifier or the collection-specific key of the index. If the value is an
- object, its _id_ property will be used instead. Alternatively, the index
- may be looked up by name.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-const index = await collection.createFulltextIndex("description");
-const result = await collection.index(index.id);
-assert.equal(result.id, index.id);
-// result contains the properties of the index
-
-// -- or --
-
-const result = await collection.index(index.id.split("/")[1]);
-assert.equal(result.id, index.id);
-
-// -- or --
-
-const result = await collection.index(index.name);
-assert.equal(result.id, index.id);
-assert.equal(result.name, index.name);
-// result contains the properties of the index
-```
-
-## collection.indexes
-
-`async collection.indexes(): Array`
-
-Fetches a list of all indexes on this collection.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-await collection.createFulltextIndex("description");
-const indexes = await collection.indexes();
-assert.equal(indexes.length, 1);
-// indexes contains information about the index
-```
-
-## collection.dropIndex
-
-`async collection.dropIndex(indexHandle): Object`
-
-Deletes the index with the given _indexHandle_ from the collection.
-
-**Arguments**
-
-- **indexHandle**: `string`
-
- The handle of the index to delete. This can either be a fully-qualified
- identifier or the collection-specific key of the index. If the value is an
- object, its _id_ property will be used instead.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-const index = await collection.createFulltextIndex("description");
-await collection.dropIndex(index.id);
-// the index has been removed from the collection
-
-// -- or --
-
-await collection.dropIndex(index.id.split("/")[1]);
-// the index has been removed from the collection
-```
-
-## collection.createCapConstraint
-
-`async collection.createCapConstraint(size): Object`
-
-Creates a cap constraint index on the collection.
-
-{% hint 'warning' %}
-This method is not available when targeting ArangoDB 3.0 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-**Arguments**
-
-- **size**: `Object`
-
- An object with any of the following properties:
-
- - **size**: `number` (optional)
-
- The maximum number of documents in the collection.
-
- - **byteSize**: `number` (optional)
-
- The maximum size of active document data in the collection (in bytes).
-
-If _size_ is a number, it will be interpreted as _size.size_.
-
-For more information on the properties of the _size_ object see the
-[HTTP API for creating cap constraints](https://docs.arangodb.com/2.8/HttpIndexes/Cap.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-
-const index = await collection.createCapConstraint(20);
-// the index has been created with the handle `index.id`
-assert.equal(index.size, 20);
-
-// -- or --
-
-const index = await collection.createCapConstraint({ size: 20 });
-// the index has been created with the handle `index.id`
-assert.equal(index.size, 20);
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/README.md b/Documentation/Books/Drivers/JS/Reference/Collection/README.md
deleted file mode 100644
index fd0fd49c7e48..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Collection/README.md
+++ /dev/null
@@ -1,130 +0,0 @@
-
-# Collection API
-
-These functions implement the
-[HTTP API for manipulating collections](../../../..//HTTP/Collection/index.html).
-
-The _Collection API_ is implemented by all _Collection_ instances, regardless of
-their specific type. I.e. it represents a shared subset between instances of
-[_DocumentCollection_](DocumentCollection.md),
-[_EdgeCollection_](EdgeCollection.md),
-[_GraphVertexCollection_](../Graph/VertexCollection.md) and
-[_GraphEdgeCollection_](../Graph/EdgeCollection.md).
-
-## Getting information about the collection
-
-See the
-[HTTP API documentation](../../../..//HTTP/Collection/Getting.html)
-for details.
-
-## collection.exists
-
-`async collection.exists(): boolean`
-
-Checks whether the collection exists.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection('some-collection');
-const result = await collection.exists();
-// result indicates whether the collection exists
-```
-
-### collection.get
-
-`async collection.get(): Object`
-
-Retrieves general information about the collection.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection('some-collection');
-const data = await collection.get();
-// data contains general information about the collection
-```
-
-### collection.properties
-
-`async collection.properties(): Object`
-
-Retrieves the collection's properties.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection('some-collection');
-const data = await collection.properties();
-// data contains the collection's properties
-```
-
-### collection.count
-
-`async collection.count(): Object`
-
-Retrieves information about the number of documents in a collection.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection('some-collection');
-const data = await collection.count();
-// data contains the collection's count
-```
-
-### collection.figures
-
-`async collection.figures(): Object`
-
-Retrieves statistics for a collection.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection('some-collection');
-const data = await collection.figures();
-// data contains the collection's figures
-```
-
-### collection.revision
-
-`async collection.revision(): Object`
-
-Retrieves the collection revision ID.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection('some-collection');
-const data = await collection.revision();
-// data contains the collection's revision
-```
-
-### collection.checksum
-
-`async collection.checksum([opts]): Object`
-
-Retrieves the collection checksum.
-
-**Arguments**
-
-- **opts**: `Object` (optional)
-
- For information on the possible options see the
- [HTTP API for getting collection information](../../../..//HTTP/Collection/Getting.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection('some-collection');
-const data = await collection.checksum();
-// data contains the collection's checksum
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/SimpleQueries.md b/Documentation/Books/Drivers/JS/Reference/Collection/SimpleQueries.md
deleted file mode 100644
index d86d7fa4e35f..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Collection/SimpleQueries.md
+++ /dev/null
@@ -1,209 +0,0 @@
-
-# Simple queries
-
-These functions implement the
-[HTTP API for simple queries](../../../..//HTTP/SimpleQuery/index.html).
-
-## collection.all
-
-`async collection.all([opts]): Cursor`
-
-Performs a query to fetch all documents in the collection. Returns a
-[new _Cursor_ instance](../Cursor.md) for the query results.
-
-**Arguments**
-
-- **opts**: `Object` (optional)
-
- For information on the possible options see the
- [HTTP API for returning all documents](../../../..//HTTP/SimpleQuery/index.html#return-all-documents).
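-
-**Examples**
-
-A minimal sketch of iterating over all documents; the collection name and the
-`limit` option are illustrative (see the HTTP API linked above for the
-supported options):
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-const cursor = await collection.all({ limit: 100 });
-// cursor is a Cursor instance for up to 100 documents
-const docs = await cursor.all();
-// docs is an array of the matching documents
-```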
-
-## collection.any
-
-`async collection.any(): Object`
-
-Fetches a document from the collection at random.
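-
-**Examples**
-
-A minimal sketch; the collection name is illustrative:
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-const doc = await collection.any();
-// doc is a random document from the collection
-```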
-
-## collection.first
-
-`async collection.first([opts]): Array`
-
-Performs a query to fetch the first documents in the collection. Returns an
-array of the matching documents.
-
-{% hint 'warning' %}
-This method is not available when targeting ArangoDB 3.0 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-**Arguments**
-
-- **opts**: `Object` (optional)
-
- For information on the possible options see the
- [HTTP API for returning the first document of a collection](https://docs.arangodb.com/2.8/HttpSimpleQuery/#first-document-of-a-collection).
-
- If _opts_ is a number it is treated as _opts.count_.
-
-## collection.last
-
-`async collection.last([opts]): Array`
-
-Performs a query to fetch the last documents in the collection. Returns an array
-of the matching documents.
-
-{% hint 'warning' %}
-This method is not available when targeting ArangoDB 3.0 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-**Arguments**
-
-- **opts**: `Object` (optional)
-
- For information on the possible options see the
- [HTTP API for returning the last document of a collection](https://docs.arangodb.com/2.8/HttpSimpleQuery/#last-document-of-a-collection).
-
- If _opts_ is a number it is treated as _opts.count_.
-
-## collection.byExample
-
-`async collection.byExample(example, [opts]): Cursor`
-
-Performs a query to fetch all documents in the collection matching the given
-_example_. Returns a [new _Cursor_ instance](../Cursor.md) for the query results.
-
-**Arguments**
-
-- **example**: _Object_
-
- An object representing an example for documents to be matched against.
-
-- **opts**: _Object_ (optional)
-
- For information on the possible options see the
- [HTTP API for fetching documents by example](../../../..//HTTP/SimpleQuery/index.html#find-documents-matching-an-example).
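-
-**Examples**
-
-A minimal sketch; the collection name and example fields are illustrative:
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-const cursor = await collection.byExample({ flavor: "strawberry" });
-// cursor is a Cursor instance for the matching documents
-const docs = await cursor.all();
-// docs is an array of all documents with flavor "strawberry"
-```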
-
-## collection.firstExample
-
-`async collection.firstExample(example): Object`
-
-Fetches the first document in the collection matching the given _example_.
-
-**Arguments**
-
-- **example**: _Object_
-
- An object representing an example for documents to be matched against.
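-
-**Examples**
-
-A minimal sketch; the collection name and example fields are illustrative:
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-const doc = await collection.firstExample({ flavor: "strawberry" });
-// doc is the first document matching the example (if any)
-```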
-
-## collection.removeByExample
-
-`async collection.removeByExample(example, [opts]): Object`
-
-Removes all documents in the collection matching the given _example_.
-
-**Arguments**
-
-- **example**: _Object_
-
- An object representing an example for documents to be matched against.
-
-- **opts**: _Object_ (optional)
-
- For information on the possible options see the
- [HTTP API for removing documents by example](../../../..//HTTP/SimpleQuery/index.html#remove-documents-by-example).
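-
-**Examples**
-
-A minimal sketch; the collection name and example fields are illustrative, and
-the shape of the result follows the HTTP API linked above:
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-const result = await collection.removeByExample({ flavor: "strawberry" });
-// result indicates how many documents were removed
-```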
-
-## collection.replaceByExample
-
-`async collection.replaceByExample(example, newValue, [opts]): Object`
-
-Replaces all documents in the collection matching the given _example_ with the
-given _newValue_.
-
-**Arguments**
-
-- **example**: _Object_
-
- An object representing an example for documents to be matched against.
-
-- **newValue**: _Object_
-
- The new value to replace matching documents with.
-
-- **opts**: _Object_ (optional)
-
- For information on the possible options see the
- [HTTP API for replacing documents by example](../../../..//HTTP/SimpleQuery/index.html#replace-documents-by-example).
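-
-**Examples**
-
-A minimal sketch; the collection name, example and replacement values are
-illustrative:
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-const result = await collection.replaceByExample(
-  { flavor: "strawberry" },
-  { flavor: "chocolate", inStock: true }
-);
-// result indicates how many documents were replaced
-```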
-
-## collection.updateByExample
-
-`async collection.updateByExample(example, newValue, [opts]): Object`
-
-Updates (patches) all documents in the collection matching the given _example_
-with the given _newValue_.
-
-**Arguments**
-
-- **example**: _Object_
-
- An object representing an example for documents to be matched against.
-
-- **newValue**: _Object_
-
- The new value to update matching documents with.
-
-- **opts**: _Object_ (optional)
-
- For information on the possible options see the
- [HTTP API for updating documents by example](../../../..//HTTP/SimpleQuery/index.html#update-documents-by-example).
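-
-**Examples**
-
-A minimal sketch; the collection name, example and patch values are
-illustrative:
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-const result = await collection.updateByExample(
-  { flavor: "strawberry" },
-  { inStock: false }
-);
-// result indicates how many documents were updated
-```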
-
-## collection.lookupByKeys
-
-`async collection.lookupByKeys(keys): Array`
-
-Fetches the documents with the given _keys_ from the collection. Returns an
-array of the matching documents.
-
-**Arguments**
-
-- **keys**: _Array_
-
- An array of document keys to look up.
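-
-**Examples**
-
-A minimal sketch; the collection name and keys are illustrative:
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-const docs = await collection.lookupByKeys(["a", "b", "c"]);
-// docs is an array of the documents with the given keys (if they exist)
-```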
-
-## collection.removeByKeys
-
-`async collection.removeByKeys(keys, [opts]): Object`
-
-Deletes the documents with the given _keys_ from the collection.
-
-**Arguments**
-
-- **keys**: _Array_
-
- An array of document keys to delete.
-
-- **opts**: _Object_ (optional)
-
- For information on the possible options see the
- [HTTP API for removing documents by keys](../../../..//HTTP/SimpleQuery/index.html#remove-documents-by-their-keys).
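-
-**Examples**
-
-A minimal sketch; the collection name and keys are illustrative, and the shape
-of the result follows the HTTP API linked above:
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-const result = await collection.removeByKeys(["a", "b", "c"]);
-// result indicates how many documents were removed
-```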
-
-## collection.fulltext
-
-`async collection.fulltext(fieldName, query, [opts]): Cursor`
-
-Performs a fulltext query in the given _fieldName_ on the collection.
-
-**Arguments**
-
-- **fieldName**: _String_
-
- Name of the field to search on documents in the collection.
-
-- **query**: _String_
-
- Fulltext query string to search for.
-
-- **opts**: _Object_ (optional)
-
- For information on the possible options see the
- [HTTP API for fulltext queries](../../../..//HTTP/Indexes/Fulltext.html).
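-
-**Examples**
-
-A minimal sketch; it assumes a fulltext index exists on the illustrative
-`description` field (see `collection.createFulltextIndex`):
-
-```js
-const db = new Database();
-const collection = db.collection("some-collection");
-await collection.createFulltextIndex("description");
-const cursor = await collection.fulltext("description", "potato");
-// cursor is a Cursor instance for the matching documents
-const docs = await cursor.all();
-// docs is an array of documents whose description contains "potato"
-```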
diff --git a/Documentation/Books/Drivers/JS/Reference/Cursor.md b/Documentation/Books/Drivers/JS/Reference/Cursor.md
deleted file mode 100644
index 4b22f5f89653..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Cursor.md
+++ /dev/null
@@ -1,300 +0,0 @@
-
-# Cursor API
-
-_Cursor_ instances provide an abstraction over the HTTP API's limitations.
-Unless a method explicitly exhausts the cursor, the driver will only fetch as
-many batches from the server as necessary. Like the server-side cursors,
-_Cursor_ instances are incrementally depleted as they are read from.
-
-```js
-const db = new Database();
-const cursor = await db.query('FOR x IN 1..5 RETURN x');
-// query result list: [1, 2, 3, 4, 5]
-const value = await cursor.next();
-assert.equal(value, 1);
-// remaining result list: [2, 3, 4, 5]
-```
-
-## cursor.count
-
-`cursor.count: number`
-
-The total number of documents in the query result. This is only available if the
-`count` option was used.
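-
-**Examples**
-
-A minimal sketch using the `count` option of `database.query`; the query
-itself is illustrative:
-
-```js
-const db = new Database();
-const cursor = await db.query("FOR x IN 1..5 RETURN x", {}, { count: true });
-assert.equal(cursor.count, 5);
-```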
-
-## cursor.all
-
-`async cursor.all(): Array`
-
-Exhausts the cursor, then returns an array containing all values in the cursor's
-remaining result list.
-
-**Examples**
-
-```js
-const cursor = await db.query('FOR x IN 1..5 RETURN x');
-const result = await cursor.all()
-// result is an array containing the entire query result
-assert.deepEqual(result, [1, 2, 3, 4, 5]);
-assert.equal(cursor.hasNext(), false);
-```
-
-## cursor.next
-
-`async cursor.next(): Object`
-
-Advances the cursor and returns the next value in the cursor's remaining result
-list. If the cursor has already been exhausted, returns `undefined` instead.
-
-**Examples**
-
-```js
-// query result list: [1, 2, 3, 4, 5]
-const val = await cursor.next();
-assert.equal(val, 1);
-// remaining result list: [2, 3, 4, 5]
-
-const val2 = await cursor.next();
-assert.equal(val2, 2);
-// remaining result list: [3, 4, 5]
-```
-
-## cursor.hasNext
-
-`cursor.hasNext(): boolean`
-
-Returns `true` if the cursor has more values or `false` if the cursor has been
-exhausted.
-
-**Examples**
-
-```js
-await cursor.all(); // exhausts the cursor
-assert.equal(cursor.hasNext(), false);
-```
-
-## cursor.each
-
-`async cursor.each(fn): any`
-
-Advances the cursor by applying the function _fn_ to each value in the cursor's
-remaining result list until the cursor is exhausted or _fn_ explicitly returns
-`false`.
-
-Returns the last return value of _fn_.
-
-Equivalent to _Array.prototype.forEach_ (except async).
-
-**Arguments**
-
-* **fn**: `Function`
-
- A function that will be invoked for each value in the cursor's remaining
- result list until it explicitly returns `false` or the cursor is exhausted.
-
- The function receives the following arguments:
-
- * **value**: `any`
-
- The value in the cursor's remaining result list.
-
- * **index**: `number`
-
- The index of the value in the cursor's remaining result list.
-
- * **cursor**: `Cursor`
-
- The cursor itself.
-
-**Examples**
-
-```js
-const results = [];
-function doStuff(value) {
- const VALUE = value.toUpperCase();
- results.push(VALUE);
- return VALUE;
-}
-
-const cursor = await db.query('FOR x IN ["a", "b", "c"] RETURN x')
-const last = await cursor.each(doStuff);
-assert.deepEqual(results, ['A', 'B', 'C']);
-assert.equal(cursor.hasNext(), false);
-assert.equal(last, 'C');
-```
-
-## cursor.every
-
-`async cursor.every(fn): boolean`
-
-Advances the cursor by applying the function _fn_ to each value in the cursor's
-remaining result list until the cursor is exhausted or _fn_ returns a value that
-evaluates to `false`.
-
-Returns `false` if _fn_ returned a value that evaluates to `false`, or `true`
-otherwise.
-
-Equivalent to _Array.prototype.every_ (except async).
-
-**Arguments**
-
-* **fn**: `Function`
-
- A function that will be invoked for each value in the cursor's remaining
- result list until it returns a value that evaluates to `false` or the cursor
- is exhausted.
-
- The function receives the following arguments:
-
- * **value**: `any`
-
- The value in the cursor's remaining result list.
-
- * **index**: `number`
-
- The index of the value in the cursor's remaining result list.
-
- * **cursor**: `Cursor`
-
- The cursor itself.
-
-**Examples**
-
-```js
-const even = value => value % 2 === 0;
-
-const cursor = await db.query('FOR x IN 2..5 RETURN x');
-const result = await cursor.every(even);
-assert.equal(result, false); // 3 is not even
-assert.equal(cursor.hasNext(), true);
-
-const value = await cursor.next();
-assert.equal(value, 4); // next value after 3
-```
-
-## cursor.some
-
-`async cursor.some(fn): boolean`
-
-Advances the cursor by applying the function _fn_ to each value in the cursor's
-remaining result list until the cursor is exhausted or _fn_ returns a value that
-evaluates to `true`.
-
-Returns `true` if _fn_ returned a value that evaluates to `true`, or `false`
-otherwise.
-
-Equivalent to _Array.prototype.some_ (except async).
-
-**Examples**
-
-```js
-const even = value => value % 2 === 0;
-
-const cursor = await db.query('FOR x IN 1..5 RETURN x');
-const result = await cursor.some(even);
-assert.equal(result, true); // 2 is even
-assert.equal(cursor.hasNext(), true);
-
-const value = await cursor.next();
-assert.equal(value, 3); // next value after 2
-```
-
-## cursor.map
-
-`async cursor.map(fn): Array`
-
-Advances the cursor by applying the function _fn_ to each value in the cursor's
-remaining result list until the cursor is exhausted.
-
-Returns an array of the return values of _fn_.
-
-Equivalent to _Array.prototype.map_ (except async).
-
-**Note**: This creates an array of all return values. It is probably a bad idea
-to do this for very large query result sets.
-
-**Arguments**
-
-* **fn**: `Function`
-
- A function that will be invoked for each value in the cursor's remaining
- result list until the cursor is exhausted.
-
- The function receives the following arguments:
-
- * **value**: `any`
-
- The value in the cursor's remaining result list.
-
- * **index**: `number`
-
- The index of the value in the cursor's remaining result list.
-
- * **cursor**: `Cursor`
-
- The cursor itself.
-
-**Examples**
-
-```js
-const square = value => value * value;
-const cursor = await db.query('FOR x IN 1..5 RETURN x');
-const result = await cursor.map(square);
-assert.equal(result.length, 5);
-assert.deepEqual(result, [1, 4, 9, 16, 25]);
-assert.equal(cursor.hasNext(), false);
-```
-
-## cursor.reduce
-
-`async cursor.reduce(fn, [accu]): any`
-
-Exhausts the cursor by reducing the values in the cursor's remaining result list
-with the given function _fn_. If _accu_ is not provided, the first value in the
-cursor's remaining result list will be used instead (the function will not be
-invoked for that value).
-
-Equivalent to _Array.prototype.reduce_ (except async).
-
-**Arguments**
-
-* **fn**: `Function`
-
- A function that will be invoked for each value in the cursor's remaining
- result list until the cursor is exhausted.
-
- The function receives the following arguments:
-
- * **accu**: `any`
-
- The return value of the previous call to _fn_. If this is the first call,
- _accu_ will be set to the _accu_ value passed to _reduce_ or the first value
- in the cursor's remaining result list.
-
- * **value**: `any`
-
- The value in the cursor's remaining result list.
-
- * **index**: `number`
-
- The index of the value in the cursor's remaining result list.
-
- * **cursor**: `Cursor`
-
- The cursor itself.
-
-**Examples**
-
-```js
-const add = (a, b) => a + b;
-const baseline = 1000;
-
-const cursor = await db.query('FOR x IN 1..5 RETURN x');
-const result = await cursor.reduce(add, baseline)
-assert.equal(result, baseline + 1 + 2 + 3 + 4 + 5);
-assert.equal(cursor.hasNext(), false);
-
-// -- or --
-
-const result = await cursor.reduce(add);
-assert.equal(result, 1 + 2 + 3 + 4 + 5);
-assert.equal(cursor.hasNext(), false);
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Database/AqlUserFunctions.md b/Documentation/Books/Drivers/JS/Reference/Database/AqlUserFunctions.md
deleted file mode 100644
index 6a510310ac57..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Database/AqlUserFunctions.md
+++ /dev/null
@@ -1,83 +0,0 @@
-
-# Managing AQL user functions
-
-These functions implement the
-[HTTP API for managing AQL user functions](../../../..//HTTP/AqlUserFunctions/index.html).
-
-## database.listFunctions
-
-`async database.listFunctions(): Array`
-
-Fetches a list of all AQL user functions registered with the database.
-
-**Examples**
-
-```js
-const db = new Database();
-const functions = await db.listFunctions();
-// functions is a list of function descriptions
-```
-
-## database.createFunction
-
-`async database.createFunction(name, code): Object`
-
-Creates an AQL user function with the given _name_ and _code_ if it does not
-already exist or replaces it if a function with the same name already existed.
-
-**Arguments**
-
-* **name**: `string`
-
- A valid AQL function name, e.g.: `"myfuncs::accounting::calculate_vat"`.
-
-* **code**: `string`
-
- A string evaluating to a JavaScript function (not a JavaScript function
- object).
-
-**Examples**
-
-```js
-const db = new Database();
-await db.createFunction(
- 'ACME::ACCOUNTING::CALCULATE_VAT',
- String(function (price) {
- return price * 0.19;
- })
-);
-// Use the new function in an AQL query with template handler:
-const cursor = await db.query(aql`
- FOR product IN products
- RETURN MERGE(
- {vat: ACME::ACCOUNTING::CALCULATE_VAT(product.price)},
- product
- )
-`);
-// cursor is a cursor for the query result
-```
-
-## database.dropFunction
-
-`async database.dropFunction(name, [group]): Object`
-
-Deletes the AQL user function with the given name from the database.
-
-**Arguments**
-
-* **name**: `string`
-
- The name of the user function to drop.
-
-* **group**: `boolean` (Default: `false`)
-
- If set to `true`, all functions with a name starting with _name_ will be
- deleted; otherwise only the function with the exact name will be deleted.
-
-**Examples**
-
-```js
-const db = new Database();
-await db.dropFunction('ACME::ACCOUNTING::CALCULATE_VAT');
-// the function no longer exists
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Database/CollectionAccess.md b/Documentation/Books/Drivers/JS/Reference/Database/CollectionAccess.md
deleted file mode 100644
index 603695848164..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Database/CollectionAccess.md
+++ /dev/null
@@ -1,103 +0,0 @@
-
-# Accessing collections
-
-These functions implement the
-[HTTP API for accessing collections](../../../..//HTTP/Collection/Getting.html).
-
-## database.collection
-
-`database.collection(collectionName): DocumentCollection`
-
-Returns a _DocumentCollection_ instance for the given collection name.
-
-**Arguments**
-
-- **collectionName**: `string`
-
- Name of the collection.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection("potatoes");
-```
-
-## database.edgeCollection
-
-`database.edgeCollection(collectionName): EdgeCollection`
-
-Returns an _EdgeCollection_ instance for the given collection name.
-
-**Arguments**
-
-- **collectionName**: `string`
-
- Name of the edge collection.
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.edgeCollection("potatoes");
-```
-
-## database.listCollections
-
-`async database.listCollections([excludeSystem]): Array`
-
-Fetches all collections from the database and returns an array of collection
-descriptions.
-
-**Arguments**
-
-- **excludeSystem**: `boolean` (Default: `true`)
-
- Whether system collections should be excluded.
-
-**Examples**
-
-```js
-const db = new Database();
-
-const collections = await db.listCollections();
-// collections is an array of collection descriptions
-// not including system collections
-
-// -- or --
-
-const collections = await db.listCollections(false);
-// collections is an array of collection descriptions
-// including system collections
-```
-
-## database.collections
-
-`async database.collections([excludeSystem]): Array`
-
-Fetches all collections from the database and returns an array of
-_DocumentCollection_ and _EdgeCollection_ instances for the collections.
-
-**Arguments**
-
-- **excludeSystem**: `boolean` (Default: `true`)
-
- Whether system collections should be excluded.
-
-**Examples**
-
-```js
-const db = new Database();
-
-const collections = await db.collections()
-// collections is an array of DocumentCollection
-// and EdgeCollection instances
-// not including system collections
-
-// -- or --
-
-const collections = await db.collections(false)
-// collections is an array of DocumentCollection
-// and EdgeCollection instances
-// including system collections
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Database/DatabaseManipulation.md b/Documentation/Books/Drivers/JS/Reference/Database/DatabaseManipulation.md
deleted file mode 100644
index 7ea7a0818016..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Database/DatabaseManipulation.md
+++ /dev/null
@@ -1,137 +0,0 @@
-
-# Manipulating databases
-
-These functions implement the
-[HTTP API for manipulating databases](../../../..//HTTP/Database/index.html).
-
-## database.createDatabase
-
-`async database.createDatabase(databaseName, [users]): Object`
-
-Creates a new database with the given _databaseName_.
-
-**Arguments**
-
-- **databaseName**: `string`
-
- Name of the database to create.
-
-- **users**: `Array` (optional)
-
- If specified, the array must contain objects with the following properties:
-
- - **username**: `string`
-
- The username of the user to create for the database.
-
- - **passwd**: `string` (Default: empty)
-
- The password of the user.
-
- - **active**: `boolean` (Default: `true`)
-
- Whether the user is active.
-
- - **extra**: `Object` (optional)
-
- An object containing additional user data.
-
-**Examples**
-
-```js
-const db = new Database();
-const info = await db.createDatabase("mydb", [{ username: "root" }]);
-// the database has been created
-```
-
-## database.exists
-
-`async database.exists(): boolean`
-
-Checks whether the database exists.
-
-**Examples**
-
-```js
-const db = new Database();
-const result = await db.exists();
-// result indicates whether the database exists
-```
-
-## database.get
-
-`async database.get(): Object`
-
-Fetches the database description for the active database from the server.
-
-**Examples**
-
-```js
-const db = new Database();
-const info = await db.get();
-// info contains the database description
-```
-
-## database.listDatabases
-
-`async database.listDatabases(): Array`
-
-Fetches all databases from the server and returns an array of their names.
-
-**Examples**
-
-```js
-const db = new Database();
-const names = await db.listDatabases();
-// names is an array of database names
-```
-
-## database.listUserDatabases
-
-`async database.listUserDatabases(): Array`
-
-Fetches all databases accessible to the active user from the server and returns
-an array of their names.
-
-**Examples**
-
-```js
-const db = new Database();
-const names = await db.listUserDatabases();
-// names is an array of database names
-```
-
-## database.dropDatabase
-
-`async database.dropDatabase(databaseName): Object`
-
-Deletes the database with the given _databaseName_ from the server.
-
-```js
-const db = new Database();
-await db.dropDatabase("mydb");
-// database "mydb" no longer exists
-```
-
-## database.truncate
-
-`async database.truncate([excludeSystem]): Object`
-
-Deletes **all documents in all collections** in the active database.
-
-**Arguments**
-
-- **excludeSystem**: `boolean` (Default: `true`)
-
- Whether system collections should be excluded. Note that this option is
- ignored, as truncating some system collections is no longer supported.
-
-**Examples**
-
-```js
-const db = new Database();
-
-await db.truncate();
-// all non-system collections in this database are now empty
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Database/FoxxServices.md b/Documentation/Books/Drivers/JS/Reference/Database/FoxxServices.md
deleted file mode 100644
index 8d11b9cb9c8f..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Database/FoxxServices.md
+++ /dev/null
@@ -1,776 +0,0 @@
-
-# Managing Foxx services
-
-## database.listServices
-
-`async database.listServices([excludeSystem]): Array`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Fetches a list of all installed services.
-
-**Arguments**
-
-- **excludeSystem**: `boolean` (Default: `true`)
-
- Whether system services should be excluded.
-
-**Examples**
-
-```js
-const services = await db.listServices();
-
-// -- or --
-
-const services = await db.listServices(false);
-```
-
-## database.installService
-
-`async database.installService(mount, source, [options]): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Installs a new service.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-- **source**: `Buffer | Readable | File | string`
-
- The service bundle to install.
-
-- **options**: `Object` (optional)
-
- An object with any of the following properties:
-
- - **configuration**: `Object` (optional)
-
- An object mapping configuration option names to values.
-
- - **dependencies**: `Object` (optional)
-
- An object mapping dependency aliases to mount points.
-
- - **development**: `boolean` (Default: `false`)
-
- Whether the service should be installed in development mode.
-
- - **legacy**: `boolean` (Default: `false`)
-
- Whether the service should be installed in legacy compatibility mode.
-
- This overrides the `engines` option in the service manifest (if any).
-
- - **setup**: `boolean` (Default: `true`)
-
- Whether the setup script should be executed.
-
-**Examples**
-
-```js
-const source = fs.createReadStream("./my-foxx-service.zip");
-const info = await db.installService("/hello", source);
-
-// -- or --
-
-const source = fs.readFileSync("./my-foxx-service.zip");
-const info = await db.installService("/hello", source);
-
-// -- or --
-
-const element = document.getElementById("my-file-input");
-const source = element.files[0];
-const info = await db.installService("/hello", source);
-```
-
-## database.replaceService
-
-`async database.replaceService(mount, source, [options]): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Replaces an existing service with a new service by completely removing the old
-service and installing a new service at the same mount point.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-- **source**: `Buffer | Readable | File | string`
-
- The service bundle to replace the existing service with.
-
-- **options**: `Object` (optional)
-
- An object with any of the following properties:
-
- - **configuration**: `Object` (optional)
-
- An object mapping configuration option names to values.
-
- This configuration will replace the existing configuration.
-
- - **dependencies**: `Object` (optional)
-
- An object mapping dependency aliases to mount points.
-
- These dependencies will replace the existing dependencies.
-
- - **development**: `boolean` (Default: `false`)
-
- Whether the new service should be installed in development mode.
-
- - **legacy**: `boolean` (Default: `false`)
-
- Whether the new service should be installed in legacy compatibility mode.
-
- This overrides the `engines` option in the service manifest (if any).
-
- - **teardown**: `boolean` (Default: `true`)
-
- Whether the teardown script of the old service should be executed.
-
- - **setup**: `boolean` (Default: `true`)
-
- Whether the setup script of the new service should be executed.
-
-**Examples**
-
-```js
-const source = fs.createReadStream("./my-foxx-service.zip");
-const info = await db.replaceService("/hello", source);
-
-// -- or --
-
-const source = fs.readFileSync("./my-foxx-service.zip");
-const info = await db.replaceService("/hello", source);
-
-// -- or --
-
-const element = document.getElementById("my-file-input");
-const source = element.files[0];
-const info = await db.replaceService("/hello", source);
-```
-
-## database.upgradeService
-
-`async database.upgradeService(mount, source, [options]): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Replaces an existing service with a new service while retaining the old
-service's configuration and dependencies.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-- **source**: `Buffer | Readable | File | string`
-
- The service bundle to replace the existing service with.
-
-- **options**: `Object` (optional)
-
- An object with any of the following properties:
-
- - **configuration**: `Object` (optional)
-
- An object mapping configuration option names to values.
-
- This configuration will be merged into the existing configuration.
-
- - **dependencies**: `Object` (optional)
-
- An object mapping dependency aliases to mount points.
-
- These dependencies will be merged into the existing dependencies.
-
- - **development**: `boolean` (Default: `false`)
-
- Whether the new service should be installed in development mode.
-
- - **legacy**: `boolean` (Default: `false`)
-
- Whether the new service should be installed in legacy compatibility mode.
-
- This overrides the `engines` option in the service manifest (if any).
-
- - **teardown**: `boolean` (Default: `false`)
-
- Whether the teardown script of the old service should be executed.
-
- - **setup**: `boolean` (Default: `true`)
-
- Whether the setup script of the new service should be executed.
-
-**Examples**
-
-```js
-const source = fs.createReadStream("./my-foxx-service.zip");
-const info = await db.upgradeService("/hello", source);
-
-// -- or --
-
-const source = fs.readFileSync("./my-foxx-service.zip");
-const info = await db.upgradeService("/hello", source);
-
-// -- or --
-
-const element = document.getElementById("my-file-input");
-const source = element.files[0];
-const info = await db.upgradeService("/hello", source);
-```
-
-## database.uninstallService
-
-`async database.uninstallService(mount, [options]): void`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Completely removes a service from the database.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-- **options**: `Object` (optional)
-
- An object with any of the following properties:
-
- - **teardown**: `boolean` (Default: `true`)
-
- Whether the teardown script should be executed.
-
-**Examples**
-
-```js
-await db.uninstallService("/my-service");
-// service was uninstalled
-```
-
-## database.getService
-
-`async database.getService(mount): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Retrieves information about a mounted service.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-**Examples**
-
-```js
-const info = await db.getService("/my-service");
-// info contains detailed information about the service
-```
-
-## database.getServiceConfiguration
-
-`async database.getServiceConfiguration(mount, [minimal]): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Retrieves an object with information about the service's configuration options
-and their current values.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-- **minimal**: `boolean` (Default: `false`)
-
- Only return the current values.
-
-**Examples**
-
-```js
-const config = await db.getServiceConfiguration("/my-service");
-// config contains information about the service's configuration
-```
-
-## database.replaceServiceConfiguration
-
-`async database.replaceServiceConfiguration(mount, configuration, [minimal]): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Replaces the configuration of the given service.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-- **configuration**: `Object`
-
- An object mapping configuration option names to values.
-
-- **minimal**: `boolean` (Default: `false`)
-
- Only return the current values and warnings (if any).
-
- **Note:** when using ArangoDB 3.2.8 or older, enabling this option avoids
- triggering a second request to the database.
-
-**Examples**
-
-```js
-const config = { currency: "USD", locale: "en-US" };
-const info = await db.replaceServiceConfiguration("/my-service", config);
-// info.values contains information about the service's configuration
-// info.warnings contains any validation errors for the configuration
-```
-
-## database.updateServiceConfiguration
-
-`async database.updateServiceConfiguration(mount, configuration, [minimal]): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Updates the configuration of the given service by merging the new values into
-the existing ones.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-- **configuration**: `Object`
-
- An object mapping configuration option names to values.
-
-- **minimal**: `boolean` (Default: `false`)
-
- Only return the current values and warnings (if any).
-
- **Note:** when using ArangoDB 3.2.8 or older, enabling this option avoids
- triggering a second request to the database.
-
-**Examples**
-
-```js
-const config = { locale: "en-US" };
-const info = await db.updateServiceConfiguration("/my-service", config);
-// info.values contains information about the service's configuration
-// info.warnings contains any validation errors for the configuration
-```
-
-## database.getServiceDependencies
-
-`async database.getServiceDependencies(mount, [minimal]): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Retrieves an object with information about the service's dependencies and their
-current mount points.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-- **minimal**: `boolean` (Default: `false`)
-
- Only return the current values and warnings (if any).
-
-**Examples**
-
-```js
-const deps = await db.getServiceDependencies("/my-service");
-// deps contains information about the service's dependencies
-```
-
-## database.replaceServiceDependencies
-
-`async database.replaceServiceDependencies(mount, dependencies, [minimal]): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Replaces the dependencies for the given service.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-- **dependencies**: `Object`
-
- An object mapping dependency aliases to mount points.
-
-- **minimal**: `boolean` (Default: `false`)
-
- Only return the current values and warnings (if any).
-
- **Note:** when using ArangoDB 3.2.8 or older, enabling this option avoids
- triggering a second request to the database.
-
-**Examples**
-
-```js
-const deps = { mailer: "/mailer-api", auth: "/remote-auth" };
-const info = await db.replaceServiceDependencies("/my-service", deps);
-// info.values contains information about the service's dependencies
-// info.warnings contains any validation errors for the dependencies
-```
-
-## database.updateServiceDependencies
-
-`async database.updateServiceDependencies(mount, dependencies, [minimal]): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Updates the dependencies for the given service by merging the new values into
-the existing ones.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-- **dependencies**: `Object`
-
- An object mapping dependency aliases to mount points.
-
-- **minimal**: `boolean` (Default: `false`)
-
- Only return the current values and warnings (if any).
-
- **Note:** when using ArangoDB 3.2.8 or older, enabling this option avoids
- triggering a second request to the database.
-
-**Examples**
-
-```js
-const deps = { mailer: "/mailer-api" };
-const info = await db.updateServiceDependencies("/my-service", deps);
-// info.values contains information about the service's dependencies
-// info.warnings contains any validation errors for the dependencies
-```
-
-## database.enableServiceDevelopmentMode
-
-`async database.enableServiceDevelopmentMode(mount): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Enables development mode for the given service.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-**Examples**
-
-```js
-const info = await db.enableServiceDevelopmentMode("/my-service");
-// the service is now in development mode
-// info contains detailed information about the service
-```
-
-## database.disableServiceDevelopmentMode
-
-`async database.disableServiceDevelopmentMode(mount): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Disables development mode for the given service and commits the service state to
-the database.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-**Examples**
-
-```js
-const info = await db.disableServiceDevelopmentMode("/my-service");
-// the service is now in production mode
-// info contains detailed information about the service
-```
-
-## database.listServiceScripts
-
-`async database.listServiceScripts(mount): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Retrieves a list of the service's scripts.
-
-Returns an object mapping each name to a more readable representation.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-**Examples**
-
-```js
-const scripts = await db.listServiceScripts("/my-service");
-// scripts is an object listing the service scripts
-```
-
-## database.runServiceScript
-
-`async database.runServiceScript(mount, name, [scriptArg]): any`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Runs a service script and returns the result.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-- **name**: `string`
-
- Name of the script to execute.
-
-- **scriptArg**: `any`
-
- Value that will be passed as an argument to the script.
-
-**Examples**
-
-```js
-const result = await db.runServiceScript("/my-service", "setup");
-// result contains the script's exports (if any)
-```
-
-## database.runServiceTests
-
-`async database.runServiceTests(mount, [options]): any`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Runs the tests of a given service and returns a formatted report.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-- **options**: `Object` (optional)
-
- An object with any of the following properties:
-
- - **reporter**: `string` (Default: `default`)
-
- The reporter to use to process the test results.
-
- As of ArangoDB 3.2 the following reporters are supported:
-
- - **stream**: an array of event objects
- - **suite**: nested suite objects with test results
- - **xunit**: JSONML representation of an XUnit report
- - **tap**: an array of TAP event strings
- - **default**: an array of test results
-
- - **idiomatic**: `boolean` (Default: `false`)
-
- Whether the results should be converted to the appropriate `string`
- representation:
-
- - **xunit** reports will be formatted as XML documents
- - **tap** reports will be formatted as TAP streams
- - **stream** reports will be formatted as JSON-LD streams
-
-**Examples**
-
-```js
-const opts = { reporter: "xunit", idiomatic: true };
-const result = await db.runServiceTests("/my-service", opts);
-// result contains the XUnit report as a string
-```
-
-## database.downloadService
-
-`async database.downloadService(mount): Buffer | Blob`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Retrieves a zip bundle containing the service files.
-
-Returns a `Buffer` in Node or `Blob` in the browser version.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-**Examples**
-
-```js
-const bundle = await db.downloadService("/my-service");
-// bundle is a Buffer/Blob of the service bundle
-```
-
-## database.getServiceReadme
-
-`async database.getServiceReadme(mount): string?`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Retrieves the text content of the service's `README` or `README.md` file.
-
-Returns `undefined` if no such file could be found.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-**Examples**
-
-```js
-const readme = await db.getServiceReadme("/my-service");
-// readme is a string containing the service README's
-// text content, or undefined if no README exists
-```
-
-## database.getServiceDocumentation
-
-`async database.getServiceDocumentation(mount): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Retrieves a Swagger API description object for the service installed at the
-given mount point.
-
-**Arguments**
-
-- **mount**: `string`
-
- The service's mount point, relative to the database.
-
-**Examples**
-
-```js
-const spec = await db.getServiceDocumentation("/my-service");
-// spec is a Swagger API description of the service
-```
-
-## database.commitLocalServiceState
-
-`async database.commitLocalServiceState([replace]): void`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.2 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Writes all locally available services to the database and updates any service
-bundles missing in the database.
-
-**Arguments**
-
-- **replace**: `boolean` (Default: `false`)
-
- Also commit outdated services.
-
- This can be used to solve some consistency problems when service bundles are
- missing in the database or were deleted manually.
-
-**Examples**
-
-```js
-await db.commitLocalServiceState();
-// all services available on the coordinator have been written to the db
-
-// -- or --
-
-await db.commitLocalServiceState(true);
-// all service conflicts have been resolved in favor of this coordinator
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Database/GraphAccess.md b/Documentation/Books/Drivers/JS/Reference/Database/GraphAccess.md
deleted file mode 100644
index 2aef447c8760..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Database/GraphAccess.md
+++ /dev/null
@@ -1,40 +0,0 @@
-
-# Accessing graphs
-
-These functions implement the
-[HTTP API for accessing general graphs](../../../..//HTTP/Gharial/index.html).
-
-## database.graph
-
-`database.graph(graphName): Graph`
-
-Returns a _Graph_ instance representing the graph with the given graph name.
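-
-**Examples**
-
-A minimal sketch; the graph name is illustrative:
-
-```js
-const db = new Database();
-const graph = db.graph("some-graph");
-// graph is a Graph instance for the graph named "some-graph"
-```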
-
-## database.listGraphs
-
-`async database.listGraphs(): Array`
-
-Fetches all graphs from the database and returns an array of graph descriptions.
-
-**Examples**
-
-```js
-const db = new Database();
-const graphs = await db.listGraphs();
-// graphs is an array of graph descriptions
-```
-
-## database.graphs
-
-`async database.graphs(): Array`
-
-Fetches all graphs from the database and returns an array of _Graph_ instances
-for the graphs.
-
-**Examples**
-
-```js
-const db = new Database();
-const graphs = await db.graphs();
-// graphs is an array of Graph instances
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Database/HttpRoutes.md b/Documentation/Books/Drivers/JS/Reference/Database/HttpRoutes.md
deleted file mode 100644
index d3ec93a0b6f6..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Database/HttpRoutes.md
+++ /dev/null
@@ -1,38 +0,0 @@
-
-# Arbitrary HTTP routes
-
-## database.route
-
-`database.route([path,] [headers]): Route`
-
-Returns a new _Route_ instance for the given path (relative to the database)
-that can be used to perform arbitrary HTTP requests.
-
-**Arguments**
-
-* **path**: `string` (optional)
-
- The database-relative URL of the route.
-
-* **headers**: `Object` (optional)
-
- Default headers that should be sent with each request to the route.
-
-If _path_ is missing, the route will refer to the base URL of the database.
-
-For more information on _Route_ instances see the
-[_Route API_ below](../Route.md).
-
-**Examples**
-
-```js
-const db = new Database();
-const myFoxxService = db.route('my-foxx-service');
-const response = await myFoxxService.post('users', {
- username: 'admin',
- password: 'hunter2'
-});
-// response.body is the result of
-// POST /_db/_system/my-foxx-service/users
-// with JSON request body '{"username": "admin", "password": "hunter2"}'
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Database/Queries.md b/Documentation/Books/Drivers/JS/Reference/Database/Queries.md
deleted file mode 100644
index 72aac33663a0..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Database/Queries.md
+++ /dev/null
@@ -1,249 +0,0 @@
-
-# Queries
-
-These functions implement the
-[HTTP API for single round-trip AQL queries](../../../..//HTTP/AqlQueryCursor/QueryResults.html)
-as well as the
-[HTTP API for managing queries](../../../..//HTTP/AqlQuery/index.html).
-
-For collection-specific queries see [Simple Queries](../Collection/SimpleQueries.md).
-
-## database.query
-
-`async database.query(query, [bindVars,] [opts]): Cursor`
-
-Performs a database query using the given _query_ and _bindVars_, then returns a
-[new _Cursor_ instance](../Cursor.md) for the result list.
-
-**Arguments**
-
-- **query**: `string | AqlQuery | AqlLiteral`
-
- An AQL query as a string or
- [AQL query object](../Aql.md#aql) or
- [AQL literal](../Aql.md#aqlliteral).
- If the query is an AQL query object, the second argument is treated as the
- _opts_ argument instead of _bindVars_.
-
-- **bindVars**: `Object` (optional)
-
- An object defining the variables to bind the query to.
-
-- **opts**: `Object` (optional)
-
- Additional parameter object that will be passed to the query API.
- Possible keys are _count_ and _options_ (explained below).
-
-If _opts.count_ is set to `true`, the cursor will have a _count_ property set to
-the query result count.
-
-Possible key options in _opts.options_ include: _failOnWarning_, _cache_,
-_profile_ or _skipInaccessibleCollections_.
-For a complete list of query settings please reference the
-[setting options](../../../..//AQL/Invocation/WithArangosh.html#setting-options).
-
-Additionally if _opts.allowDirtyRead_ is set to `true`, the request will
-explicitly permit ArangoDB to return a potentially dirty or stale result and
-arangojs will load balance the request without distinguishing between leaders
-and followers. Note that dirty reads are only supported for read-only queries
-(e.g. not using `INSERT`, `UPDATE`, `REPLACE` or `REMOVE` expressions).
-
-{% hint 'info' %}
-Dirty reads are only available when targeting ArangoDB 3.4 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Additionally _opts.timeout_ can be set to a non-negative number to force the
-request to be cancelled after that number of milliseconds. Note that this only
-closes the connection and does not cancel the query in ArangoDB: the query
-will still be executed to completion and continue to consume resources in the
-database or cluster.
-
-If _query_ is an object with _query_ and _bindVars_ properties, those will be
-used as the values of the respective arguments instead.
-
-**Examples**
-
-```js
-const db = new Database();
-const active = true;
-
-// Using the aql template tag
-const cursor = await db.query(aql`
- FOR u IN _users
- FILTER u.authData.active == ${active}
- RETURN u.user
-`);
-// cursor is a cursor for the query result
-
-// -- or --
-
-// Old-school JS with explicit bindVars:
-db.query("FOR u IN _users FILTER u.authData.active == @active RETURN u.user", {
- active: true
-}).then(function(cursor) {
- // cursor is a cursor for the query result
-});
-```
-
-## aql
-
-`aql(strings, ...args): Object`
-
-Template string handler (aka template tag) for AQL queries. Converts a template
-string to an object that can be passed to `database.query` by converting
-arguments to bind variables.
-
-**Note**: If you want to pass a collection name as a bind variable, you need to
-pass a _Collection_ instance (e.g. what you get by passing the collection name
-to `db.collection`) instead. If you see the error `"array expected as operand to FOR loop"`,
-you're likely passing a collection name instead of a collection instance.
-
-**Examples**
-
-```js
-const userCollection = db.collection("_users");
-const role = "admin";
-
-const query = aql`
- FOR user IN ${userCollection}
- FILTER user.role == ${role}
- RETURN user
-`;
-
-// -- is equivalent to --
-const query = {
- query: "FOR user IN @@value0 FILTER user.role == @value1 RETURN user",
- bindVars: { "@value0": userCollection.name, value1: role }
-};
-```
-
-Note how the aql template tag automatically handles collection references
-(`@@value0` instead of `@value0`) for us so you don't have to worry about
-counting at-symbols.
-
-Because the aql template tag creates actual bindVars instead of inlining values
-directly, it also avoids injection attacks via malicious parameters:
-
-```js
-// malicious user input
-const email = '" || (FOR x IN secrets REMOVE x IN secrets) || "';
-
-// DON'T do this!
-const query = `
- FOR user IN users
- FILTER user.email == "${email}"
- RETURN user
-`;
-// FILTER user.email == "" || (FOR x IN secrets REMOVE x IN secrets) || ""
-
-// instead do this!
-const query = aql`
- FOR user IN users
- FILTER user.email == ${email}
- RETURN user
-`;
-// FILTER user.email == @value0
-```
-
-## database.explain
-
-`async database.explain(query, [bindVars,] [opts]): ExplainResult`
-
-Explains a database query using the given _query_ and _bindVars_ and
-returns one or more plans.
-
-**Arguments**
-
-- **query**: `string | AqlQuery | AqlLiteral`
-
- An AQL query as a string or
- [AQL query object](../Aql.md#aql) or
- [AQL literal](../Aql.md#aqlliteral).
- If the query is an AQL query object, the second argument is treated as the
- _opts_ argument instead of _bindVars_.
-
-- **bindVars**: `Object` (optional)
-
- An object defining the variables to bind the query to.
-
-- **opts**: `Object` (optional)
-
- - **optimizer**: `Object` (optional)
-
- An object with a single property **rules**, a string array of optimizer
- rules to be used for the query.
-
- - **maxNumberOfPlans**: `number` (optional)
-
- Maximum number of plans that the optimizer is allowed to generate.
- Setting this to a low value limits the amount of work the optimizer does.
-
- - **allPlans**: `boolean` (Default: `false`)
-
- If set to true, all possible execution plans will be returned
- as the _plans_ property. Otherwise only the optimal execution plan will
- be returned as the _plan_ property.
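-
-**Examples**
-
-A minimal sketch; the query is illustrative and the exact shape of the result
-follows the HTTP API for explaining queries:
-
-```js
-const db = new Database();
-const result = await db.explain(aql`
-  FOR u IN _users
-  RETURN u.user
-`);
-// result.plan describes the optimal execution plan for the query
-```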
-
-## database.parse
-
-`async database.parse(query): ParseResult`
-
-Parses the given query and returns the result.
-
-**Arguments**
-
-- **query**: `string | AqlQuery | AqlLiteral`
-
- An AQL query as a string or
- [AQL query object](../Aql.md#aql) or
- [AQL literal](../Aql.md#aqlliteral).
- If the query is an AQL query object, its bindVars (if any) will be ignored.
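-
-**Examples**
-
-A minimal sketch; the query is illustrative and the exact shape of the result
-follows the HTTP API for parsing queries:
-
-```js
-const db = new Database();
-const result = await db.parse("FOR doc IN some-collection RETURN doc");
-// result describes the parsed query, e.g. the collections
-// and bind parameters it references
-```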
-
-## database.queryTracking
-
-`async database.queryTracking(): QueryTrackingProperties`
-
-Fetches the query tracking properties.
-
-## database.setQueryTracking
-
-`async database.setQueryTracking(props): void`
-
-Modifies the query tracking properties.
-
-**Arguments**
-
-- **props**: `Partial`
-
- Query tracking properties with new values to set.
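-
-**Examples**
-
-A minimal sketch; the property names shown (`enabled`, `slowQueryThreshold`)
-are taken from the HTTP API for query tracking and are illustrative:
-
-```js
-const db = new Database();
-await db.setQueryTracking({ enabled: true, slowQueryThreshold: 5 });
-const props = await db.queryTracking();
-// props reflects the updated query tracking properties
-```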
-
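-**Examples**
-
-A minimal sketch; the property names shown are the standard ArangoDB query
-tracking properties and are assumed to be accepted here as-is:
-
-```js
-const db = new Database();
-const props = await db.queryTracking();
-// props contains the current query tracking properties
-await db.setQueryTracking({
-  enabled: true,
-  trackSlowQueries: true,
-  slowQueryThreshold: 5 // seconds
-});
-// slow query tracking is now enabled for queries taking longer than 5 seconds
-```
-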
-## database.listRunningQueries
-
-`async database.listRunningQueries(): Array`
-
-Fetches a list of information for all currently running queries.
-
-## database.listSlowQueries
-
-`async database.listSlowQueries(): Array`
-
-Fetches a list of information for all recent slow queries.
-
-## database.clearSlowQueries
-
-`async database.clearSlowQueries(): void`
-
-Clears the list of recent slow queries.
-
-## database.killQuery
-
-`async database.killQuery(queryId): void`
-
-Kills a running query with the given ID.
-
-**Arguments**
-
-- **queryId**: `string`
-
- The ID of a currently running query.
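-
-**Examples**
-
-A minimal sketch of finding and killing a long-running query; the `id` and
-`runTime` fields on the returned query descriptions are assumed here:
-
-```js
-const db = new Database();
-const queries = await db.listRunningQueries();
-for (const query of queries) {
-  if (query.runTime > 60) {
-    // kill any query that has been running for more than a minute
-    await db.killQuery(query.id);
-  }
-}
-```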
diff --git a/Documentation/Books/Drivers/JS/Reference/Database/README.md b/Documentation/Books/Drivers/JS/Reference/Database/README.md
deleted file mode 100644
index a15c090d65c9..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Database/README.md
+++ /dev/null
@@ -1,316 +0,0 @@
-
-# Database API
-
-## new Database
-
-`new Database([config]): Database`
-
-Creates a new _Database_ instance.
-
-If _config_ is a string, it will be interpreted as _config.url_.
-
-**Arguments**
-
-- **config**: `Object` (optional)
-
- An object with the following properties:
-
- - **url**: `string | Array` (Default: `http://localhost:8529`)
-
- Base URL of the ArangoDB server or list of server URLs.
-
- When working with a cluster or a single server with leader/follower failover,
- [the method `db.acquireHostList`](#databaseacquirehostlist)
- can be used to automatically pick up additional coordinators/followers at
- any point.
-
- When running ArangoDB on a unix socket, e.g. `/tmp/arangodb.sock`, the
- following URL formats are supported for unix sockets:
-
- - `unix:///tmp/arangodb.sock` (no SSL)
- - `http+unix:///tmp/arangodb.sock` (or `https+unix://` for SSL)
- - `http://unix:/tmp/arangodb.sock` (or `https://unix:` for SSL)
-
- Additionally `ssl` and `tls` are treated as synonymous with `https` and
- `tcp` is treated as synonymous with `http`, so the following URLs are
- considered identical:
-
- - `tcp://localhost:8529` and `http://localhost:8529`
- - `ssl://localhost:8529` and `https://localhost:8529`
- - `tcp+unix:///tmp/arangodb.sock` and `http+unix:///tmp/arangodb.sock`
- - `ssl+unix:///tmp/arangodb.sock` and `https+unix:///tmp/arangodb.sock`
- - `tcp://unix:/tmp/arangodb.sock` and `http://unix:/tmp/arangodb.sock`
- - `ssl://unix:/tmp/arangodb.sock` and `https://unix:/tmp/arangodb.sock`
-
- If you want to use ArangoDB with authentication, see
- _useBasicAuth_ or _useBearerAuth_ methods.
-
- If you need to support self-signed HTTPS certificates, you may have to add
- your certificates to the _agentOptions_, e.g.:
-
- ```js
- ...
- agentOptions: {
- ca: [
- fs.readFileSync(".ssl/sub.class1.server.ca.pem"),
- fs.readFileSync(".ssl/ca.pem")
- ]
- }
- ```
-
- Although this is **strongly discouraged**, it's also possible to disable
- HTTPS certificate validation entirely, but note this has
- **extremely dangerous** security implications:
-
- ```js
- ...
- agentOptions: {
- rejectUnauthorized: false
- }
- ```
-
- - **isAbsolute**: `boolean` (Default: `false`)
-
- If this option is explicitly set to `true`, the _url_ will be treated as the
- absolute database path and arangojs will not append the database path to it.
-
- **Note:** This makes it impossible to switch databases with _useDatabase_
- or using _acquireHostList_. This is only intended to be used as an escape
- hatch when working with standalone servers exposing a single database API
- from behind a reverse proxy, which is not a recommended setup.
-
- - **arangoVersion**: `number` (Default: `30000`)
-
- Numeric representation of the ArangoDB version the driver should expect.
- The format is defined as `XYYZZ` where `X` is the major version, `Y` is
- the zero-filled two-digit minor version and `Z` is the zero-filled two-digit
- bugfix version, e.g. `30102` for 3.1.2, `20811` for 2.8.11.
-
- Depending on this value certain methods may become unavailable or change
- their behavior to remain compatible with different versions of ArangoDB.
-
- - **headers**: `Object` (optional)
-
- An object with additional headers to send with every request.
-
- Header names should always be lowercase. If an `"authorization"` header is
- provided, it will be overridden when using _useBasicAuth_ or _useBearerAuth_.
-
- - **agent**: `Agent` (optional)
-
- An http Agent instance to use for connections.
-
- By default a new
- [`http.Agent`](https://nodejs.org/api/http.html#http_new_agent_options) (or
- https.Agent) instance will be created using the _agentOptions_.
-
- This option has no effect when using the browser version of arangojs.
-
- - **agentOptions**: `Object` (Default: see below)
-
- An object with options for the agent. This will be ignored if _agent_ is
- also provided.
-
- Default: `{maxSockets: 3, keepAlive: true, keepAliveMsecs: 1000}`.
-    Browser default: `{maxSockets: 3, keepAlive: false}`.
-
- The option `maxSockets` can also be used to limit how many requests
- arangojs will perform concurrently. The maximum number of requests is
- equal to `maxSockets * 2` with `keepAlive: true` or
- equal to `maxSockets` with `keepAlive: false`.
-
- In the browser version of arangojs this option can be used to pass
- additional options to the underlying calls of the
- [`xhr`](https://www.npmjs.com/package/xhr) module.
-
- - **loadBalancingStrategy**: `string` (Default: `"NONE"`)
-
- Determines the behavior when multiple URLs are provided:
-
- - `NONE`: No load balancing. All requests will be handled by the first
- URL in the list until a network error is encountered. On network error,
- arangojs will advance to using the next URL in the list.
-
- - `ONE_RANDOM`: Randomly picks one URL from the list initially, then
- behaves like `NONE`.
-
- - `ROUND_ROBIN`: Every sequential request uses the next URL in the list.
-
- - **maxRetries**: `number` or `false` (Default: `0`)
-
- Determines the behavior when a request fails because the underlying
- connection to the server could not be opened
- (i.e. [`ECONNREFUSED` in Node.js](https://nodejs.org/api/errors.html#errors_common_system_errors)):
-
- - `false`: the request fails immediately.
-
-  - `0`: the request is retried until a server can be reached, but at most as
-    many times as there are known servers (including the initial failed
-    request).
-
-  - any other number: the request is retried until a server can be reached or
-    the request has been retried a total of `maxRetries` times (not including
-    the initial failed request).
-
- When working with a single server without leader/follower failover, the
- retries (if any) will be made to the same server.
-
- This setting currently has no effect when using arangojs in a browser.
-
- **Note**: Requests bound to a specific server (e.g. fetching query results)
- will never be retried automatically and ignore this setting.
-
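-**Examples**
-
-A minimal sketch of creating a _Database_ instance against a cluster, using
-only options documented above; the URLs are placeholders:
-
-```js
-const db = new Database({
-  url: [
-    "http://coordinator1:8529",
-    "http://coordinator2:8529"
-  ],
-  loadBalancingStrategy: "ROUND_ROBIN",
-  maxRetries: 3
-});
-```
-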
-## database.acquireHostList
-
-`async database.acquireHostList(): this`
-
-Updates the URL list by requesting a list of all coordinators in the cluster
-and adding any endpoints not initially specified in the _url_ configuration.
-
-For long-running processes communicating with an ArangoDB cluster it is
-recommended to run this method repeatedly (e.g. once per hour) to make sure
-new coordinators are picked up correctly and can be used for fail-over or
-load balancing.
-
-**Note**: This method can not be used when the arangojs instance was created
-with `isAbsolute: true`.
-
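-**Examples**
-
-A minimal sketch of refreshing the host list periodically, as recommended
-above; the URL is a placeholder:
-
-```js
-const db = new Database({ url: "http://coordinator1:8529" });
-// Pick up any additional coordinators once per hour
-setInterval(() => {
-  db.acquireHostList();
-}, 1000 * 60 * 60);
-```
-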
-## database.useDatabase
-
-`database.useDatabase(databaseName): this`
-
-Updates the _Database_ instance and its connection string to use the given
-_databaseName_, then returns itself.
-
-**Note**: This method can not be used when the arangojs instance was created
-with `isAbsolute: true`.
-
-**Arguments**
-
-- **databaseName**: `string`
-
- The name of the database to use.
-
-**Examples**
-
-```js
-const db = new Database();
-db.useDatabase("test");
-// The database instance now uses the database "test".
-```
-
-## database.useBasicAuth
-
-`database.useBasicAuth([username, [password]]): this`
-
-Updates the _Database_ instance's `authorization` header to use Basic
-authentication with the given _username_ and _password_, then returns itself.
-
-**Arguments**
-
-- **username**: `string` (Default: `"root"`)
-
- The username to authenticate with.
-
-- **password**: `string` (Default: `""`)
-
- The password to authenticate with.
-
-**Examples**
-
-```js
-const db = new Database();
-db.useDatabase("test");
-db.useBasicAuth("admin", "hunter2");
-// The database instance now uses the database "test"
-// with the username "admin" and password "hunter2".
-```
-
-## database.useBearerAuth
-
-`database.useBearerAuth(token): this`
-
-Updates the _Database_ instance's `authorization` header to use Bearer
-authentication with the given authentication token, then returns itself.
-
-**Arguments**
-
-- **token**: `string`
-
- The token to authenticate with.
-
-**Examples**
-
-```js
-const db = new Database();
-db.useBearerAuth("keyboardcat");
-// The database instance now uses Bearer authentication.
-```
-
-## database.login
-
-`async database.login([username, [password]]): string`
-
-Validates the given database credentials and exchanges them for an
-authentication token, then uses the authentication token for future
-requests and returns it.
-
-**Arguments**
-
-- **username**: `string` (Default: `"root"`)
-
- The username to authenticate with.
-
-- **password**: `string` (Default: `""`)
-
- The password to authenticate with.
-
-**Examples**
-
-```js
-const db = new Database();
-db.useDatabase("test");
-await db.login("admin", "hunter2");
-// The database instance now uses the database "test"
-// with an authentication token for the "admin" user.
-```
-
-## database.version
-
-`async database.version(): Object`
-
-Fetches the ArangoDB version information for the active database from the server.
-
-**Examples**
-
-```js
-const db = new Database();
-const version = await db.version();
-// the version object contains the ArangoDB version information.
-```
-
-## database.close
-
-`database.close(): void`
-
-Closes all active connections of the database instance.
-Can be used to clean up idling connections during longer periods of inactivity.
-
-**Note**: This method currently has no effect in the browser version of arangojs.
-
-**Examples**
-
-```js
-const db = new Database();
-const sessions = db.collection("sessions");
-// Clean up expired sessions once per hour
-setInterval(async () => {
- await db.query(aql`
- FOR session IN ${sessions}
- FILTER session.expires < DATE_NOW()
- REMOVE session IN ${sessions}
- `);
- // Make sure to close the connections because they're no longer used
- db.close();
-}, 1000 * 60 * 60);
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Database/Transactions.md b/Documentation/Books/Drivers/JS/Reference/Database/Transactions.md
deleted file mode 100644
index 3fc5ecc19f4c..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Database/Transactions.md
+++ /dev/null
@@ -1,103 +0,0 @@
-
-# Transactions
-
-This function implements the
-[HTTP API for transactions](../../../../HTTP/Transaction/index.html).
-
-## database.transaction
-
-`async database.transaction(collections, action, [params, [options]]): Object`
-
-Performs a server-side transaction and returns its return value.
-
-**Arguments**
-
-- **collections**: `Object`
-
- An object with the following properties:
-
- - **read**: `Array` (optional)
-
- An array of names (or a single name) of collections that will be read from
- during the transaction.
-
- - **write**: `Array` (optional)
-
- An array of names (or a single name) of collections that will be written to
- or read from during the transaction.
-
-- **action**: `string`
-
- A string evaluating to a JavaScript function to be executed on the server.
-
-  {% hint 'warning' %}
- This function will be executed on the server inside ArangoDB and can not use
- the arangojs driver or any variables other than those passed as _params_.
- For accessing the database from within ArangoDB, see the documentation for the
-  [`@arangodb` module in ArangoDB](../../../../Manual/Appendix/JavaScriptModules/ArangoDB.html).
- {% endhint %}
-
-- **params**: `Object` (optional)
-
-  Available as the variable `params` when the _action_ function is being
-  executed on the server. Check the example below.
-
-- **options**: `Object` (optional)
-
- An object with any of the following properties:
-
- - **lockTimeout**: `number` (optional)
-
- Determines how long the database will wait while attempting to gain locks on
- collections used by the transaction before timing out.
-
- - **waitForSync**: `boolean` (optional)
-
- Determines whether to force the transaction to write all data to disk before returning.
-
- - **maxTransactionSize**: `number` (optional)
-
- Determines the transaction size limit in bytes. Honored by the RocksDB storage engine only.
-
- - **intermediateCommitCount**: `number` (optional)
-
- Determines the maximum number of operations after which an intermediate commit is
- performed automatically. Honored by the RocksDB storage engine only.
-
- - **intermediateCommitSize**: `number` (optional)
-
-    Determines the maximum total size of operations after which an intermediate commit is
- performed automatically. Honored by the RocksDB storage engine only.
-
-If _collections_ is an array or string, it will be treated as
-_collections.write_.
-
-Please note that while _action_ should be a string evaluating to a well-formed
-JavaScript function, it's not possible to pass in a JavaScript function directly
-because the function needs to be evaluated on the server and will be transmitted
-in plain text.
-
-For more information on transactions, see the
-[HTTP API documentation for transactions](../../../../HTTP/Transaction/index.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const action = String(function(params) {
- // This code will be executed inside ArangoDB!
-  const { db, aql } = require("@arangodb");
- return db
- ._query(
- aql`
- FOR user IN _users
- FILTER user.age > ${params.age}
-      RETURN user.user
- `
- )
- .toArray();
-});
-
-const result = await db.transaction({ read: "_users" }, action, { age: 12 });
-// result contains the return value of the action
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Database/ViewAccess.md b/Documentation/Books/Drivers/JS/Reference/Database/ViewAccess.md
deleted file mode 100644
index 1d994262560a..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Database/ViewAccess.md
+++ /dev/null
@@ -1,71 +0,0 @@
-
-# Accessing views
-
-These functions implement the
-[HTTP API for accessing views](../../../../HTTP/Views/Getting.html).
-
-## database.arangoSearchView
-
-`database.arangoSearchView(viewName): ArangoSearchView`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.4 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Returns an _ArangoSearchView_ instance for the given view name.
-
-**Arguments**
-
-- **viewName**: `string`
-
- Name of the arangosearch view.
-
-**Examples**
-
-```js
-const db = new Database();
-const view = db.arangoSearchView("potatoes");
-```
-
-## database.listViews
-
-`async database.listViews(): Array`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.4 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Fetches all views from the database and returns an array of view
-descriptions.
-
-**Examples**
-
-```js
-const db = new Database();
-
-const views = await db.listViews();
-// views is an array of view descriptions
-```
-
-## database.views
-
-`async database.views([excludeSystem]): Array`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.4 or later,
-see [Compatibility](../../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Fetches all views from the database and returns an array of
-_ArangoSearchView_ instances for the views.
-
-**Examples**
-
-```js
-const db = new Database();
-
-const views = await db.views();
-// views is an array of ArangoSearchView instances
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Graph/EdgeCollection.md b/Documentation/Books/Drivers/JS/Reference/Graph/EdgeCollection.md
deleted file mode 100644
index 7e9953c5a453..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Graph/EdgeCollection.md
+++ /dev/null
@@ -1,310 +0,0 @@
-
-# GraphEdgeCollection API
-
-The _GraphEdgeCollection API_ extends the
-[_Collection API_](../Collection/README.md) with the following methods.
-
-## graphEdgeCollection.remove
-
-`async graphEdgeCollection.remove(documentHandle): Object`
-
-Deletes the edge with the given _documentHandle_ from the collection.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
-  The handle of the edge to remove. This can be either the `_id` or the `_key`
- of an edge in the collection, or an edge (i.e. an object with an `_id` or
- `_key` property).
-
-**Examples**
-
-```js
-const graph = db.graph("some-graph");
-const collection = graph.edgeCollection("edges");
-
-await collection.remove("some-key");
-// document 'edges/some-key' no longer exists
-
-// -- or --
-
-await collection.remove("edges/some-key");
-// document 'edges/some-key' no longer exists
-```
-
-## graphEdgeCollection.documentExists
-
-`async graphEdgeCollection.documentExists(documentHandle): boolean`
-
-Checks whether the edge with the given _documentHandle_ exists.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
-  The handle of the edge to check. This can be either the `_id` or the
-  `_key` of an edge in the collection, or an edge (i.e. an object with an
- `_id` or `_key` property).
-
-**Examples**
-
-```js
-const graph = db.graph("some-graph");
-const collection = graph.edgeCollection("edges");
-
-const exists = await collection.documentExists("some-key");
-if (exists === false) {
- // the edge does not exist
-}
-```
-
-## graphEdgeCollection.document
-
-`async graphEdgeCollection.document(documentHandle, [opts]): Object`
-
-Alias: `graphEdgeCollection.edge`.
-
-Retrieves the edge with the given _documentHandle_ from the collection.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
- The handle of the edge to retrieve. This can be either the `_id` or the `_key`
- of an edge in the collection, or an edge (i.e. an object with an `_id` or
- `_key` property).
-
-- **opts**: `Object` (optional)
-
- If _opts_ is set, it must be an object with any of the following properties:
-
- - **graceful**: `boolean` (Default: `false`)
-
- If set to `true`, the method will return `null` instead of throwing an
- error if the edge does not exist.
-
- - **allowDirtyRead**: `boolean` (Default: `false`)
-
- {% hint 'info' %}
- This option is only available when targeting ArangoDB 3.4 or later,
- see [Compatibility](../../GettingStarted/README.md#compatibility).
- {% endhint %}
-
- If set to `true`, the request will explicitly permit ArangoDB to return a
- potentially dirty or stale result and arangojs will load balance the
- request without distinguishing between leaders and followers.
-
-If a boolean is passed instead of an options object, it will be interpreted as
-the _graceful_ option.
-
-**Examples**
-
-```js
-const graph = db.graph("some-graph");
-const collection = graph.edgeCollection("edges");
-
-const edge = await collection.document("some-key");
-// the edge exists
-assert.equal(edge._key, "some-key");
-assert.equal(edge._id, "edges/some-key");
-
-// -- or --
-
-const edge = await collection.document("edges/some-key");
-// the edge exists
-assert.equal(edge._key, "some-key");
-assert.equal(edge._id, "edges/some-key");
-
-// -- or --
-
-const edge = await collection.document("some-key", true);
-if (edge === null) {
- // the edge does not exist
-}
-```
-
-## graphEdgeCollection.save
-
-`async graphEdgeCollection.save(data, [fromId, toId]): Object`
-
-Creates a new edge between the vertices _fromId_ and _toId_ with the given
-_data_.
-
-**Arguments**
-
-- **data**: `Object`
-
- The data of the new edge. If _fromId_ and _toId_ are not specified, the _data_
-  needs to contain the properties `_from` and `_to`.
-
-- **fromId**: `string` (optional)
-
- The handle of the start vertex of this edge. This can be either the `_id` of a
- document in the database, the `_key` of an edge in the collection, or a
- document (i.e. an object with an `_id` or `_key` property).
-
-- **toId**: `string` (optional)
-
- The handle of the end vertex of this edge. This can be either the `_id` of a
- document in the database, the `_key` of an edge in the collection, or a
- document (i.e. an object with an `_id` or `_key` property).
-
-**Examples**
-
-```js
-const db = new Database();
-const graph = db.graph("some-graph");
-const collection = graph.edgeCollection("edges");
-const edge = await collection.save(
- { some: "data" },
- "vertices/start-vertex",
- "vertices/end-vertex"
-);
-assert.equal(edge._id, "edges/" + edge._key);
-assert.equal(edge.some, "data");
-assert.equal(edge._from, "vertices/start-vertex");
-assert.equal(edge._to, "vertices/end-vertex");
-```
-
-## graphEdgeCollection.edges
-
-`async graphEdgeCollection.edges(documentHandle): Array`
-
-Retrieves a list of all edges of the document with the given _documentHandle_.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
- The handle of the document to retrieve the edges of. This can be either the
- `_id` of a document in the database, the `_key` of an edge in the collection,
- or a document (i.e. an object with an `_id` or `_key` property).
-
-**Examples**
-
-```js
-const db = new Database();
-const graph = db.graph("some-graph");
-const collection = graph.edgeCollection("edges");
-await collection.import([
- ["_key", "_from", "_to"],
- ["x", "vertices/a", "vertices/b"],
- ["y", "vertices/a", "vertices/c"],
- ["z", "vertices/d", "vertices/a"]
-]);
-const edges = await collection.edges("vertices/a");
-assert.equal(edges.length, 3);
-assert.deepEqual(edges.map(edge => edge._key), ["x", "y", "z"]);
-```
-
-## graphEdgeCollection.inEdges
-
-`async graphEdgeCollection.inEdges(documentHandle): Array`
-
-Retrieves a list of all incoming edges of the document with the given
-_documentHandle_.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
- The handle of the document to retrieve the edges of. This can be either the
- `_id` of a document in the database, the `_key` of an edge in the collection,
- or a document (i.e. an object with an `_id` or `_key` property).
-
-**Examples**
-
-```js
-const db = new Database();
-const graph = db.graph("some-graph");
-const collection = graph.edgeCollection("edges");
-await collection.import([
- ["_key", "_from", "_to"],
- ["x", "vertices/a", "vertices/b"],
- ["y", "vertices/a", "vertices/c"],
- ["z", "vertices/d", "vertices/a"]
-]);
-const edges = await collection.inEdges("vertices/a");
-assert.equal(edges.length, 1);
-assert.equal(edges[0]._key, "z");
-```
-
-## graphEdgeCollection.outEdges
-
-`async graphEdgeCollection.outEdges(documentHandle): Array`
-
-Retrieves a list of all outgoing edges of the document with the given
-_documentHandle_.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
- The handle of the document to retrieve the edges of. This can be either the
- `_id` of a document in the database, the `_key` of an edge in the collection,
- or a document (i.e. an object with an `_id` or `_key` property).
-
-**Examples**
-
-```js
-const db = new Database();
-const graph = db.graph("some-graph");
-const collection = graph.edgeCollection("edges");
-await collection.import([
- ["_key", "_from", "_to"],
- ["x", "vertices/a", "vertices/b"],
- ["y", "vertices/a", "vertices/c"],
- ["z", "vertices/d", "vertices/a"]
-]);
-const edges = await collection.outEdges("vertices/a");
-assert.equal(edges.length, 2);
-assert.deepEqual(edges.map(edge => edge._key), ["x", "y"]);
-```
-
-## graphEdgeCollection.traversal
-
-`async graphEdgeCollection.traversal(startVertex, opts): Object`
-
-Performs a traversal starting from the given _startVertex_ and following edges
-contained in this edge collection.
-
-**Arguments**
-
-- **startVertex**: `string`
-
- The handle of the start vertex. This can be either the `_id` of a document in
- the database, the `_key` of an edge in the collection, or a document (i.e. an
- object with an `_id` or `_key` property).
-
-- **opts**: `Object`
-
- See
-  [the HTTP API documentation](../../../../HTTP/Traversal/index.html)
- for details on the additional arguments.
-
- Please note that while _opts.filter_, _opts.visitor_, _opts.init_,
- _opts.expander_ and _opts.sort_ should be strings evaluating to well-formed
- JavaScript code, it's not possible to pass in JavaScript functions directly
- because the code needs to be evaluated on the server and will be transmitted
- in plain text.
-
-**Examples**
-
-```js
-const db = new Database();
-const graph = db.graph("some-graph");
-const collection = graph.edgeCollection("edges");
-await collection.import([
- ["_key", "_from", "_to"],
- ["x", "vertices/a", "vertices/b"],
- ["y", "vertices/b", "vertices/c"],
- ["z", "vertices/c", "vertices/d"]
-]);
-const result = await collection.traversal("vertices/a", {
- direction: "outbound",
- visitor: "result.vertices.push(vertex._key);",
- init: "result.vertices = [];"
-});
-assert.deepEqual(result.vertices, ["a", "b", "c", "d"]);
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Graph/Edges.md b/Documentation/Books/Drivers/JS/Reference/Graph/Edges.md
deleted file mode 100644
index 5147a716bd4a..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Graph/Edges.md
+++ /dev/null
@@ -1,165 +0,0 @@
-
-# Manipulating edges
-
-## graph.edgeCollection
-
-`graph.edgeCollection(collectionName): GraphEdgeCollection`
-
-Returns a new [_GraphEdgeCollection_ instance](EdgeCollection.md) with
-the given name bound to this graph.
-
-**Arguments**
-
-* **collectionName**: `string`
-
- Name of the edge collection.
-
-**Examples**
-
-```js
-const db = new Database();
-// assuming the collections "edges" and "vertices" exist
-const graph = db.graph("some-graph");
-const collection = graph.edgeCollection("edges");
-assert.equal(collection.name, "edges");
-// collection is a GraphEdgeCollection
-```
-
-## graph.addEdgeDefinition
-
-`async graph.addEdgeDefinition(definition): Object`
-
-Adds the given edge definition _definition_ to the graph.
-
-**Arguments**
-
-* **definition**: `Object`
-
- For more information on edge definitions see
-  [the HTTP API for managing graphs](../../../../HTTP/Gharial/Management.html).
-
-**Examples**
-
-```js
-const db = new Database();
-// assuming the collections "edges" and "vertices" exist
-const graph = db.graph('some-graph');
-await graph.addEdgeDefinition({
- collection: 'edges',
- from: ['vertices'],
- to: ['vertices']
-});
-// the edge definition has been added to the graph
-```
-
-## graph.replaceEdgeDefinition
-
-`async graph.replaceEdgeDefinition(collectionName, definition): Object`
-
-Replaces the edge definition for the edge collection named _collectionName_ with
-the given _definition_.
-
-**Arguments**
-
-* **collectionName**: `string`
-
- Name of the edge collection to replace the definition of.
-
-* **definition**: `Object`
-
- For more information on edge definitions see
-  [the HTTP API for managing graphs](../../../../HTTP/Gharial/Management.html).
-
-**Examples**
-
-```js
-const db = new Database();
-// assuming the collections "edges", "vertices" and "more-vertices" exist
-const graph = db.graph('some-graph');
-await graph.replaceEdgeDefinition('edges', {
- collection: 'edges',
- from: ['vertices'],
- to: ['more-vertices']
-});
-// the edge definition has been modified
-```
-
-## graph.removeEdgeDefinition
-
-`async graph.removeEdgeDefinition(definitionName, [dropCollection]): Object`
-
-Removes the edge definition with the given _definitionName_ from the graph.
-
-**Arguments**
-
-* **definitionName**: `string`
-
- Name of the edge definition to remove from the graph.
-
-* **dropCollection**: `boolean` (optional)
-
- If set to `true`, the edge collection associated with the definition will also
- be deleted from the database.
-
-**Examples**
-
-```js
-const db = new Database();
-const graph = db.graph('some-graph');
-
-await graph.removeEdgeDefinition('edges')
-// the edge definition has been removed
-
-// -- or --
-
-await graph.removeEdgeDefinition('edges', true)
-// the edge definition has been removed
-// and the edge collection "edges" has been dropped
-// this may have been a bad idea
-```
-
-## graph.traversal
-
-`async graph.traversal(startVertex, opts): Object`
-
-Performs a traversal starting from the given _startVertex_ and following edges
-contained in any of the edge collections of this graph.
-
-**Arguments**
-
-* **startVertex**: `string`
-
- The handle of the start vertex. This can be either the `_id` of a document in
- the graph or a document (i.e. an object with an `_id` property).
-
-* **opts**: `Object`
-
- See
-  [the HTTP API documentation](../../../../HTTP/Traversal/index.html)
- for details on the additional arguments.
-
- Please note that while _opts.filter_, _opts.visitor_, _opts.init_,
- _opts.expander_ and _opts.sort_ should be strings evaluating to well-formed
- JavaScript functions, it's not possible to pass in JavaScript functions
- directly because the functions need to be evaluated on the server and will be
- transmitted in plain text.
-
-**Examples**
-
-```js
-const db = new Database();
-const graph = db.graph('some-graph');
-const collection = graph.edgeCollection('edges');
-await collection.import([
- ['_key', '_from', '_to'],
- ['x', 'vertices/a', 'vertices/b'],
- ['y', 'vertices/b', 'vertices/c'],
- ['z', 'vertices/c', 'vertices/d']
-])
-const result = await graph.traversal('vertices/a', {
- direction: 'outbound',
- visitor: 'result.vertices.push(vertex._key);',
- init: 'result.vertices = [];'
-});
-assert.deepEqual(result.vertices, ['a', 'b', 'c', 'd']);
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Graph/README.md b/Documentation/Books/Drivers/JS/Reference/Graph/README.md
deleted file mode 100644
index b5bbf962c3c8..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Graph/README.md
+++ /dev/null
@@ -1,86 +0,0 @@
-
-# Graph API
-
-These functions implement the
-[HTTP API for manipulating graphs](../../../../HTTP/Gharial/index.html).
-
-## graph.exists
-
-`async graph.exists(): boolean`
-
-Checks whether the graph exists.
-
-**Examples**
-
-```js
-const db = new Database();
-const graph = db.graph('some-graph');
-const result = await graph.exists();
-// result indicates whether the graph exists
-```
-
-## graph.get
-
-`async graph.get(): Object`
-
-Retrieves general information about the graph.
-
-**Examples**
-
-```js
-const db = new Database();
-const graph = db.graph('some-graph');
-const data = await graph.get();
-// data contains general information about the graph
-```
-
-## graph.create
-
-`async graph.create(properties): Object`
-
-Creates a graph with the given _properties_ for this graph's name, then returns
-the server response.
-
-**Arguments**
-
-- **properties**: `Object`
-
- For more information on the _properties_ object, see
-  [the HTTP API documentation for creating graphs](../../../../HTTP/Gharial/Management.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const graph = db.graph('some-graph');
-const info = await graph.create({
- edgeDefinitions: [{
- collection: 'edges',
- from: ['start-vertices'],
- to: ['end-vertices']
- }]
-});
-// graph now exists
-```
-
-## graph.drop
-
-`async graph.drop([dropCollections]): Object`
-
-Deletes the graph from the database.
-
-**Arguments**
-
-- **dropCollections**: `boolean` (optional)
-
- If set to `true`, the collections associated with the graph will also be
- deleted.
-
-**Examples**
-
-```js
-const db = new Database();
-const graph = db.graph('some-graph');
-await graph.drop();
-// the graph "some-graph" no longer exists
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Graph/VertexCollection.md b/Documentation/Books/Drivers/JS/Reference/Graph/VertexCollection.md
deleted file mode 100644
index fd1e38ec7b9e..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Graph/VertexCollection.md
+++ /dev/null
@@ -1,148 +0,0 @@
-
-# GraphVertexCollection API
-
-The _GraphVertexCollection API_ extends the
-[_Collection API_](../Collection/README.md) with the following methods.
-
-## graphVertexCollection.remove
-
-`async graphVertexCollection.remove(documentHandle): Object`
-
-Deletes the vertex with the given _documentHandle_ from the collection.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
-  The handle of the vertex to remove. This can be either the `_id` or the
- `_key` of a vertex in the collection, or a vertex (i.e. an object with an
- `_id` or `_key` property).
-
-**Examples**
-
-```js
-const graph = db.graph("some-graph");
-const collection = graph.vertexCollection("vertices");
-
-await collection.remove("some-key");
-// document 'vertices/some-key' no longer exists
-
-// -- or --
-
-await collection.remove("vertices/some-key");
-// document 'vertices/some-key' no longer exists
-```
-
-## graphVertexCollection.documentExists
-
-`async graphVertexCollection.documentExists(documentHandle): boolean`
-
-Checks whether the vertex with the given _documentHandle_ exists.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
-  The handle of the vertex to check. This can be either the `_id` or the
- `_key` of a vertex in the collection, or a vertex (i.e. an object with an
- `_id` or `_key` property).
-
-**Examples**
-
-```js
-const graph = db.graph("some-graph");
-const collection = graph.vertexCollection("vertices");
-
-const exists = await collection.documentExists("some-key");
-if (exists === false) {
- // the vertex does not exist
-}
-```
-
-## graphVertexCollection.document
-
-`async graphVertexCollection.document(documentHandle, [opts]): Object`
-
-Alias: `graphVertexCollection.vertex`.
-
-Retrieves the vertex with the given _documentHandle_ from the collection.
-
-**Arguments**
-
-- **documentHandle**: `string`
-
- The handle of the vertex to retrieve. This can be either the `_id` or the
- `_key` of a vertex in the collection, or a vertex (i.e. an object with an
- `_id` or `_key` property).
-
-- **opts**: `Object` (optional)
-
- If _opts_ is set, it must be an object with any of the following properties:
-
- - **graceful**: `boolean` (Default: `false`)
-
- If set to `true`, the method will return `null` instead of throwing an
- error if the vertex does not exist.
-
- - **allowDirtyRead**: `boolean` (Default: `false`)
-
- {% hint 'info' %}
- This option is only available when targeting ArangoDB 3.4 or later,
- see [Compatibility](../../GettingStarted/README.md#compatibility).
- {% endhint %}
-
- If set to `true`, the request will explicitly permit ArangoDB to return a
- potentially dirty or stale result and arangojs will load balance the
- request without distinguishing between leaders and followers.
-
-If a boolean is passed instead of an options object, it will be interpreted as
-the _graceful_ option.
-
-**Examples**
-
-```js
-const graph = db.graph("some-graph");
-const collection = graph.vertexCollection("vertices");
-
-const doc = await collection.document("some-key");
-// the vertex exists
-assert.equal(doc._key, "some-key");
-assert.equal(doc._id, "vertices/some-key");
-
-// -- or --
-
-const doc = await collection.document("vertices/some-key");
-// the vertex exists
-assert.equal(doc._key, "some-key");
-assert.equal(doc._id, "vertices/some-key");
-
-// -- or --
-
-const doc = await collection.vertex("some-key", true);
-if (doc === null) {
- // the vertex does not exist
-}
-```
-
-## graphVertexCollection.save
-
-`async graphVertexCollection.save(data): Object`
-
-Creates a new vertex with the given _data_.
-
-**Arguments**
-
-- **data**: `Object`
-
- The data of the vertex.
-
-**Examples**
-
-```js
-const db = new Database();
-const graph = db.graph("some-graph");
-const collection = graph.vertexCollection("vertices");
-const doc = await collection.save({ some: "data" });
-assert.equal(doc._id, "vertices/" + doc._key);
-assert.equal(doc.some, "data");
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/Graph/Vertices.md b/Documentation/Books/Drivers/JS/Reference/Graph/Vertices.md
deleted file mode 100644
index 69d39ee64959..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Graph/Vertices.md
+++ /dev/null
@@ -1,135 +0,0 @@
-
-# Manipulating vertices
-
-## graph.vertexCollection
-
-`graph.vertexCollection(collectionName): GraphVertexCollection`
-
-Returns a new [_GraphVertexCollection_ instance](VertexCollection.md)
-with the given name for this graph.
-
-**Arguments**
-
-* **collectionName**: `string`
-
- Name of the vertex collection.
-
-**Examples**
-
-```js
-const db = new Database();
-const graph = db.graph("some-graph");
-const collection = graph.vertexCollection("vertices");
-assert.equal(collection.name, "vertices");
-// collection is a GraphVertexCollection
-```
-
-## graph.listVertexCollections
-
-`async graph.listVertexCollections([excludeOrphans]): Array`
-
-Fetches all vertex collections from the graph and returns an array of collection descriptions.
-
-**Arguments**
-
-* **excludeOrphans**: `boolean` (Default: `false`)
-
- Whether orphan collections should be excluded.
-
-**Examples**
-
-```js
-const graph = db.graph('some-graph');
-
-const collections = await graph.listVertexCollections();
-// collections is an array of collection descriptions
-// including orphan collections
-
-// -- or --
-
-const collections = await graph.listVertexCollections(true);
-// collections is an array of collection descriptions
-// not including orphan collections
-```
-
-## graph.vertexCollections
-
-`async graph.vertexCollections([excludeOrphans]): Array`
-
-Fetches all vertex collections from the database and returns an array of _GraphVertexCollection_ instances for the collections.
-
-**Arguments**
-
-* **excludeOrphans**: `boolean` (Default: `false`)
-
- Whether orphan collections should be excluded.
-
-**Examples**
-
-```js
-const graph = db.graph('some-graph');
-
-const collections = await graph.vertexCollections()
-// collections is an array of GraphVertexCollection
-// instances including orphan collections
-
-// -- or --
-
-const collections = await graph.vertexCollections(true)
-// collections is an array of GraphVertexCollection
-// instances not including orphan collections
-```
-
-## graph.addVertexCollection
-
-`async graph.addVertexCollection(collectionName): Object`
-
-Adds the collection with the given _collectionName_ to the graph's vertex
-collections.
-
-**Arguments**
-
-* **collectionName**: `string`
-
- Name of the vertex collection to add to the graph.
-
-**Examples**
-
-```js
-const db = new Database();
-const graph = db.graph('some-graph');
-await graph.addVertexCollection('vertices');
-// the collection "vertices" has been added to the graph
-```
-
-## graph.removeVertexCollection
-
-`async graph.removeVertexCollection(collectionName, [dropCollection]): Object`
-
-Removes the vertex collection with the given _collectionName_ from the graph.
-
-**Arguments**
-
-* **collectionName**: `string`
-
- Name of the vertex collection to remove from the graph.
-
-* **dropCollection**: `boolean` (optional)
-
- If set to `true`, the collection will also be deleted from the database.
-
-**Examples**
-
-```js
-const db = new Database();
-const graph = db.graph('some-graph');
-await graph.removeVertexCollection('vertices')
-// collection "vertices" has been removed from the graph
-
-// -- or --
-
-await graph.removeVertexCollection('vertices', true)
-// collection "vertices" has been removed from the graph
-// the collection has also been dropped from the database
-// this may have been a bad idea
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/README.md b/Documentation/Books/Drivers/JS/Reference/README.md
deleted file mode 100644
index f64a9048654b..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
-
-# ArangoDB JavaScript Driver - Reference
-
-- [Database](Database/README.md)
- - [Database Manipulation](Database/DatabaseManipulation.md)
- - [Collection Access](Database/CollectionAccess.md)
- - [View Access](Database/ViewAccess.md)
- - [Queries](Database/Queries.md)
- - [AQL User Functions](Database/AqlUserFunctions.md)
- - [Transactions](Database/Transactions.md)
- - [Graph Access](Database/GraphAccess.md)
- - [Foxx Services](Database/FoxxServices.md)
- - [HTTP Routes](Database/HttpRoutes.md)
-- [Collection](Collection/README.md)
- - [Collection Manipulation](Collection/CollectionManipulation.md)
- - [Document Manipulation](Collection/DocumentManipulation.md)
- - [DocumentCollection](Collection/DocumentCollection.md)
- - [EdgeCollection](Collection/EdgeCollection.md)
- - [Indexes](Collection/Indexes.md)
- - [Simple Queries](Collection/SimpleQueries.md)
- - [Bulk Import](Collection/BulkImport.md)
-- [AQL Helpers](Aql.md)
-- [View Manipulation](ViewManipulation.md)
-- [Cursor](Cursor.md)
-- [Graph](Graph/README.md)
- - [Vertices](Graph/Vertices.md)
- - [Edges](Graph/Edges.md)
- - [VertexCollection](Graph/VertexCollection.md)
- - [EdgeCollection](Graph/EdgeCollection.md)
-- [Route](Route.md)
diff --git a/Documentation/Books/Drivers/JS/Reference/Route.md b/Documentation/Books/Drivers/JS/Reference/Route.md
deleted file mode 100644
index d05f5802d0c2..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/Route.md
+++ /dev/null
@@ -1,386 +0,0 @@
-
-# Route API
-
-_Route_ instances provide access for arbitrary HTTP requests. This allows easy
-access to Foxx services and other HTTP APIs not covered by the driver itself.
-
-## route.route
-
-`route.route([path], [headers]): Route`
-
-Returns a new _Route_ instance for the given path (relative to the current
-route) that can be used to perform arbitrary HTTP requests.
-
-**Arguments**
-
-- **path**: `string` (optional)
-
- The relative URL of the route.
-
-- **headers**: `Object` (optional)
-
- Default headers that should be sent with each request to the route.
-
-If _path_ is missing, the route will refer to the base URL of the database.
-
-**Examples**
-
-```js
-const db = new Database();
-const route = db.route("my-foxx-service");
-const users = route.route("users");
-// equivalent to db.route('my-foxx-service/users')
-```
-
-## route.get
-
-`async route.get([path,] [qs]): Response`
-
-Performs a GET request to the given URL and returns the server response.
-
-**Arguments**
-
-- **path**: `string` (optional)
-
- The route-relative URL for the request. If omitted, the request will be made
- to the base URL of the route.
-
-- **qs**: `string` (optional)
-
- The query string for the request. If _qs_ is an object, it will be translated
- to a query string.
-
-**Examples**
-
-```js
-const db = new Database();
-const route = db.route("my-foxx-service");
-const response = await route.get();
-// response.body is the response body of calling
-// GET _db/_system/my-foxx-service
-
-// -- or --
-
-const response = await route.get("users");
-// response.body is the response body of calling
-// GET _db/_system/my-foxx-service/users
-
-// -- or --
-
-const response = await route.get("users", { group: "admin" });
-// response.body is the response body of calling
-// GET _db/_system/my-foxx-service/users?group=admin
-```
-
-## route.post
-
-`async route.post([path,] [body, [qs]]): Response`
-
-Performs a POST request to the given URL and returns the server response.
-
-**Arguments**
-
-- **path**: `string` (optional)
-
- The route-relative URL for the request. If omitted, the request will be made
- to the base URL of the route.
-
-- **body**: `string` (optional)
-
-  The request body. If _body_ is an object, it will be encoded as JSON.
-
-- **qs**: `string` (optional)
-
- The query string for the request. If _qs_ is an object, it will be translated
- to a query string.
-
-**Examples**
-
-```js
-const db = new Database();
-const route = db.route("my-foxx-service");
-const response = await route.post();
-// response.body is the response body of calling
-// POST _db/_system/my-foxx-service
-
-// -- or --
-
-const response = await route.post("users");
-// response.body is the response body of calling
-// POST _db/_system/my-foxx-service/users
-
-// -- or --
-
-const response = await route.post("users", {
- username: "admin",
- password: "hunter2"
-});
-// response.body is the response body of calling
-// POST _db/_system/my-foxx-service/users
-// with JSON request body {"username": "admin", "password": "hunter2"}
-
-// -- or --
-
-const response = await route.post(
- "users",
- {
- username: "admin",
- password: "hunter2"
- },
- { admin: true }
-);
-// response.body is the response body of calling
-// POST _db/_system/my-foxx-service/users?admin=true
-// with JSON request body {"username": "admin", "password": "hunter2"}
-```
-
-## route.put
-
-`async route.put([path,] [body, [qs]]): Response`
-
-Performs a PUT request to the given URL and returns the server response.
-
-**Arguments**
-
-- **path**: `string` (optional)
-
- The route-relative URL for the request. If omitted, the request will be made
- to the base URL of the route.
-
-- **body**: `string` (optional)
-
-  The request body. If _body_ is an object, it will be encoded as JSON.
-
-- **qs**: `string` (optional)
-
- The query string for the request. If _qs_ is an object, it will be translated
- to a query string.
-
-**Examples**
-
-```js
-const db = new Database();
-const route = db.route("my-foxx-service");
-const response = await route.put();
-// response.body is the response body of calling
-// PUT _db/_system/my-foxx-service
-
-// -- or --
-
-const response = await route.put("users/admin");
-// response.body is the response body of calling
-// PUT _db/_system/my-foxx-service/users/admin
-
-// -- or --
-
-const response = await route.put("users/admin", {
- username: "admin",
- password: "hunter2"
-});
-// response.body is the response body of calling
-// PUT _db/_system/my-foxx-service/users/admin
-// with JSON request body {"username": "admin", "password": "hunter2"}
-
-// -- or --
-
-const response = await route.put(
- "users/admin",
- {
- username: "admin",
- password: "hunter2"
- },
- { admin: true }
-);
-// response.body is the response body of calling
-// PUT _db/_system/my-foxx-service/users/admin?admin=true
-// with JSON request body {"username": "admin", "password": "hunter2"}
-```
-
-## route.patch
-
-`async route.patch([path,] [body, [qs]]): Response`
-
-Performs a PATCH request to the given URL and returns the server response.
-
-**Arguments**
-
-- **path**: `string` (optional)
-
- The route-relative URL for the request. If omitted, the request will be made
- to the base URL of the route.
-
-- **body**: `string` (optional)
-
-  The request body. If _body_ is an object, it will be encoded as JSON.
-
-- **qs**: `string` (optional)
-
- The query string for the request. If _qs_ is an object, it will be translated
- to a query string.
-
-**Examples**
-
-```js
-const db = new Database();
-const route = db.route("my-foxx-service");
-const response = await route.patch();
-// response.body is the response body of calling
-// PATCH _db/_system/my-foxx-service
-
-// -- or --
-
-const response = await route.patch("users/admin");
-// response.body is the response body of calling
-// PATCH _db/_system/my-foxx-service/users/admin
-
-// -- or --
-
-const response = await route.patch("users/admin", {
- password: "hunter2"
-});
-// response.body is the response body of calling
-// PATCH _db/_system/my-foxx-service/users/admin
-// with JSON request body {"password": "hunter2"}
-
-// -- or --
-
-const response = await route.patch(
- "users/admin",
- {
- password: "hunter2"
- },
- { admin: true }
-);
-// response.body is the response body of calling
-// PATCH _db/_system/my-foxx-service/users/admin?admin=true
-// with JSON request body {"password": "hunter2"}
-```
-
-## route.delete
-
-`async route.delete([path,] [qs]): Response`
-
-Performs a DELETE request to the given URL and returns the server response.
-
-**Arguments**
-
-- **path**: `string` (optional)
-
- The route-relative URL for the request. If omitted, the request will be made
- to the base URL of the route.
-
-- **qs**: `string` (optional)
-
- The query string for the request. If _qs_ is an object, it will be translated
- to a query string.
-
-**Examples**
-
-```js
-const db = new Database();
-const route = db.route("my-foxx-service");
-const response = await route.delete();
-// response.body is the response body of calling
-// DELETE _db/_system/my-foxx-service
-
-// -- or --
-
-const response = await route.delete("users/admin");
-// response.body is the response body of calling
-// DELETE _db/_system/my-foxx-service/users/admin
-
-// -- or --
-
-const response = await route.delete("users/admin", { permanent: true });
-// response.body is the response body of calling
-// DELETE _db/_system/my-foxx-service/users/admin?permanent=true
-```
-
-## route.head
-
-`async route.head([path,] [qs]): Response`
-
-Performs a HEAD request to the given URL and returns the server response.
-
-**Arguments**
-
-- **path**: `string` (optional)
-
- The route-relative URL for the request. If omitted, the request will be made
- to the base URL of the route.
-
-- **qs**: `string` (optional)
-
- The query string for the request. If _qs_ is an object, it will be translated
- to a query string.
-
-**Examples**
-
-```js
-const db = new Database();
-const route = db.route("my-foxx-service");
-const response = await route.head();
-// response is the response object for
-// HEAD _db/_system/my-foxx-service
-```
-
-## route.request
-
-`async route.request([opts]): Response`
-
-Performs an arbitrary request to the given URL and returns the server response.
-
-**Arguments**
-
-- **opts**: `Object` (optional)
-
- An object with any of the following properties:
-
- - **path**: `string` (optional)
-
- The route-relative URL for the request. If omitted, the request will be made
- to the base URL of the route.
-
- - **absolutePath**: `boolean` (Default: `false`)
-
- Whether the _path_ is relative to the connection's base URL instead of the
- route.
-
- - **body**: `string` (optional)
-
-    The request body. If _body_ is an object, it will be encoded as JSON.
-
- - **qs**: `string` (optional)
-
- The query string for the request. If _qs_ is an object, it will be
- translated to a query string.
-
- - **headers**: `Object` (optional)
-
- An object containing additional HTTP headers to be sent with the request.
-
- - **method**: `string` (Default: `"GET"`)
-
- HTTP method of this request.
-
- - **timeout**: `number` (optional)
-
- A non-negative amount of milliseconds after which the request will be
- aborted. Note that ArangoDB may continue processing the request even
- after it has timed out.
-
-**Examples**
-
-```js
-const db = new Database();
-const route = db.route("my-foxx-service");
-const response = await route.request({
- path: "hello-world",
- method: "POST",
- body: { hello: "world" },
- qs: { admin: true }
-});
-// response.body is the response body of calling
-// POST _db/_system/my-foxx-service/hello-world?admin=true
-// with JSON request body '{"hello": "world"}'
-```
diff --git a/Documentation/Books/Drivers/JS/Reference/ViewManipulation.md b/Documentation/Books/Drivers/JS/Reference/ViewManipulation.md
deleted file mode 100644
index c7cbad4b634f..000000000000
--- a/Documentation/Books/Drivers/JS/Reference/ViewManipulation.md
+++ /dev/null
@@ -1,190 +0,0 @@
-
-# View API
-
-These functions implement the
-[HTTP API for manipulating views](../../../HTTP/Views/index.html).
-
-## view.exists
-
-`async view.exists(): boolean`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.4 or later,
-see [Compatibility](../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Checks whether the view exists.
-
-**Examples**
-
-```js
-const db = new Database();
-const view = db.arangoSearchView("some-view");
-const result = await view.exists();
-// result indicates whether the view exists
-```
-
-## view.get
-
-`async view.get(): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.4 or later,
-see [Compatibility](../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Retrieves general information about the view.
-
-**Examples**
-
-```js
-const db = new Database();
-const view = db.arangoSearchView("some-view");
-const data = await view.get();
-// data contains general information about the view
-```
-
-## view.properties
-
-`async view.properties(): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.4 or later,
-see [Compatibility](../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Retrieves the view's properties.
-
-**Examples**
-
-```js
-const db = new Database();
-const view = db.arangoSearchView("some-view");
-const data = await view.properties();
-// data contains the view's properties
-```
-
-## view.create
-
-`async view.create([properties]): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.4 or later,
-see [Compatibility](../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Creates a view with the given _properties_ for this view's name,
-then returns the server response.
-
-**Arguments**
-
-- **properties**: `Object` (optional)
-
- For more information on the _properties_ object, see the
-  [HTTP API documentation for creating views](../../../HTTP/Views/ArangoSearch.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const view = db.arangoSearchView("potatoes");
-await view.create();
-// the arangosearch view "potatoes" now exists
-```
-
-## view.setProperties
-
-`async view.setProperties(properties): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.4 or later,
-see [Compatibility](../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Updates the properties of the view.
-
-**Arguments**
-
-- **properties**: `Object`
-
- For information on the _properties_ argument see the
-  [HTTP API for modifying views](../../../HTTP/Views/ArangoSearch.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const view = db.arangoSearchView("some-view");
-const result = await view.setProperties({ consolidationIntervalMsec: 123 });
-assert.equal(result.consolidationIntervalMsec, 123);
-```
-
-## view.replaceProperties
-
-`async view.replaceProperties(properties): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.4 or later,
-see [Compatibility](../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Replaces the properties of the view.
-
-**Arguments**
-
-- **properties**: `Object`
-
- For information on the _properties_ argument see the
-  [HTTP API for modifying views](../../../HTTP/Views/ArangoSearch.html).
-
-**Examples**
-
-```js
-const db = new Database();
-const view = db.arangoSearchView("some-view");
-const result = await view.replaceProperties({ consolidationIntervalMsec: 234 });
-assert.equal(result.consolidationIntervalMsec, 234);
-```
-
-## view.rename
-
-`async view.rename(name): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.4 or later,
-see [Compatibility](../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Renames the view. The _View_ instance will automatically update its
-name when the rename succeeds.
-
-**Examples**
-
-```js
-const db = new Database();
-const view = db.arangoSearchView("some-view");
-const result = await view.rename("new-view-name");
-assert.equal(result.name, "new-view-name");
-assert.equal(view.name, result.name);
-// result contains additional information about the view
-```
-
-## view.drop
-
-`async view.drop(): Object`
-
-{% hint 'info' %}
-This method is only available when targeting ArangoDB 3.4 or later,
-see [Compatibility](../GettingStarted/README.md#compatibility).
-{% endhint %}
-
-Deletes the view from the database.
-
-**Examples**
-
-```js
-const db = new Database();
-const view = db.arangoSearchView("some-view");
-await view.drop();
-// the view "some-view" no longer exists
-```
diff --git a/Documentation/Books/Drivers/Java/GettingStarted/README.md b/Documentation/Books/Drivers/Java/GettingStarted/README.md
deleted file mode 100644
index b41c098d0a3b..000000000000
--- a/Documentation/Books/Drivers/Java/GettingStarted/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-
-# ArangoDB Java Driver - Getting Started
-
-## Supported versions
-
-arangodb-java-driver | ArangoDB | network protocol | Java version
----------------------|--------------|--------------------|-------------
-5.x.x+ | 3.0.0+ | VelocyStream, HTTP | 1.6+
-4.2.x+ | 3.0.0+ | VelocyStream, HTTP | 1.6+
-4.1.x | 3.1.0+ | VelocyStream | 1.6+
-3.1.x | 3.1.0+ | HTTP | 1.6+
-3.0.x | 3.0.x | HTTP | 1.6+
-2.7.4 | 2.7.x, 2.8.x | HTTP | 1.6+
-
-**Note**: VelocyStream is only supported in ArangoDB 3.1 and above.
-
-## Maven
-
-To add the driver to your project with Maven, add the following code to your pom.xml
-(please use a driver with a version number compatible with your ArangoDB server's version):
-
-ArangoDB 3.x.x
-
-```XML
-<dependencies>
-  <dependency>
-    <groupId>com.arangodb</groupId>
-    <artifactId>arangodb-java-driver</artifactId>
-    <version>5.0.0</version>
-  </dependency>
-</dependencies>
-```
-
-If you want to test with a snapshot version (e.g. 4.6.0-SNAPSHOT),
-add the staging repository of oss.sonatype.org to your pom.xml:
-
-```XML
-<repositories>
-  <repository>
-    <id>arangodb-snapshots</id>
-    <url>https://oss.sonatype.org/content/groups/staging</url>
-  </repository>
-</repositories>
-```
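-
-With the dependency in place, a minimal connection check might look like the
-following sketch. The host, port and credentials below are assumptions for a
-default local installation; adjust them to your setup.
-
-```Java
-// assumed defaults: 127.0.0.1:8529, user "root", empty password
-ArangoDB arango = new ArangoDB.Builder()
-    .host("127.0.0.1", 8529)
-    .user("root")
-    .password("")
-    .build();
-
-// request the server version to verify that the connection works
-ArangoDBVersion version = arango.getVersion();
-System.out.println("Connected to ArangoDB " + version.getVersion());
-
-// release the underlying connections when done
-arango.shutdown();
-```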
-
-## Compile the Java Driver
-
-```
-mvn clean install -DskipTests=true -Dgpg.skip=true -Dmaven.javadoc.skip=true -B
-```
diff --git a/Documentation/Books/Drivers/Java/README.md b/Documentation/Books/Drivers/Java/README.md
deleted file mode 100644
index ee7da9e33a28..000000000000
--- a/Documentation/Books/Drivers/Java/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-
-# ArangoDB Java Driver
-
-The official ArangoDB Java Driver.
-
-- [Getting Started](GettingStarted/README.md)
-- [Reference](Reference/README.md)
-
-## See Also
-
-- [ChangeLog](https://raw.githubusercontent.com/arangodb/arangodb-java-driver/master/ChangeLog.md)
-- [Examples](https://github.com/arangodb/arangodb-java-driver/tree/master/src/test/java/com/arangodb/example)
-- [Tutorial](https://www.arangodb.com/tutorials/tutorial-sync-java-driver/)
-- [JavaDoc](http://arangodb.github.io/arangodb-java-driver/javadoc-4_3/index.html)
-- [JavaDoc VelocyPack](http://arangodb.github.io/java-velocypack/javadoc-1_0/index.html)
diff --git a/Documentation/Books/Drivers/Java/Reference/Collection/BulkImport.md b/Documentation/Books/Drivers/Java/Reference/Collection/BulkImport.md
deleted file mode 100644
index 31ee748f18ac..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Collection/BulkImport.md
+++ /dev/null
@@ -1,92 +0,0 @@
-
-# Bulk importing documents
-
-This function implements the
-[HTTP API for bulk imports](../../../..//HTTP/BulkImports/index.html).
-
-## ArangoCollection.importDocuments
-
-`ArangoCollection.importDocuments(Collection<?> values, DocumentImportOptions options) : DocumentImportEntity`
-
-`ArangoCollection.importDocuments(String values, DocumentImportOptions options) : DocumentImportEntity`
-
-Bulk imports the given values into the collection.
-
-**Arguments**
-
-- **values**: `Collection<?>` or `String`
-
-  - `Collection<?>`: A list of Objects that will be stored as documents
-
- - `String`: JSON-encoded array of objects that will be stored as documents
-
-- **options**: `DocumentImportOptions`
-
- - **fromPrefix**: `String`
-
- An optional prefix for the values in \_from attributes. If specified,
- the value is automatically prepended to each \_from input value.
- This allows specifying just the keys for \_from.
-
- - **toPrefix**: `String`
-
- An optional prefix for the values in \_to attributes. If specified,
- the value is automatically prepended to each \_to input value.
- This allows specifying just the keys for \_to.
-
- - **overwrite**: `Boolean`
-
- If this parameter has a value of true, then all data in the collection
- will be removed prior to the import. Note that any existing index definitions
- will be preserved.
-
- - **waitForSync**: `Boolean`
-
- Wait until documents have been synced to disk before returning.
-
- - **onDuplicate**: `OnDuplicate`
-
- Controls what action is carried out in case of a unique key constraint violation.
- Possible values are:
-
- - **error**: this will not import the current document because of the
- unique key constraint violation. This is the default setting.
-
- - **update**: this will update an existing document in the database with
- the data specified in the request. Attributes of the existing document
- that are not present in the request will be preserved.
-
- - **replace**: this will replace an existing document in the database with
- the data specified in the request.
-
- - **ignore**: this will not update an existing document and simply ignore
- the error caused by the unique key constraint violation. Note that update,
- replace and ignore will only work when the import document in the request
- contains the \_key attribute. update and replace may also fail because of
- secondary unique key constraint violations.
-
- - **complete**: `Boolean`
-
- If set to true, it will make the whole import fail if any error occurs.
- Otherwise the import will continue even if some documents cannot be imported.
-
- - **details**: `Boolean`
-
- If set to true, the result will include an attribute details with details
- about documents that could not be imported.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-BaseDocument doc1 = new BaseDocument();
-BaseDocument doc2 = new BaseDocument();
-BaseDocument doc3 = new BaseDocument();
-collection.importDocuments(
- Arrays.asList(doc1, doc2, doc3),
- new DocumentImportOptions()
-);
-```
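-
-The options described above can be combined with either input form. The sketch
-below imports a JSON-encoded array and updates documents that already exist
-instead of rejecting them; the collection name and keys are assumptions, and the
-`OnDuplicate` enum is assumed to be the one nested in `DocumentImportOptions`.
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-// JSON-encoded array of documents; duplicates are updated instead of rejected
-String values = "[{\"_key\":\"a\",\"value\":1},{\"_key\":\"b\",\"value\":2}]";
-DocumentImportEntity result = collection.importDocuments(
-    values,
-    new DocumentImportOptions()
-        .onDuplicate(DocumentImportOptions.OnDuplicate.update)
-        .details(true)
-);
-// result.getCreated(), result.getUpdated() and result.getErrors() summarize
-// what happened to the individual documents
-```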
diff --git a/Documentation/Books/Drivers/Java/Reference/Collection/CollectionManipulation.md b/Documentation/Books/Drivers/Java/Reference/Collection/CollectionManipulation.md
deleted file mode 100644
index 60a22266aab0..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Collection/CollectionManipulation.md
+++ /dev/null
@@ -1,356 +0,0 @@
-
-# Manipulating the collection
-
-These functions implement
-[the HTTP API for modifying collections](../../../..//HTTP/Collection/Modifying.html).
-
-## ArangoDatabase.createCollection
-
-`ArangoDatabase.createCollection(String name, CollectionCreateOptions options) : CollectionEntity`
-
-Creates a collection with the given _options_ for this collection's name,
-then returns collection information from the server.
-
-**Arguments**
-
-- **name**: `String`
-
- The name of the collection
-
-- **options**: `CollectionCreateOptions`
-
- - **journalSize**: `Long`
-
- The maximal size of a journal or datafile in bytes.
- The value must be at least 1048576 (1 MiB).
-
- - **replicationFactor**: `Integer`
-
- (The default is 1): in a cluster, this attribute determines how many copies
- of each shard are kept on different DBServers. The value 1 means that only
- one copy (no synchronous replication) is kept. A value of k means that
- k-1 replicas are kept. Any two copies reside on different DBServers.
- Replication between them is synchronous, that is, every write operation to
- the "leader" copy will be replicated to all "follower" replicas, before the
- write operation is reported successful. If a server fails, this is detected
-    automatically and one of the servers holding copies takes over, usually
- without an error being reported.
-
- - **satellite**: `Boolean`
-
-    If true, the collection is created as a satellite collection.
- In this case the _replicationFactor_ is ignored.
-
- - **waitForSync**: `Boolean`
-
- If true then the data is synchronized to disk before returning from a
- document create, update, replace or removal operation. (default: false)
-
- - **doCompact**: `Boolean`
-
- Whether or not the collection will be compacted (default is true)
-
- - **isVolatile**: `Boolean`
-
- If true then the collection data is kept in-memory only and not made persistent.
- Unloading the collection will cause the collection data to be discarded.
- Stopping or re-starting the server will also cause full loss of data in
-    the collection. Setting this option will make the resulting collection
- slightly faster than regular collections because ArangoDB does not enforce
- any synchronization to disk and does not calculate any CRC checksums for
- datafiles (as there are no datafiles). This option should therefore be used
- for cache-type collections only, and not for data that cannot be re-created
- otherwise. (The default is false)
-
- - **shardKeys**: `String...`
-
- (The default is [ "_key" ]): in a cluster, this attribute determines which
- document attributes are used to determine the target shard for documents.
- Documents are sent to shards based on the values of their shard key attributes.
- The values of all shard key attributes in a document are hashed, and the
- hash value is used to determine the target shard. Note: Values of shard key
- attributes cannot be changed once set. This option is meaningless in a
- single server setup.
-
- - **numberOfShards**: `Integer`
-
- (The default is 1): in a cluster, this value determines the number of shards
- to create for the collection. In a single server setup, this option is meaningless.
-
- - **isSystem**: `Boolean`
-
- If true, create a system collection. In this case collection-name should
- start with an underscore. End users should normally create non-system
- collections only. API implementors may be required to create system collections
- in very special occasions, but normally a regular collection will do.
- (The default is false)
-
- - **type**: `CollectionType`
-
- (The default is _CollectionType#DOCUMENT_): the type of the collection to create.
-
- - **indexBuckets**: `Integer`
-
- The number of buckets into which indexes using a hash table are split.
- The default is 16 and this number has to be a power of 2 and less than or
- equal to 1024. For very large collections one should increase this to avoid
- long pauses when the hash table has to be initially built or resized, since
- buckets are resized individually and can be initially built in parallel.
- For example, 64 might be a sensible value for a collection with
- 100 000 000 documents. Currently, only the edge index respects this value,
- but other index types might follow in future ArangoDB versions.
- Changes (see below) are applied when the collection is loaded the next time.
-
- - **distributeShardsLike**: `String`
-
- (The default is ""): in an Enterprise Edition cluster, this attribute binds
- the specifics of sharding for the newly created collection to follow that
- of a specified existing collection. Note: Using this parameter has
-    consequences for the prototype collection. It can no longer be dropped
-    before the sharding-imitating collections are dropped. Equally, backups and
-    restores of imitating collections alone will generate warnings (which can
-    be overridden) about the missing sharding prototype.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-db.createCollection("potatoes", new CollectionCreateOptions());
-// the document collection "potatoes" now exists
-```
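-
-In a cluster, the sharding-related options described above can be passed with
-the same call. A short sketch; the collection name, shard key and numeric
-values are arbitrary assumptions:
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-// 3 shards, 2 copies of each shard, sharded by the "customerId" attribute
-db.createCollection("orders", new CollectionCreateOptions()
-    .numberOfShards(3)
-    .replicationFactor(2)
-    .shardKeys("customerId")
-    .waitForSync(true));
-// the document collection "orders" now exists
-```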
-
-## ArangoCollection.create
-
-`ArangoCollection.create(CollectionCreateOptions options) : CollectionEntity`
-
-Creates a collection with the given _options_ for this collection's name,
-then returns collection information from the server.
-
-Alternative for [ArangoDatabase.createCollection](#arangodatabasecreatecollection).
-
-**Arguments**
-
-- **options**: `CollectionCreateOptions`
-
- - **journalSize**: `Long`
-
- The maximal size of a journal or datafile in bytes.
- The value must be at least 1048576 (1 MiB).
-
- - **replicationFactor**: `Integer`
-
- (The default is 1): in a cluster, this attribute determines how many copies
- of each shard are kept on different DBServers. The value 1 means that only
- one copy (no synchronous replication) is kept. A value of k means that k-1
- replicas are kept. Any two copies reside on different DBServers.
- Replication between them is synchronous, that is, every write operation to
- the "leader" copy will be replicated to all "follower" replicas, before the
- write operation is reported successful. If a server fails, this is detected
-    automatically and one of the servers holding copies takes over, usually
- without an error being reported.
-
- - **satellite**: `Boolean`
-
-    If true, the collection is created as a satellite collection.
- In this case the _replicationFactor_ is ignored.
-
- - **waitForSync**: `Boolean`
-
- If true then the data is synchronized to disk before returning from a
- document create, update, replace or removal operation. (default: false)
-
- - **doCompact**: `Boolean`
-
- Whether or not the collection will be compacted (default is true)
-
- - **isVolatile**: `Boolean`
-
- If true then the collection data is kept in-memory only and not made persistent.
- Unloading the collection will cause the collection data to be discarded.
- Stopping or re-starting the server will also cause full loss of data in
-    the collection. Setting this option will make the resulting collection
- slightly faster than regular collections because ArangoDB does not enforce
- any synchronization to disk and does not calculate any CRC checksums for
- datafiles (as there are no datafiles). This option should therefore be used
- for cache-type collections only, and not for data that cannot be re-created
- otherwise. (The default is false)
-
- - **shardKeys**: `String...`
-
- (The default is [ "_key" ]): in a cluster, this attribute determines which
- document attributes are used to determine the target shard for documents.
- Documents are sent to shards based on the values of their shard key attributes.
- The values of all shard key attributes in a document are hashed, and the
- hash value is used to determine the target shard. Note: Values of shard key
- attributes cannot be changed once set. This option is meaningless in a
- single server setup.
-
- - **numberOfShards**: `Integer`
-
- (The default is 1): in a cluster, this value determines the number of shards
- to create for the collection. In a single server setup, this option is meaningless.
-
- - **isSystem**: `Boolean`
-
- If true, create a system collection. In this case collection-name should
- start with an underscore. End users should normally create non-system
- collections only. API implementors may be required to create system collections
- in very special occasions, but normally a regular collection will do.
- (The default is false)
-
- - **type**: `CollectionType`
-
- (The default is _CollectionType#DOCUMENT_): the type of the collection to create.
-
- - **indexBuckets**: `Integer`
-
- The number of buckets into which indexes using a hash table are split.
- The default is 16 and this number has to be a power of 2 and less than or
- equal to 1024. For very large collections one should increase this to avoid
- long pauses when the hash table has to be initially built or resized, since
- buckets are resized individually and can be initially built in parallel.
- For example, 64 might be a sensible value for a collection with
- 100 000 000 documents. Currently, only the edge index respects this value,
- but other index types might follow in future ArangoDB versions.
- Changes (see below) are applied when the collection is loaded the next time.
-
- - **distributeShardsLike**: `String`
-
- (The default is ""): in an Enterprise Edition cluster, this attribute binds
- the specifics of sharding for the newly created collection to follow that
- of a specified existing collection. Note: Using this parameter has
-    consequences for the prototype collection. It can no longer be dropped
-    before the sharding-imitating collections are dropped. Equally, backups and
-    restores of imitating collections alone will generate warnings (which can
-    be overridden) about the missing sharding prototype.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("potatoes");
-collection.create(new CollectionCreateOptions());
-// the document collection "potatoes" now exists
-```
-
-## ArangoCollection.load
-
-`ArangoCollection.load() : CollectionEntity`
-
-Tells the server to load the collection into memory.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-collection.load();
-// the collection has now been loaded into memory
-```
-
-## ArangoCollection.unload
-
-`ArangoCollection.unload() : CollectionEntity`
-
-Tells the server to remove the collection from memory. This call does not
-delete any documents. You can still use the collection afterwards, in which
-case it will be loaded into memory again.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-collection.unload();
-// the collection has now been unloaded from memory
-```
-
-## ArangoCollection.changeProperties
-
-`ArangoCollection.changeProperties(CollectionPropertiesOptions options) : CollectionPropertiesEntity`
-
-Changes the properties of the collection.
-
-**Arguments**
-
-- **options**: `CollectionPropertiesOptions`
-
- For information on the _properties_ argument see
- [the HTTP API for modifying collections](../../../..//HTTP/Collection/Modifying.html).
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-CollectionPropertiesEntity result = collection.changeProperties(
-    new CollectionPropertiesOptions().waitForSync(true)
-);
-assertThat(result.getWaitForSync(), is(true));
-// the collection will now wait for data being written to disk
-// whenever a document is changed
-```
-
-## ArangoCollection.rename
-
-`ArangoCollection.rename(String newName) : CollectionEntity`
-
-Renames the collection
-
-**Arguments**
-
-- **newName**: `String`
-
- The new name
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-CollectionEntity result = collection.rename("new-collection-name");
-assertThat(result.getName(), is("new-collection-name"));
-// result contains additional information about the collection
-```
-
-## ArangoCollection.truncate
-
-`ArangoCollection.truncate() : CollectionEntity`
-
-Removes all documents from the collection, but leaves the indexes intact.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-collection.truncate();
-// the collection "some-collection" is now empty
-```
-
-## ArangoCollection.drop
-
-`ArangoCollection.drop() : void`
-
-Deletes the collection from the database.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-collection.drop();
-// the collection "some-collection" no longer exists
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Collection/DocumentManipulation.md b/Documentation/Books/Drivers/Java/Reference/Collection/DocumentManipulation.md
deleted file mode 100644
index 496f89eb34dc..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Collection/DocumentManipulation.md
+++ /dev/null
@@ -1,522 +0,0 @@
-
-# Manipulating documents
-
-These functions implement the
-[HTTP API for manipulating documents](../../../..//HTTP/Document/index.html).
-
-## ArangoCollection.documentExists
-
-`ArangoCollection.documentExists(String key) : Boolean`
-
-Checks if the document exists by reading a single document head
-
-**Arguments**
-
-- **key**: `String`
-
- The key of the document
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-Boolean exists = collection.documentExists("some-key");
-```
-
-## ArangoCollection.getDocument
-
-`ArangoCollection.getDocument(String key, Class<T> type, DocumentReadOptions options) : T`
-
-Retrieves the document with the given \_key from the collection.
-
-**Arguments**
-
-- **key**: `String`
-
- The key of the document
-
-- **type**: `Class<T>`
-
- The type of the document (POJO class, `VPackSlice` or `String` for JSON)
-
-- **options**: `DocumentReadOptions`
-
- - **ifNoneMatch**: `String`
-
-    The document is returned only if its revision does not match the given
-    revision (`If-None-Match` check)
-
- - **ifMatch**: `String`
-
-    The document is returned only if its revision matches the given
-    revision (`If-Match` check)
-
- - **catchException**: `Boolean`
-
-    Whether or not to catch possibly thrown exceptions
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-BaseDocument document = collection.getDocument("some-key", BaseDocument.class);
-```
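-
-The read options allow conditional reads against a known revision. A sketch;
-the revision string is a placeholder and would normally come from an earlier
-response:
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-// conditional read: only transfer the document if its revision
-// differs from the given one (If-None-Match)
-BaseDocument document = collection.getDocument(
-    "some-key",
-    BaseDocument.class,
-    new DocumentReadOptions().ifNoneMatch("_XYZ123--_")
-);
-```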
-
-## ArangoCollection.getDocuments
-
-`ArangoCollection.getDocuments(Collection<String> keys, Class<T> type) : MultiDocumentEntity<T>`
-
-Retrieves multiple documents with the given \_key from the collection.
-
-**Arguments**
-
-- **keys**: `Collection<String>`
-
-  The keys of the documents
-
-- **type**: `Class<T>`
-
- The type of the document (POJO class, `VPackSlice` or `String` for JSON)
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-Collection<String> keys = Arrays.asList("some-key", "some-other-key");
-MultiDocumentEntity<BaseDocument> documents = collection.getDocuments(keys, BaseDocument.class);
-```
-
-## ArangoCollection.insertDocument
-
-`ArangoCollection.insertDocument(T value, DocumentCreateOptions options) : DocumentCreateEntity<T>`
-
-Creates a new document from the given document, unless there is already a
-document with the \_key given. If no \_key is given, a new unique \_key is
-generated automatically.
-
-**Arguments**
-
-- **value**: `T`
-
- A representation of a single document (POJO, `VPackSlice` or `String` for JSON)
-
-- **options**: `DocumentCreateOptions`
-
- - **waitForSync**: `Boolean`
-
- Wait until document has been synced to disk.
-
- - **returnNew**: `Boolean`
-
- Return additionally the complete new document under the attribute new in the result.
-
- - **returnOld**: `Boolean`
-
- This options requires ArangoDB version 3.4.0 or higher. Additionally return
- the complete old document under the attribute old in the result.
- Only available if the _overwrite_ option is used.
-
- - **overwrite**: `Boolean`
-
- This options requires ArangoDB version 3.4.0 or higher. If set to true, the
- insert becomes a replace-insert. If a document with the same \_key already
- exists the new document is not rejected with unique constraint violated but
- will replace the old document.
-
- - **silent**: `Boolean`
-
- If set to true, an empty object will be returned as response. No meta-data
- will be returned for the created document. This option can be used to save
- some network traffic.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-BaseDocument document = new BaseDocument();
-document.addAttribute("some", "data");
-collection.insertDocument(document, new DocumentCreateOptions());
-```
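-
-The create options can be used to get the document back as it was stored, in
-the same round trip. A sketch using _returnNew_:
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-BaseDocument document = new BaseDocument();
-document.addAttribute("some", "data");
-
-// ask the server to return the complete stored document under "new"
-DocumentCreateEntity<BaseDocument> info = collection.insertDocument(
-    document, new DocumentCreateOptions().returnNew(true)
-);
-BaseDocument stored = info.getNew();
-// stored is the document as persisted, including its generated key and revision
-```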
-
-## ArangoCollection.insertDocuments
-
-`ArangoCollection.insertDocuments(Collection<T> values, DocumentCreateOptions options) : MultiDocumentEntity<DocumentCreateEntity<T>>`
-
-Creates new documents from the given documents, unless there is already a
-document with the \_key given. If no \_key is given, a new unique \_key is
-generated automatically.
-
-**Arguments**
-
-- **values**: `Collection<T>`
-
- A List of documents (POJO, `VPackSlice` or `String` for JSON)
-
-- **options**: `DocumentCreateOptions`
-
- - **waitForSync**: `Boolean`
-
- Wait until document has been synced to disk.
-
- - **returnNew**: `Boolean`
-
- Return additionally the complete new document under the attribute new in the result.
-
- - **returnOld**: `Boolean`
-
- This options requires ArangoDB version 3.4.0 or higher. Additionally return
- the complete old document under the attribute old in the result.
- Only available if the _overwrite_ option is used.
-
- - **overwrite**: `Boolean`
-
- This options requires ArangoDB version 3.4.0 or higher. If set to true, the
- insert becomes a replace-insert. If a document with the same \_key already
- exists the new document is not rejected with unique constraint violated but
- will replace the old document.
-
- - **silent**: `Boolean`
-
- If set to true, an empty object will be returned as response. No meta-data
- will be returned for the created document. This option can be used to save
- some network traffic.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-BaseDocument doc1 = new BaseDocument();
-BaseDocument doc2 = new BaseDocument();
-BaseDocument doc3 = new BaseDocument();
-collection.insertDocuments(
- Arrays.asList(doc1, doc2, doc3),
- new DocumentCreateOptions()
-);
-```
-
-## ArangoCollection.replaceDocument
-
-`ArangoCollection.replaceDocument(String key, T value, DocumentReplaceOptions options) : DocumentUpdateEntity<T>`
-
-Replaces the document with _key_ with the one in the body, provided there is
-such a document and no precondition is violated.
-
-**Arguments**
-
-- **key**: `String`
-
- The key of the document
-
-- **value**: `T`
-
- A representation of a single document (POJO, `VPackSlice` or `String` for JSON)
-
-- **options**: `DocumentReplaceOptions`
-
- - **waitForSync**: `Boolean`
-
- Wait until document has been synced to disk.
-
- - **ignoreRevs**: `Boolean`
-
-    By default, or if this is set to true, the \_rev attribute in the given
- document is ignored. If this is set to false, then the \_rev attribute
- given in the body document is taken as a precondition. The document is
- only replaced if the current revision is the one specified.
-
- - **ifMatch**: `String`
-
- Replace a document based on target revision
-
- - **returnNew**: `Boolean`
-
- Return additionally the complete new document under the attribute new in the result.
-
- - **returnOld**: `Boolean`
-
- Additionally return the complete old document under the attribute old in the result.
-
- - **silent**: `Boolean`
-
- If set to true, an empty object will be returned as response. No meta-data
- will be returned for the created document. This option can be used to save
- some network traffic.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-BaseDocument document = new BaseDocument();
-document.addAttribute("hello", "world");
-DocumentCreateEntity<BaseDocument> info = collection.insertDocument(document);
-
-document.addAttribute("hello", "world2");
-collection.replaceDocument(info.getKey(), document, new DocumentReplaceOptions());
-
-BaseDocument doc = collection.getDocument(info.getKey(), BaseDocument.class);
-assertThat(doc.getAttribute("hello"), is("world2"));
-```
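-
-To guard a replace against concurrent modifications, the revision returned by
-an earlier write can be passed as a precondition. A sketch:
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-BaseDocument document = new BaseDocument();
-document.addAttribute("hello", "world");
-DocumentCreateEntity<BaseDocument> info = collection.insertDocument(document);
-
-document.addAttribute("hello", "world2");
-// the replace only succeeds if the stored revision still matches info.getRev()
-collection.replaceDocument(
-    info.getKey(),
-    document,
-    new DocumentReplaceOptions().ifMatch(info.getRev())
-);
-```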
-
-## ArangoCollection.replaceDocuments
-
-`ArangoCollection.replaceDocuments(Collection<T> values, DocumentReplaceOptions options) : MultiDocumentEntity<DocumentUpdateEntity<T>>`
-
-Replaces multiple documents in the specified collection with the ones in the
-values. The replaced documents are specified by the \_key attributes in the
-documents in values.
-
-**Arguments**
-
-- **values**: `Collection<T>`
-
- A List of documents (POJO, `VPackSlice` or `String` for JSON)
-
-- **options**: `DocumentReplaceOptions`
-
- - **waitForSync**: `Boolean`
-
- Wait until document has been synced to disk.
-
- - **ignoreRevs**: `Boolean`
-
-    By default, or if this is set to true, the \_rev attribute in the given
- document is ignored. If this is set to false, then the \_rev attribute
- given in the body document is taken as a precondition. The document is
- only replaced if the current revision is the one specified.
-
- - **ifMatch**: `String`
-
- Replace a document based on target revision
-
- - **returnNew**: `Boolean`
-
- Return additionally the complete new document under the attribute new in the result.
-
- - **returnOld**: `Boolean`
-
- Additionally return the complete old document under the attribute old in the result.
-
- - **silent**: `Boolean`
-
- If set to true, an empty object will be returned as response. No meta-data
- will be returned for the created document. This option can be used to save
- some network traffic.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-BaseDocument doc1 = new BaseDocument();
-BaseDocument doc2 = new BaseDocument();
-BaseDocument doc3 = new BaseDocument();
-collection.insertDocuments(Arrays.asList(doc1, doc2, doc3));
-
-// change values of doc1, doc2, doc3
-
-collection.replaceDocuments(
- Arrays.asList(doc1, doc2, doc3),
- new DocumentReplaceOptions()
-);
-```
-
-## ArangoCollection.updateDocument
-
-`ArangoCollection.updateDocument(String key, T value, DocumentUpdateOptions options) : DocumentUpdateEntity<T>`
-
-Updates the document with _key_ with the one in the body, provided there is
-such a document and no precondition is violated.
-
-**Arguments**
-
-- **key**: `String`
-
- The key of the document
-
-- **value**: `T`
-
- A representation of a single document (POJO, `VPackSlice` or `String` for JSON)
-
-- **options**: `DocumentUpdateOptions`
-
- - **waitForSync**: `Boolean`
-
- Wait until document has been synced to disk.
-
- - **ignoreRevs**: `Boolean`
-
-    By default, or if this is set to true, the \_rev attribute in the given
- document is ignored. If this is set to false, then the \_rev attribute
- given in the body document is taken as a precondition. The document is
- only replaced if the current revision is the one specified.
-
- - **ifMatch**: `String`
-
-    Update a document based on target revision
-
- - **returnNew**: `Boolean`
-
- Return additionally the complete new document under the attribute new in the result.
-
- - **returnOld**: `Boolean`
-
- Additionally return the complete old document under the attribute old in the result.
-
- - **silent**: `Boolean`
-
- If set to true, an empty object will be returned as response. No meta-data
- will be returned for the created document. This option can be used to save
- some network traffic.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-BaseDocument document = new BaseDocument();
-document.addAttribute("hello", "world");
-DocumentCreateEntity<BaseDocument> info = collection.insertDocument(document);
-
-document.addAttribute("hello", "world2");
-collection.updateDocument(info.getKey(), document, new DocumentUpdateOptions());
-
-BaseDocument doc = collection.getDocument(info.getKey(), BaseDocument.class);
-assertThat(doc.getAttribute("hello"), is("world2"));
-```
-
-## ArangoCollection.updateDocuments
-
-`ArangoCollection.updateDocuments(Collection<T> values, DocumentUpdateOptions options) : MultiDocumentEntity<DocumentUpdateEntity<T>>`
-
-Updates multiple documents in the specified collection with the ones in the
-values. The documents to update are specified by the \_key attributes in the
-documents in values.
-
-**Arguments**
-
-- **values**: `Collection<T>`
-
- A List of documents (POJO, `VPackSlice` or `String` for JSON)
-
-- **options**: `DocumentUpdateOptions`
-
- - **waitForSync**: `Boolean`
-
- Wait until document has been synced to disk.
-
- - **ignoreRevs**: `Boolean`
-
-    By default, or if this is set to true, the \_rev attribute in the given
- document is ignored. If this is set to false, then the \_rev attribute
- given in the body document is taken as a precondition. The document is
- only replaced if the current revision is the one specified.
-
- - **ifMatch**: `String`
-
-    Update a document based on target revision
-
- - **returnNew**: `Boolean`
-
- Return additionally the complete new document under the attribute new in the result.
-
- - **returnOld**: `Boolean`
-
- Additionally return the complete old document under the attribute old in the result.
-
- - **silent**: `Boolean`
-
- If set to true, an empty object will be returned as response. No meta-data
- will be returned for the created document. This option can be used to save
- some network traffic.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-BaseDocument doc1 = new BaseDocument();
-BaseDocument doc2 = new BaseDocument();
-BaseDocument doc3 = new BaseDocument();
-collection.insertDocuments(Arrays.asList(doc1, doc2, doc3));
-
-// change values of doc1, doc2, doc3
-
-collection.updateDocuments(
- Arrays.asList(doc1, doc2, doc3),
- new DocumentUpdateOptions()
-);
-```
-
-## ArangoCollection.deleteDocument
-
-`ArangoCollection.deleteDocument(String key) : DocumentDeleteEntity`
-
-Deletes the document with the given _key_ from the collection.
-
-**Arguments**:
-
-- **key**: `String`
-
- The key of the document
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-BaseDocument document = new BaseDocument("some-key");
-collection.insertDocument(document);
-
-collection.deleteDocument("some-key");
-// document 'some-collection/some-key' no longer exists
-
-Boolean exists = collection.documentExists("some-key");
-assertThat(exists, is(false));
-```
-
-## ArangoCollection.deleteDocuments
-
-`ArangoCollection.deleteDocuments(Collection<?> values) : MultiDocumentEntity<DocumentDeleteEntity<Void>>`
-
-Deletes multiple documents from the collection.
-
-**Arguments**:
-
-- **values**: `Collection<?>`
-
- The keys of the documents or the documents themselves
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-collection.deleteDocuments(Arrays.asList("some-key", "some-other-key"));
-// documents 'some-collection/some-key' and 'some-collection/some-other-key' no longer exist
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Collection/Indexes.md b/Documentation/Books/Drivers/Java/Reference/Collection/Indexes.md
deleted file mode 100644
index c17d118726f4..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Collection/Indexes.md
+++ /dev/null
@@ -1,240 +0,0 @@
-
-# Manipulating indexes
-
-These functions implement the
-[HTTP API for manipulating indexes](../../../..//HTTP/Indexes/index.html).
-
-## ArangoCollection.ensureHashIndex
-
-`ArangoCollection.ensureHashIndex(Iterable<String> fields, HashIndexOptions options) : IndexEntity`
-
-Creates a hash index for the collection if it does not already exist.
-
-**Arguments**
-
-- **fields**: `Iterable<String>`
-
- A list of attribute paths
-
-- **options**: `HashIndexOptions`
-
- - **unique**: `Boolean`
-
- If true, then create a unique index
-
- - **sparse**: `Boolean`
-
- If true, then create a sparse index
-
- - **deduplicate**: `Boolean`
-
- If false, the de-duplication of array values is turned off.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-IndexEntity index = collection.ensureHashIndex(Arrays.asList("a", "b.c"));
-// the index has been created with the handle `index.getId()`
-```
-
-## ArangoCollection.ensureSkipListIndex
-
-`ArangoCollection.ensureSkipListIndex(Iterable<String> fields, SkipListIndexOptions options) : IndexEntity`
-
-Creates a skip-list index for the collection if it does not already exist.
-
-**Arguments**
-
-- **fields**: `Iterable<String>`
-
- A list of attribute paths
-
-- **options**: `SkipListIndexOptions`
-
- - **unique**: `Boolean`
-
- If true, then create a unique index
-
- - **sparse**: `Boolean`
-
- If true, then create a sparse index
-
- - **deduplicate**: `Boolean`
-
- If false, the de-duplication of array values is turned off.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-IndexEntity index = collection.ensureSkipListIndex(
- Arrays.asList("a", "b.c")
-);
-// the index has been created with the handle `index.getId()`
-```
-
-## ArangoCollection.ensureGeoIndex
-
-`ArangoCollection.ensureGeoIndex(Iterable<String> fields, GeoIndexOptions options) : IndexEntity`
-
-Creates a geo index for the collection if it does not already exist.
-
-**Arguments**
-
-- **fields**: `Iterable<String>`
-
- A list of attribute paths
-
-- **options**: `GeoIndexOptions`
-
- - **geoJson**: `Boolean`
-
-    If a geo-spatial index on a location is constructed and geoJson is true,
-    then the order within the array is longitude followed by latitude.
-    This corresponds to the format described in the GeoJSON specification.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-IndexEntity index = collection.ensureGeoIndex(
- Arrays.asList("latitude", "longitude")
-);
-// the index has been created with the handle `index.getId()`
-```
-
-## ArangoCollection.ensureFulltextIndex
-
-`ArangoCollection.ensureFulltextIndex(Iterable<String> fields, FulltextIndexOptions options) : IndexEntity`
-
-Creates a fulltext index for the collection if it does not already exist.
-
-**Arguments**
-
-- **fields**: `Iterable<String>`
-
- A list of attribute paths
-
-- **options**: `FulltextIndexOptions`
-
- - **minLength**: `Integer`
-
- Minimum character length of words to index. Will default to a server-defined
- value if unspecified. It is thus recommended to set this value explicitly
- when creating the index.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-IndexEntity index = collection.ensureFulltextIndex(
- Arrays.asList("description")
-);
-// the index has been created with the handle `index.getId()`
-```
-
-## ArangoCollection.ensurePersistentIndex
-
-`ArangoCollection.ensurePersistentIndex(Iterable<String> fields, PersistentIndexOptions options) : IndexEntity`
-
-Creates a persistent index for the collection if it does not already exist.
-
-**Arguments**
-
-- **fields**: `Iterable<String>`
-
- A list of attribute paths
-
-- **options**: `PersistentIndexOptions`
-
- - **unique**: `Boolean`
-
- If true, then create a unique index
-
- - **sparse**: `Boolean`
-
- If true, then create a sparse index
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-IndexEntity index = collection.ensurePersistentIndex(Arrays.asList("a", "b.c"));
-// the index has been created with the handle `index.getId()`
-```
-
-## ArangoCollection.getIndex
-
-`ArangoCollection.getIndex(String id) : IndexEntity`
-
-Fetches information about the index with the given _id_ and returns it.
-
-**Arguments**
-
-- **id**: `String`
-
- The index-handle
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-IndexEntity index = collection.getIndex("some-index");
-```
-
-## ArangoCollection.getIndexes
-
-`ArangoCollection.getIndexes() : Collection<IndexEntity>`
-
-Fetches a list of all indexes on this collection.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-Collection<IndexEntity> indexes = collection.getIndexes();
-```
-
-## ArangoCollection.deleteIndex
-
-`ArangoCollection.deleteIndex(String id) : String`
-
-Deletes the index with the given _id_ from the collection.
-
-**Arguments**
-
-- **id**: `String`
-
- The index-handle
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("some-collection");
-
-collection.deleteIndex("some-index");
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Collection/README.md b/Documentation/Books/Drivers/Java/Reference/Collection/README.md
deleted file mode 100644
index 3968190d5c6b..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Collection/README.md
+++ /dev/null
@@ -1,94 +0,0 @@
-
-# Collection API
-
-These functions implement the
-[HTTP API for collections](../../../..//HTTP/Collection/index.html).
-
-The _ArangoCollection_ API is used for all collections, regardless of
-their specific type (document/edge collection).
-
-## Getting information about the collection
-
-See
-[the HTTP API documentation](../../../..//HTTP/Collection/Getting.html)
-for details.
-
-## ArangoCollection.exists
-
-`ArangoCollection.exists() : boolean`
-
-Checks whether the collection exists
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("potatoes");
-
-boolean exists = collection.exists();
-```
-
-## ArangoCollection.getInfo
-
-`ArangoCollection.getInfo() : CollectionEntity`
-
-Returns information about the collection.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("potatoes");
-
-CollectionEntity info = collection.getInfo();
-```
-
-## ArangoCollection.getProperties
-
-`ArangoCollection.getProperties() : CollectionPropertiesEntity`
-
-Reads the properties of the specified collection.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("potatoes");
-
-CollectionPropertiesEntity properties = collection.getProperties();
-```
-
-## ArangoCollection.getRevision
-
-`ArangoCollection.getRevision() : CollectionRevisionEntity`
-
-Retrieves the collection's revision.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("potatoes");
-
-CollectionRevisionEntity revision = collection.getRevision();
-```
-
-## ArangoCollection.getIndexes
-
-`ArangoCollection.getIndexes() : Collection<IndexEntity>`
-
-Fetches a list of all indexes on this collection.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("potatoes");
-
-Collection<IndexEntity> indexes = collection.getIndexes();
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Cursor.md b/Documentation/Books/Drivers/Java/Reference/Cursor.md
deleted file mode 100644
index f00865442900..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Cursor.md
+++ /dev/null
@@ -1,357 +0,0 @@
-
-# Cursor API
-
-_ArangoCursor_ instances provide an abstraction over the HTTP API's limitations.
-Unless a method explicitly exhausts the cursor, the driver will only fetch as
-many batches from the server as necessary. Like the server-side cursors,
-_ArangoCursor_ instances are incrementally depleted as they are read from.
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query(
- "FOR x IN 1..5 RETURN x", Integer.class
-);
-// query result list: [1, 2, 3, 4, 5]
-Integer value = cursor.next();
-assertThat(value, is(1));
-// remaining result list: [2, 3, 4, 5]
-```
-
-## ArangoCursor.hasNext
-
-`ArangoCursor.hasNext() : boolean`
-
-Returns _true_ if the cursor has more elements in its current batch of results
-or the cursor on the server has more batches.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-boolean hasNext = cursor.hasNext();
-```
-
-## ArangoCursor.next
-
-`ArangoCursor.next() : T`
-
-Returns the next element of the query result. If the current element is the last
-element of the batch and the cursor on the server provides more batches, the
-next batch is fetched from the server.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-Integer value = cursor.next();
-assertThat(value, is(1));
-```
-
-## ArangoCursor.first
-
-`ArangoCursor.first() : T`
-
-Returns the first element or `null` if no element exists.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("RETURN 1", Integer.class);
-Integer value = cursor.first();
-assertThat(value, is(1));
-```
-
-## ArangoCursor.foreach
-
-`ArangoCursor.foreach(Consumer<? super T> action) : void`
-
-Performs the given action for each element of the _ArangoIterable_
-
-**Arguments**
-
-- **action**: `Consumer<? super T>`
-
-  An action to perform on the elements
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-cursor.foreach(e -> {
- // remaining results: [1, 2, 3, 4, 5]
-});
-```
-
-## ArangoCursor.map
-
-`ArangoCursor.map(Function<? super T, ? extends R> mapper) : ArangoIterable<R>`
-
-Returns an _ArangoIterable_ consisting of the results of applying the given
-function to the elements of this _ArangoIterable_.
-
-**Arguments**
-
-- **mapper**: `Function<? super T, ? extends R>`
-
- A function to apply to each element
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-cursor.map(e -> e * 10).foreach(e -> {
- // remaining results: [10, 20, 30, 40, 50]
-});
-```
-
-## ArangoCursor.filter
-
-`ArangoCursor.filter(Predicate<? super T> predicate) : ArangoIterable<T>`
-
-**Arguments**
-
-- **predicate**: `Predicate<? super T>`
-
- A predicate to apply to each element to determine if it should be included
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-cursor.filter(e -> e < 4).foreach(e -> {
- // remaining results: [1, 2, 3]
-});
-```
-
-## ArangoCursor.anyMatch
-
-`ArangoCursor.anyMatch(Predicate<? super T> predicate) : boolean`
-
-Returns whether any elements of this _ArangoIterable_ match the provided predicate.
-
-**Arguments**
-
-- **predicate**: `Predicate<? super T>`
-
-  A predicate to apply to elements of this `ArangoIterable`
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-boolean match = cursor.anyMatch(e -> e == 3);
-assertThat(match, is(true));
-```
-
-## ArangoCursor.allMatch
-
-`ArangoCursor.allMatch(Predicate<? super T> predicate) : boolean`
-
-Returns whether all elements of this _ArangoIterable_ match the provided predicate.
-
-**Arguments**
-
-- **predicate**: `Predicate<? super T>`
-
-  A predicate to apply to elements of this `ArangoIterable`
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-boolean match = cursor.allMatch(e -> e <= 5);
-assertThat(match, is(true));
-```
-
-## ArangoCursor.noneMatch
-
-`ArangoCursor.noneMatch(Predicate<? super T> predicate) : boolean`
-
-Returns whether no elements of this _ArangoIterable_ match the provided predicate.
-
-**Arguments**
-
-- **predicate**: `Predicate<? super T>`
-
-  A predicate to apply to elements of this `ArangoIterable`
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-boolean match = cursor.noneMatch(e -> e > 5);
-assertThat(match, is(true));
-```
-
-## ArangoCursor.collectInto
-
-`ArangoCursor.collectInto(R target) : R`
-
-Iterates over all elements of this `ArangoIterable` and adds each to
-the given target.
-
-**Arguments**
-
-- **target**: `R` (where `R extends Collection<? super T>`)
-
-  The collection to insert into
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-Collection<Integer> list = cursor.collectInto(new ArrayList<>());
-// -- or --
-Collection<Integer> set = cursor.collectInto(new HashSet<>());
-```
-
-## ArangoCursor.iterator
-
-`ArangoCursor.iterator() : Iterator<T>`
-
-Returns an iterator over elements of the query result.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-Iterator<Integer> iterator = cursor.iterator();
-```
-
-## ArangoCursor.asListRemaining
-
-`ArangoCursor.asListRemaining() : List<T>`
-
-Returns the remaining results as a _List_.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-List<Integer> list = cursor.asListRemaining();
-```
-
-## ArangoCursor.getCount
-
-`ArangoCursor.getCount() : Integer`
-
-Returns the total number of result documents available (only available if the
-query was executed with the _count_ attribute set)
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", new AqlQueryOptions().count(true), Integer.class);
-Integer count = cursor.getCount();
-assertThat(count, is(5));
-```
-
-## ArangoCursor.count
-
-`ArangoCursor.count() : long`
-
-Returns the count of elements of this _ArangoIterable_.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-long count = cursor.filter(e -> e < 4).count();
-// remaining results: [1, 2, 3]
-assertThat(count, is(3L));
-```
-
-## ArangoCursor.getStats
-
-`ArangoCursor.getStats() : Stats`
-
-Returns extra information about the query result. For data-modification queries,
-the stats will contain the number of modified documents and the number of
-documents that could not be modified due to an error (if `ignoreErrors`
-query option is specified).
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-Stats stats = cursor.getStats();
-```
-
-## ArangoCursor.getWarnings
-
-`ArangoCursor.getWarnings() : Collection<Warning>`
-
-Returns any warnings that were produced during query execution.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-Collection<Warning> warnings = cursor.getWarnings();
-```
-
-## ArangoCursor.isCached
-
-`ArangoCursor.isCached() : boolean`
-
-Indicates whether the query result was served from the query cache.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoCursor<Integer> cursor = db.query("FOR x IN 1..5 RETURN x", Integer.class);
-boolean cached = cursor.isCached();
-```
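-
-A cursor that is not read to completion still holds a counterpart on the server.
-Recent driver versions expose `close()` on _ArangoCursor_ (via `java.io.Closeable`),
-so a sketch like the following, under that assumption, releases it early:
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-try (ArangoCursor<Integer> cursor = db.query("FOR x IN 1..100000 RETURN x", Integer.class)) {
-  // only consume the first element; remaining batches are never fetched
-  Integer first = cursor.next();
-}
-// leaving the block closes the cursor and removes it on the server if it still exists
-```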
diff --git a/Documentation/Books/Drivers/Java/Reference/Database/AqlUserFunctions.md b/Documentation/Books/Drivers/Java/Reference/Database/AqlUserFunctions.md
deleted file mode 100644
index 060d9668a88c..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Database/AqlUserFunctions.md
+++ /dev/null
@@ -1,96 +0,0 @@
-
-# Managing AQL user functions
-
-These functions implement the
-[HTTP API for managing AQL user functions](../../../..//HTTP/AqlUserFunctions/index.html).
-
-## ArangoDatabase.getAqlFunctions
-
-`ArangoDatabase.getAqlFunctions(AqlFunctionGetOptions options) : Collection<AqlFunctionEntity>`
-
-**Arguments**
-
-- **options**: `AqlFunctionGetOptions`
-
- - **namespace**: `String`
-
-    Returns all registered AQL user functions from the given namespace
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-Collection<AqlFunctionEntity> functions = db.getAqlFunctions(
- new AqlFunctionGetOptions().namespace("myfuncs")
-);
-// functions is a list of function descriptions
-```
-
-## ArangoDatabase.createAqlFunction
-
-`ArangoDatabase.createAqlFunction(String name, String code, AqlFunctionCreateOptions options) : void`
-
-**Arguments**
-
-- **name**: `String`
-
- A valid AQL function name, e.g.: `"myfuncs::accounting::calculate_vat"`
-
-- **code**: `String`
-
- A String evaluating to a JavaScript function
-
-- **options**: `AqlFunctionCreateOptions`
-
- - **isDeterministic**: `Boolean`
-
- An optional boolean value to indicate that the function results are fully
- deterministic (function return value solely depends on the input value
- and return value is the same for repeated calls with same input)
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-db.createAqlFunction("ACME::ACCOUNTING::CALCULATE_VAT",
- "function (price) { return 0.19; }",
- new AqlFunctionCreateOptions());
-// Use the new function in an AQL query
-String query = "FOR product IN products "
-    + "RETURN MERGE("
-    + "{vat: ACME::ACCOUNTING::CALCULATE_VAT(product.price)}, product)";
-ArangoCursor<BaseDocument> cursor = db.query(query, null, new AqlQueryOptions(), BaseDocument.class);
-// cursor is a cursor for the query result
-```
-
-## ArangoDatabase.deleteAqlFunction
-
-`ArangoDatabase.deleteAqlFunction(String name, AqlFunctionDeleteOptions options): Integer`
-
-Deletes the AQL user function with the given name from the database.
-
-**Arguments**
-
-- **name**: `String`
-
- The name of the user function to delete
-
-- **options**: `AqlFunctionDeleteOptions`
-
- - **group**: `Boolean`
-
- If set to true, then the function name provided in name is treated as a
- namespace prefix, and all functions in the specified namespace will be deleted.
- If set to false, the function name provided in name must be fully qualified,
- including any namespaces.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-db.deleteAqlFunction("ACME::ACCOUNTING::CALCULATE_VAT", new AqlFunctionDeleteOptions());
-// the function no longer exists
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Database/CollectionAccess.md b/Documentation/Books/Drivers/Java/Reference/Database/CollectionAccess.md
deleted file mode 100644
index e014745fa208..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Database/CollectionAccess.md
+++ /dev/null
@@ -1,39 +0,0 @@
-
-# Accessing collections
-
-These functions implement the
-[HTTP API for accessing collections](../../../..//HTTP/Collection/Getting.html).
-
-## ArangoDatabase.collection
-
-`ArangoDatabase.collection(String name) : ArangoCollection`
-
-Returns an _ArangoCollection_ instance for the given collection name.
-
-**Arguments**
-
-- **name**: `String`
-
- Name of the collection
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCollection collection = db.collection("myCollection");
-```
-
-## ArangoDatabase.getCollections
-
-`ArangoDatabase.getCollections() : Collection<CollectionEntity>`
-
-Fetches all collections from the database and returns a list of collection descriptions.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-Collection<CollectionEntity> infos = db.getCollections();
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Database/DatabaseManipulation.md b/Documentation/Books/Drivers/Java/Reference/Database/DatabaseManipulation.md
deleted file mode 100644
index 9cc843c22cee..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Database/DatabaseManipulation.md
+++ /dev/null
@@ -1,95 +0,0 @@
-
-# Manipulating databases
-
-These functions implement the
-[HTTP API for manipulating databases](../../../..//HTTP/Database/index.html).
-
-## ArangoDB.createDatabase
-
-`ArangoDB.createDatabase(String name) : Boolean`
-
-Creates a new database with the given name.
-
-**Arguments**
-
-- **name**: `String`
-
- Name of the database to create
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-arango.createDatabase("myDB");
-```
-
-## ArangoDatabase.create()
-
-`ArangoDatabase.create() : Boolean`
-
-Creates the database.
-
-Alternative for [ArangoDB.createDatabase](#arangodbcreatedatabase).
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-db.create();
-```
-
-## ArangoDatabase.exists()
-
-`ArangoDatabase.exists() : boolean`
-
-Checks whether the database exists
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-boolean exists = db.exists();
-```
-
-## ArangoDatabase.getInfo
-
-`ArangoDatabase.getInfo() : DatabaseEntity`
-
-Retrieves information about the current database
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-DatabaseEntity info = db.getInfo();
-```
-
-## ArangoDB.getDatabases
-
-`ArangoDB.getDatabases() : Collection`
-
-Retrieves a list of all existing databases
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-Collection names = arango.getDatabases();
-```
-
-## ArangoDatabase.drop
-
-`ArangoDatabase.drop() : Boolean`
-
-Deletes the database from the server.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-db.drop();
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Database/GraphAccess.md b/Documentation/Books/Drivers/Java/Reference/Database/GraphAccess.md
deleted file mode 100644
index e04eca744482..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Database/GraphAccess.md
+++ /dev/null
@@ -1,25 +0,0 @@
-
-# Accessing graphs
-
-These functions implement the
-[HTTP API for accessing general graphs](../../../..//HTTP/Gharial/index.html).
-
-## ArangoDatabase.graph
-
-`ArangoDatabase.graph(String name) : ArangoGraph`
-
-Returns an _ArangoGraph_ instance for the given graph name.
-
-**Arguments**
-
-- **name**: `String`
-
- Name of the graph
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("myGraph");
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Database/HttpRoutes.md b/Documentation/Books/Drivers/Java/Reference/Database/HttpRoutes.md
deleted file mode 100644
index 2d37ca3e7c68..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Database/HttpRoutes.md
+++ /dev/null
@@ -1,29 +0,0 @@
-
-# Arbitrary HTTP routes
-
-## ArangoDatabase.route
-
-`ArangoDatabase.route(String... path) : ArangoRoute`
-
-Returns a new _ArangoRoute_ instance for the given path
-(relative to the database) that can be used to perform arbitrary requests.
-
-**Arguments**
-
-- **path**: `String...`
-
- The database-relative URL of the route
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoRoute myFoxxService = db.route("my-foxx-service");
-
-VPackSlice body = arango.util().serialize("{'username': 'admin', 'password': 'hunter2'}");
-Response response = myFoxxService.route("users").withBody(body).post();
-// response.getBody() is the result of
-// POST /_db/myDB/my-foxx-service/users
-// with VelocyPack request body '{"username": "admin", "password": "hunter2"}'
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Database/Queries.md b/Documentation/Books/Drivers/Java/Reference/Database/Queries.md
deleted file mode 100644
index e6d082f7e897..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Database/Queries.md
+++ /dev/null
@@ -1,170 +0,0 @@
-
-# Queries
-
-This function implements the
-[HTTP API for single roundtrip AQL queries](../../../..//HTTP/AqlQueryCursor/QueryResults.html).
-
-## ArangoDatabase.query
-
-`ArangoDatabase.query(String query, Map bindVars, AqlQueryOptions options, Class type) : ArangoCursor`
-
-Performs a database query using the given _query_ and _bindVars_, then returns
-a new _ArangoCursor_ instance for the result list.
-
-**Arguments**
-
-- **query**: `String`
-
- An AQL query string
-
-- **bindVars**: `Map`
-
- key/value pairs defining the variables to bind the query to
-
-- **options**: `AqlQueryOptions`
-
- - **count**: `Boolean`
-
- Indicates whether the number of documents in the result set should be
- returned in the "count" attribute of the result. Calculating the "count"
- attribute might have a performance impact for some queries in the future
- so this option is turned off by default, and "count" is only returned
- when requested.
-
- - **ttl**: `Integer`
-
- The time-to-live for the cursor (in seconds). The cursor will be removed
- on the server automatically after the specified amount of time.
- This is useful to ensure garbage collection of cursors that are not fully
- fetched by clients. If not set, a server-defined value will be used.
-
- - **batchSize**: `Integer`
-
- Maximum number of result documents to be transferred from the server to
- the client in one roundtrip. If this attribute is not set, a server-controlled
- default value will be used. A batchSize value of 0 is disallowed.
-
- - **memoryLimit**: `Long`
-
- The maximum amount of memory (measured in bytes) that the query is allowed
- to use. If set, then the query will fail with error "resource limit exceeded"
- in case it allocates too much memory. A value of 0 indicates that there is
- no memory limit.
-
- - **cache**: `Boolean`
-
- Flag to determine whether the AQL query cache shall be used.
- If set to false, then any query cache lookup will be skipped for the query.
- If set to true, it will lead to the query cache being checked for the query
- if the query cache mode is either on or demand.
-
- - **failOnWarning**: `Boolean`
-
- When set to true, the query will throw an exception and abort instead of
- producing a warning. This option should be used during development to catch
- potential issues early. When the attribute is set to false, warnings will
- not be propagated to exceptions and will be returned with the query result.
- There is also a server configuration option `--query.fail-on-warning` for
- setting the default value for failOnWarning so it does not need to be set
- on a per-query level.
-
- - **profile**: `Boolean`
-
- If set to true, then the additional query profiling information will be
- returned in the sub-attribute profile of the extra return attribute if the
- query result is not served from the query cache.
-
- - **maxTransactionSize**: `Long`
-
- Transaction size limit in bytes. Honored by the RocksDB storage engine only.
-
- - **maxWarningCount**: `Long`
-
- Limits the maximum number of warnings a query will return. The number of
- warnings a query will return is limited to 10 by default, but that number
- can be increased or decreased by setting this attribute.
-
- - **intermediateCommitCount**: `Long`
-
- Maximum number of operations after which an intermediate commit is
- performed automatically. Honored by the RocksDB storage engine only.
-
- - **intermediateCommitSize**: `Long`
-
- Maximum total size of operations after which an intermediate commit is
- performed automatically. Honored by the RocksDB storage engine only.
-
- - **satelliteSyncWait**: `Double`
-
- This Enterprise Edition parameter allows configuring how long a DBServer
- has to bring the satellite collections involved in the query
- into sync. The default value is 60.0 (seconds). When the max time has been
- reached the query will be stopped.
-
- - **skipInaccessibleCollections**
-
- AQL queries (especially graph traversals) will treat collections to which a
- user has no access rights as if these collections were empty. Instead of
- returning a forbidden access error, your queries will execute normally.
- This is intended to help with certain use-cases: A graph contains several
- collections and different users execute AQL queries on that graph.
- You can now naturally limit the accessible results by changing the
- access rights of users on collections. This feature is only available in
- the Enterprise Edition.
-
- - **fullCount**: `Boolean`
-
- If set to true and the query contains a LIMIT clause, then the result will
- have an extra attribute with the sub-attributes stats and fullCount,
- `{ ... , "extra": { "stats": { "fullCount": 123 } } }`.
- The fullCount attribute will contain the number of documents in the result
- before the last LIMIT in the query was applied. It can be used to count the
- number of documents that match certain filter criteria, but only return a
- subset of them, in one go. It is thus similar to MySQL's SQL_CALC_FOUND_ROWS hint.
- Note that setting the option will disable a few LIMIT optimizations and may
- lead to more documents being processed, and thus make queries run longer.
- Note that the fullCount attribute will only be present in the result if the
- query has a LIMIT clause and the LIMIT clause is actually used in the query.
-
- - **maxPlans**: `Integer`
-
- Limits the maximum number of plans that are created by the AQL query optimizer.
-
- - **rules**: `Collection`
-
- A list of to-be-included or to-be-excluded optimizer rules can be put into
- this attribute, telling the optimizer to include or exclude specific rules.
- To disable a rule, prefix its name with a `-`, to enable a rule, prefix it
- with a `+`. There is also a pseudo-rule all, which will match all optimizer rules.
-
- - **stream**: `Boolean`
-
- Specify true and the query will be executed in a streaming fashion.
- The query result is not stored on the server, but calculated on the fly.
- Beware: long-running queries will need to hold the collection locks for as
- long as the query cursor exists. When set to false a query will be executed
- right away in its entirety. In that case query results are either returned
- right away (if the resultset is small enough), or stored on the arangod
- instance and accessible via the cursor API (with respect to the TTL).
- It is advisable to only use this option on short-running queries or without
- exclusive locks (write-locks on MMFiles). Please note that the query options
- cache, count and fullCount will not work on streaming queries. Additionally
- query statistics, warnings and profiling data will only be available after
- the query is finished. The default value is false.
-
-- **type**: `Class`
-
- The type of the result (POJO class, `VPackSlice`, `String` for JSON, or `Collection`/`List`/`Map`)
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCursor cursor = db.query(
- "FOR i IN @@collection RETURN i"
- new MapBuilder().put("@collection", "myCollection").get(),
- new AqlQueryOptions(),
- BaseDocument.class
-);
-```
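-
-A sketch combining a few of the options listed above, assuming the `AqlQueryOptions`
-setters mirror the option names and that the cursor exposes query statistics via
-`getStats()`:
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoCursor<BaseDocument> cursor = db.query(
-  "FOR i IN @@collection LIMIT 100 RETURN i",
-  new MapBuilder().put("@collection", "myCollection").get(),
-  new AqlQueryOptions().batchSize(20).ttl(60).fullCount(true),
-  BaseDocument.class
-);
-// with fullCount(true), the number of documents before the LIMIT is reported
-Long fullCount = cursor.getStats().getFullCount();
-```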
diff --git a/Documentation/Books/Drivers/Java/Reference/Database/README.md b/Documentation/Books/Drivers/Java/Reference/Database/README.md
deleted file mode 100644
index 9709a21cf599..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Database/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# Database API
-
-## ArangoDB.db
-
-`ArangoDB.db(String name) : ArangoDatabase`
-
-Returns an _ArangoDatabase_ instance for the given database name.
-
-**Arguments**
-
-- **name**: `String`
-
- Name of the database
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Database/Transactions.md b/Documentation/Books/Drivers/Java/Reference/Database/Transactions.md
deleted file mode 100644
index 1b7f9b5e3a9c..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Database/Transactions.md
+++ /dev/null
@@ -1,39 +0,0 @@
-
-# Transactions
-
-This function implements the
-[HTTP API for transactions](../../../..//HTTP/Transaction/index.html).
-
-## ArangoDatabase.transaction
-
-`ArangoDatabase.transaction(String action, Class type, TransactionOptions options) : T`
-
-Performs a server-side transaction and returns its return value.
-
-**Arguments**
-
-- **action**: `String`
-
- A String evaluating to a JavaScript function to be executed on the server.
-
-- **type**: `Class`
-
- The type of the result (POJO class, `VPackSlice` or `String` for JSON)
-
-- **options**: `TransactionOptions`
-
- Additional transaction options
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-String action = "function (params) {"
- + "const db = require('@arangodb').db;"
- + "return db._query('FOR i IN test RETURN i._key').toArray();"
- + "}";
-String[] keys = db.transaction(
- action, String[].class, new TransactionOptions()
-);
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Database/ViewAccess.md b/Documentation/Books/Drivers/Java/Reference/Database/ViewAccess.md
deleted file mode 100644
index 85b97b768162..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Database/ViewAccess.md
+++ /dev/null
@@ -1,59 +0,0 @@
-
-# Accessing views
-
-These functions implement the
-[HTTP API for accessing view](../../../..//HTTP/Views/Getting.html).
-
-## ArangoDatabase.view
-
-`ArangoDatabase.view(String name) : ArangoView`
-
-Returns an _ArangoView_ instance for the given view name.
-
-**Arguments**
-
-- **name**: `String`
-
- Name of the view
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoView view = db.view("myView");
-```
-
-## ArangoDatabase.arangoSearch
-
-`ArangoDatabase.arangoSearch(String name) : ArangoSearch`
-
-Returns an _ArangoSearch_ instance for the given ArangoSearch view name.
-
-**Arguments**
-
-- **name**: `String`
-
- Name of the view
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoSearch view = db.arangoSearch("myArangoSearchView");
-```
-
-## ArangoDatabase.getViews
-
-`ArangoDatabase.getViews() : Collection`
-
-Fetches all views from the database and returns a list of view descriptions.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-Collection infos = db.getViews();
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Graph/EdgeCollection.md b/Documentation/Books/Drivers/Java/Reference/Graph/EdgeCollection.md
deleted file mode 100644
index dcce8bcfed0e..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Graph/EdgeCollection.md
+++ /dev/null
@@ -1,127 +0,0 @@
-
-# Manipulating the edge collection
-
-## ArangoGraph.edgeCollection
-
-`ArangoGraph.edgeCollection(String name) : ArangoEdgeCollection`
-
-Returns an _ArangoEdgeCollection_ instance for the given edge collection name.
-
-**Arguments**
-
-- **name**: `String`
-
- Name of the edge collection
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-
-ArangoEdgeCollection collection = graph.edgeCollection("some-edge-collection");
-```
-
-## ArangoGraph.getEdgeDefinitions
-
-`ArangoGraph.getEdgeDefinitions() : Collection`
-
-Fetches all edge collections from the graph and returns a list of collection names.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-
-Collection collections = graph.getEdgeDefinitions();
-```
-
-## ArangoGraph.addEdgeDefinition
-
-`ArangoGraph.addEdgeDefinition(EdgeDefinition definition) : GraphEntity`
-
-Adds the given edge definition to the graph.
-
-**Arguments**
-
-- **definition**: `EdgeDefinition`
-
- The edge definition
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-
-EdgeDefinition edgeDefinition = new EdgeDefinition()
- .collection("edges")
- .from("start-vertices")
- .to("end-vertices");
-graph.addEdgeDefinition(edgeDefinition);
-// the edge definition has been added to the graph
-```
-
-## ArangoGraph.replaceEdgeDefinition
-
-`ArangoGraph.replaceEdgeDefinition(EdgeDefinition definition) : GraphEntity`
-
-Changes one specific edge definition. This will modify all occurrences of this
-definition in all graphs known to your database.
-
-**Arguments**
-
-- **definition**: `EdgeDefinition`
-
- The edge definition
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-
-EdgeDefinition edgeDefinition = new EdgeDefinition()
- .collection("edges")
- .from("start-vertices")
- .to("end-vertices");
-graph.replaceEdgeDefinition(edgeDefinition);
-// the edge definition has been modified
-```
-
-## ArangoGraph.removeEdgeDefinition
-
-`ArangoGraph.removeEdgeDefinition(String definitionName) : GraphEntity`
-
-Removes one edge definition from the graph. This will only remove the
-edge collection; the vertex collections remain untouched and can still
-be used in your queries.
-
-**Arguments**
-
-- **definitionName**: `String`
-
- The name of the edge collection used in the definition
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-
-EdgeDefinition edgeDefinition = new EdgeDefinition()
- .collection("edges")
- .from("start-vertices")
- .to("end-vertices");
-graph.addEdgeDefinition(edgeDefinition);
-// the edge definition has been added to the graph
-
-graph.removeEdgeDefinition("edges");
-// the edge definition has been removed
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Graph/Edges.md b/Documentation/Books/Drivers/Java/Reference/Graph/Edges.md
deleted file mode 100644
index ecff0ad3b501..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Graph/Edges.md
+++ /dev/null
@@ -1,182 +0,0 @@
-
-# Manipulating edges
-
-## ArangoEdgeCollection.getEdge
-
-`ArangoEdgeCollection.getEdge(String key, Class type, DocumentReadOptions options) : T`
-
-Retrieves the edge document with the given `key` from the collection.
-
-**Arguments**
-
-- **key**: `String`
-
- The key of the edge
-
-- **type**: `Class`
-
- The type of the edge-document (POJO class, `VPackSlice` or `String` for JSON)
-
-- **options**: `DocumentReadOptions`
-
- - **ifNoneMatch**: `String`
-
- Document revision must not contain If-None-Match
-
- - **ifMatch**: `String`
-
- Document revision must contain If-Match
-
- - **catchException**: `Boolean`
-
- Whether or not to catch possibly thrown exceptions
-
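-**Examples**
-
-A minimal sketch following the signature above; the graph, collection and key
-names are placeholders:
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-ArangoEdgeCollection collection = graph.edgeCollection("some-edge-collection");
-
-// fetch the edge with key "some-key" and map it to a BaseEdgeDocument
-BaseEdgeDocument edge = collection.getEdge(
-  "some-key", BaseEdgeDocument.class, new DocumentReadOptions());
-```
-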
-## ArangoEdgeCollection.insertEdge
-
-`ArangoEdgeCollection.insertEdge(T value, EdgeCreateOptions options) : EdgeEntity`
-
-Creates a new edge in the collection.
-
-**Arguments**
-
-- **value**: `T`
-
- A representation of a single edge (POJO, `VPackSlice` or `String` for JSON)
-
-- **options**: `EdgeCreateOptions`
-
- - **waitForSync**: `Boolean`
-
- Wait until document has been synced to disk.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-ArangoEdgeCollection collection = graph.edgeCollection("some-edge-collection");
-
-BaseEdgeDocument document = new BaseEdgeDocument("some-from-key", "some-to-key");
-document.addAttribute("some", "data");
-collection.insertEdge(document, new EdgeCreateOptions());
-```
-
-## ArangoEdgeCollection.replaceEdge
-
-`ArangoEdgeCollection.replaceEdge(String key, T value, EdgeReplaceOptions options) : EdgeUpdateEntity`
-
-Replaces the edge with key with the one in the body, provided there is such
-an edge and no precondition is violated.
-
-**Arguments**
-
-- **key**: `String`
-
- The key of the edge
-
-- **value**: `T`
-
- A representation of a single edge (POJO, `VPackSlice` or `String` for JSON)
-
-- **options**: `EdgeReplaceOptions`
-
- - **waitForSync**: `Boolean`
-
- Wait until document has been synced to disk.
-
- - **ifMatch**: `String`
-
- Replace a document based on target revision
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-ArangoEdgeCollection collection = graph.edgeCollection("some-edge-collection");
-
-BaseEdgeDocument document = new BaseEdgeDocument("some-from-key", "some-to-key");
-collection.replaceEdge("some-key", document, new EdgeReplaceOptions());
-```
-
-## ArangoEdgeCollection.updateEdge
-
-`ArangoEdgeCollection.updateEdge(String key, T value, EdgeUpdateOptions options) : EdgeUpdateEntity`
-
-Updates the edge with key with the one in the body, provided there is such
-an edge and no precondition is violated.
-
-**Arguments**
-
-- **key**: `String`
-
- The key of the edge
-
-- **value**: `T`
-
- A representation of a single edge (POJO, `VPackSlice` or `String` for JSON)
-
-- **options**: `EdgeUpdateOptions`
-
- - **waitForSync**: `Boolean`
-
- Wait until document has been synced to disk.
-
- - **ifMatch**: `String`
-
- Update a document based on target revision
-
- - **keepNull**: `Boolean`
-
- If the intention is to delete existing attributes with the patch command,
- the URL query parameter keepNull can be used with a value of false.
- This will modify the behavior of the patch command to remove any attributes
- from the existing document that are contained in the patch document with an
- attribute value of null.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-ArangoEdgeCollection collection = graph.edgeCollection("some-edge-collection");
-
-BaseEdgeDocument document = new BaseEdgeDocument("some-from-key", "some-to-key");
-collection.updateEdge("some-key", document, new EdgeUpdateOptions());
-```
-
-## ArangoEdgeCollection.deleteEdge
-
-`ArangoEdgeCollection.deleteEdge(String key, EdgeDeleteOptions options) : void`
-
-Deletes the edge with the given _key_ from the collection.
-
-**Arguments**
-
-- **key**: `String`
-
- The key of the edge
-
-- **options** : `EdgeDeleteOptions`
-
- - **waitForSync**: `Boolean`
-
- Wait until document has been synced to disk.
-
- - **ifMatch**: `String`
-
- Remove a document based on target revision
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-ArangoEdgeCollection collection = graph.edgeCollection("some-edge-collection");
-
-collection.deleteEdge("some-key", new EdgeDeleteOptions());
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Graph/README.md b/Documentation/Books/Drivers/Java/Reference/Graph/README.md
deleted file mode 100644
index 7519c0d0a779..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Graph/README.md
+++ /dev/null
@@ -1,190 +0,0 @@
-
-# Graph API
-
-These functions implement the
-[HTTP API for manipulating graphs](../../../..//HTTP/Gharial/index.html).
-
-## ArangoDatabase.createGraph
-
-`ArangoDatabase.createGraph(String name, Collection edgeDefinitions, GraphCreateOptions options) : GraphEntity`
-
-Create a new graph in the graph module. The creation of a graph requires the
-name of the graph and a definition of its edges.
-
-**Arguments**
-
-- **name**: `String`
-
- Name of the graph
-
-- **edgeDefinitions**: `Collection`
-
- An array of edge definitions
-
-- **options**: `GraphCreateOptions`
-
- - **orphanCollections**: `String...`
-
- Additional vertex collections
-
- - **isSmart**: `Boolean`
-
- Define if the created graph should be smart.
- This only has effect in Enterprise Edition.
-
- - **replicationFactor**: `Integer`
-
- (The default is 1): in a cluster, this attribute determines how many copies
- of each shard are kept on different DBServers. The value 1 means that only
- one copy (no synchronous replication) is kept. A value of k means that k-1
- replicas are kept. Any two copies reside on different DBServers.
- Replication between them is synchronous, that is, every write operation to
- the "leader" copy will be replicated to all "follower" replicas, before the
- write operation is reported successful. If a server fails, this is detected
- automatically and one of the servers holding copies takes over, usually
- without an error being reported.
-
- - **numberOfShards**: `Integer`
-
- The number of shards that is used for every collection within this graph.
- Cannot be modified later.
-
- - **smartGraphAttribute**: `String`
-
- The attribute name that is used to smartly shard the vertices of a graph.
- Every vertex in this Graph has to have this attribute. Cannot be modified later.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-EdgeDefinition edgeDefinition = new EdgeDefinition()
- .collection("edges")
- .from("start-vertices")
- .to("end-vertices");
-GraphEntity graph = db.createGraph(
- "some-graph", Arrays.asList(edgeDefinition), new GraphCreateOptions()
-);
-// graph now exists
-```
-
-## ArangoGraph.create
-
-`ArangoGraph.create(Collection edgeDefinitions, GraphCreateOptions options) : GraphEntity`
-
-Create a new graph in the graph module. The creation of a graph requires the
-name of the graph and a definition of its edges.
-
-Alternative for [ArangoDatabase.createGraph](#arangodatabasecreategraph).
-
-**Arguments**
-
-- **edgeDefinitions**: `Collection`
-
- An array of edge definitions
-
-- **options**: `GraphCreateOptions`
-
- - **orphanCollections**: `String...`
-
- Additional vertex collections
-
- - **isSmart**: `Boolean`
-
- Define if the created graph should be smart.
- This only has effect in Enterprise Edition.
-
- - **replicationFactor**: `Integer`
-
- (The default is 1): in a cluster, this attribute determines how many copies
- of each shard are kept on different DBServers. The value 1 means that only
- one copy (no synchronous replication) is kept. A value of k means that k-1
- replicas are kept. Any two copies reside on different DBServers.
- Replication between them is synchronous, that is, every write operation to
- the "leader" copy will be replicated to all "follower" replicas, before the
- write operation is reported successful. If a server fails, this is detected
- automatically and one of the servers holding copies takes over, usually
- without an error being reported.
-
- - **numberOfShards**: `Integer`
-
- The number of shards that is used for every collection within this graph.
- Cannot be modified later.
-
- - **smartGraphAttribute**: `String`
-
- The attribute name that is used to smartly shard the vertices of a graph.
- Every vertex in this Graph has to have this attribute. Cannot be modified later.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoGraph graph = db.graph("some-graph");
-EdgeDefinition edgeDefinition = new EdgeDefinition()
- .collection("edges")
- .from("start-vertices")
- .to("end-vertices");
-graph.create(Arrays.asList(edgeDefinition), new GraphCreateOptions());
-// graph now exists
-```
-
-## ArangoGraph.exists
-
-`ArangoGraph.exists() : boolean`
-
-Checks whether the graph exists
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoGraph graph = db.graph("some-graph");
-boolean exists = graph.exists();
-```
-
-## ArangoGraph.getInfo
-
-`ArangoGraph.getInfo() : GraphEntity`
-
-Retrieves general information about the graph.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoGraph graph = db.graph("some-graph");
-GraphEntity info = graph.getInfo();
-```
-
-## ArangoGraph.drop
-
-`ArangoGraph.drop(boolean dropCollections) : void`
-
-Deletes the graph from the database.
-
-**Arguments**
-
-- **dropCollections**: `boolean`
-
- Drop collections of this graph as well. Collections will only be dropped if
- they are not used in other graphs.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoGraph graph = db.graph("some-graph");
-graph.drop();
-// the graph "some-graph" no longer exists
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Graph/VertexCollection.md b/Documentation/Books/Drivers/Java/Reference/Graph/VertexCollection.md
deleted file mode 100644
index 41877172a2cf..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Graph/VertexCollection.md
+++ /dev/null
@@ -1,63 +0,0 @@
-
-# Manipulating the vertex collection
-
-## ArangoGraph.vertexCollection
-
-`ArangoGraph.vertexCollection(String name) : ArangoVertexCollection`
-
-Returns an _ArangoVertexCollection_ instance for the given vertex collection name.
-
-**Arguments**
-
-- **name**: `String`
-
- Name of the vertex collection
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-
-ArangoVertexCollection collection = graph.vertexCollection("some-vertex-collection");
-```
-
-## ArangoGraph.getVertexCollections
-
-`ArangoGraph.getVertexCollections() : Collection`
-
-Fetches all vertex collections from the graph and returns a list of collection names.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-
-Collection collections = graph.getVertexCollections();
-```
-
-## ArangoGraph.addVertexCollection
-
-`ArangoGraph.addVertexCollection(String name) : GraphEntity`
-
-Adds a vertex collection to the set of collections of the graph.
-If the collection does not exist, it will be created.
-
-**Arguments**
-
-- **name**: `String`
-
- Name of the vertex collection
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-
-graph.addVertexCollection("some-other-collection");
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Graph/Vertices.md b/Documentation/Books/Drivers/Java/Reference/Graph/Vertices.md
deleted file mode 100644
index 0fd634214808..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Graph/Vertices.md
+++ /dev/null
@@ -1,182 +0,0 @@
-
-# Manipulating vertices
-
-## ArangoVertexCollection.getVertex
-
-`ArangoVertexCollection.getVertex(String key, Class type, DocumentReadOptions options) : T`
-
-Retrieves the vertex document with the given `key` from the collection.
-
-**Arguments**
-
-- **key**: `String`
-
- The key of the vertex
-
-- **type**: `Class`
-
- The type of the vertex-document (POJO class, `VPackSlice` or `String` for JSON)
-
-- **options**: `DocumentReadOptions`
-
- - **ifNoneMatch**: `String`
-
- Document revision must not contain If-None-Match
-
- - **ifMatch**: `String`
-
- Document revision must contain If-Match
-
- - **catchException**: `Boolean`
-
- Whether or not to catch possibly thrown exceptions
-
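-**Examples**
-
-A minimal sketch following the signature above; the graph, collection and key
-names are placeholders:
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-ArangoVertexCollection collection = graph.vertexCollection("some-vertex-collection");
-
-// fetch the vertex with key "some-key" and map it to a BaseDocument
-BaseDocument vertex = collection.getVertex(
-  "some-key", BaseDocument.class, new DocumentReadOptions());
-```
-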
-## ArangoVertexCollection.insertVertex
-
-`ArangoVertexCollection.insertVertex(T value, VertexCreateOptions options) : VertexEntity`
-
-Creates a new vertex in the collection.
-
-**Arguments**
-
-- **value**: `T`
-
- A representation of a single vertex (POJO, `VPackSlice` or `String` for JSON)
-
-- **options**: `VertexCreateOptions`
-
- - **waitForSync**: `Boolean`
-
- Wait until document has been synced to disk.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-ArangoVertexCollection collection = graph.vertexCollection("some-vertex-collection");
-
-BaseDocument document = new BaseDocument();
-document.addAttribute("some", "data");
-collection.insertVertex(document, new VertexCreateOptions());
-```
-
-## ArangoVertexCollection.replaceVertex
-
-`ArangoVertexCollection.replaceVertex(String key, T value, VertexReplaceOptions options) : VertexUpdateEntity`
-
-Replaces the vertex with key with the one in the body, provided there is such
-a vertex and no precondition is violated.
-
-**Arguments**
-
-- **key**: `String`
-
- The key of the vertex
-
-- **value**: `T`
-
- A representation of a single vertex (POJO, `VPackSlice` or `String` for JSON)
-
-- **options**: `VertexReplaceOptions`
-
- - **waitForSync**: `Boolean`
-
- Wait until document has been synced to disk.
-
- - **ifMatch**: `String`
-
- Replace a document based on target revision
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-ArangoVertexCollection collection = graph.vertexCollection("some-vertex-collection");
-
-BaseDocument document = new BaseDocument();
-collection.replaceVertex("some-key", document, new VertexReplaceOptions());
-```
-
-## ArangoVertexCollection.updateVertex
-
-`ArangoVertexCollection.updateVertex(String key, T value, VertexUpdateOptions options) : VertexUpdateEntity`
-
-Updates the vertex with key with the one in the body, provided there is such
-a vertex and no precondition is violated.
-
-**Arguments**
-
-- **key**: `String`
-
- The key of the vertex
-
-- **value**: `T`
-
- A representation of a single vertex (POJO, `VPackSlice` or `String` for JSON)
-
-- **options**: `VertexUpdateOptions`
-
- - **waitForSync**: `Boolean`
-
- Wait until document has been synced to disk.
-
- - **ifMatch**: `String`
-
- Update a document based on target revision
-
- - **keepNull**: `Boolean`
-
- If the intention is to delete existing attributes with the patch command,
- the URL query parameter keepNull can be used with a value of false.
- This will modify the behavior of the patch command to remove any attributes
- from the existing document that are contained in the patch document with
- an attribute value of null.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-ArangoVertexCollection collection = graph.vertexCollection("some-vertex-collection");
-
-BaseDocument document = new BaseDocument();
-collection.updateVertex("some-key", document, new VertexUpdateOptions());
-```
-
-## ArangoVertexCollection.deleteVertex
-
-`ArangoVertexCollection.deleteVertex(String key, VertexDeleteOptions options) : void`
-
-Deletes the vertex with the given _key_ from the collection.
-
-**Arguments**
-
-- **key**: `String`
-
- The key of the vertex
-
-- **options** : `VertexDeleteOptions`
-
- - **waitForSync**: `Boolean`
-
- Wait until document has been synced to disk.
-
- - **ifMatch**: `String`
-
- Remove a document based on target revision
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoGraph graph = db.graph("some-graph");
-ArangoVertexCollection collection = graph.vertexCollection("some-vertex-collection");
-
-collection.deleteVertex("some-key", new VertexDeleteOptions());
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/README.md b/Documentation/Books/Drivers/Java/Reference/README.md
deleted file mode 100644
index c986f37b1b5e..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-
-# ArangoDB Java Driver - Reference
-
-- [Driver Setup](Setup.md)
-- [Database](Database/README.md)
- - [Database Manipulation](Database/DatabaseManipulation.md)
- - [Collection Access](Database/CollectionAccess.md)
- - [View Access](Database/ViewAccess.md)
- - [Queries](Database/Queries.md)
- - [AQL User Functions](Database/AqlUserFunctions.md)
- - [Transactions](Database/Transactions.md)
- - [Graph Access](Database/GraphAccess.md)
- - [HTTP Routes](Database/HttpRoutes.md)
-- [Collection](Collection/README.md)
- - [Collection Manipulation](Collection/CollectionManipulation.md)
- - [Document Manipulation](Collection/DocumentManipulation.md)
- - [Indexes](Collection/Indexes.md)
- - [Bulk Import](Collection/BulkImport.md)
-- [View](View/README.md)
- - [View Manipulation](View/ViewManipulation.md)
- - [ArangoSearch Views](View/ArangoSearch.md)
-- [Cursor](Cursor.md)
-- [Graph](Graph/README.md)
- - [Vertex Collection](Graph/VertexCollection.md)
- - [Edge Collection](Graph/EdgeCollection.md)
- - [Vertices Manipulation](Graph/Vertices.md)
- - [Edges Manipulation](Graph/Edges.md)
-- [Route](Route.md)
-- [Serialization](Serialization.md)
diff --git a/Documentation/Books/Drivers/Java/Reference/Route.md b/Documentation/Books/Drivers/Java/Reference/Route.md
deleted file mode 100644
index ad5659950604..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Route.md
+++ /dev/null
@@ -1,276 +0,0 @@
-
-# Route API
-
-_ArangoRoute_ instances provide access for arbitrary HTTP requests.
-This allows easy access to Foxx services and other HTTP APIs not covered
-by the driver itself.
-
-## ArangoRoute.route
-
-`ArangoRoute.route(String... path) : ArangoRoute`
-
-Returns a new _ArangoRoute_ instance for the given path (relative to the
-current route) that can be used to perform arbitrary requests.
-
-**Arguments**
-
-- **path**: `String...`
-
- The relative URL of the route
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoRoute route = db.route("my-foxx-service");
-ArangoRoute users = route.route("users");
-// equivalent to db.route("my-foxx-service/users")
-// or db.route("my-foxx-service", "users")
-```
-
-## ArangoRoute.withHeader
-
-`ArangoRoute.withHeader(String key, Object value) : ArangoRoute`
-
-Header that should be sent with each request to the route.
-
-**Arguments**
-
-- **key**: `String`
-
- Header key
-
-- **value**: `Object`
-
- Header value (the _toString()_ method will be called for the value)
-
-## ArangoRoute.withQueryParam
-
-`ArangoRoute.withQueryParam(String key, Object value) : ArangoRoute`
-
-Query parameter that should be sent with each request to the route.
-
-**Arguments**
-
-- **key**: `String`
-
- Query parameter key
-
-- **value**: `Object`
-
- Query parameter value (the _toString()_ method will be called for the value)
-
-## ArangoRoute.withBody
-
-`ArangoRoute.withBody(Object body) : ArangoRoute`
-
-The request body. The body will be serialized to _VPackSlice_.
-
-**Arguments**
-
-- **body**: `Object`
-
- The request body
-
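-**Examples**
-
-The request modifiers can be chained. A small sketch; the header and query
-parameter names are made up for illustration:
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-// the header and query parameter are sent with every request on this route
-ArangoRoute route = db.route("my-foxx-service", "users")
-  .withHeader("x-session-id", "abc123")
-  .withQueryParam("verbose", true);
-
-VPackSlice body = arango.util().serialize("{'username': 'admin', 'password': 'hunter2'}");
-Response response = route.withBody(body).post();
-```
-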
-## ArangoRoute.delete
-
-`ArangoRoute.delete() : Response`
-
-Performs a DELETE request to the given URL and returns the server response.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoRoute route = db.route("my-foxx-service");
-Response response = route.delete();
-// response.getBody() is the response body of calling
-// DELETE _db/_system/my-foxx-service
-
-// -- or --
-
-ArangoRoute route = route.route("users/admin").delete()
-// response.getBody() is the response body of calling
-// DELETE _db/_system/my-foxx-service/users/admin
-
-// -- or --
-
-ArangoRoute route = route.route("users/admin").withQueryParam("permanent", true).delete()
-// response.getBody() is the response body of calling
-// DELETE _db/_system/my-foxx-service/users/admin?permanent=true
-```
-
-## ArangoRoute.get
-
-`ArangoRoute.get() : Response`
-
-Performs a GET request to the given URL and returns the server response.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoRoute route = db.route("my-foxx-service");
-Response response = route.get();
-// response.getBody() is the response body of calling
-// GET _db/_system/my-foxx-service
-
-// -- or --
-
-Response response = route.route("users").get();
-// response.getBody() is the response body of calling
-// GET _db/_system/my-foxx-service/users
-
-// -- or --
-
-Response response = route.route("users").withQueryParam("group", "admin").get();
-// response.getBody() is the response body of calling
-// GET _db/_system/my-foxx-service/users?group=admin
-```
-
-## ArangoRoute.head
-
-`ArangoRoute.head() : Response`
-
-Performs a HEAD request to the given URL and returns the server response.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoRoute route = db.route("my-foxx-service");
-Response response = route.head();
-// response is the response object for
-// HEAD _db/_system/my-foxx-service
-```
-
-## ArangoRoute.patch
-
-`ArangoRoute.patch() : Response`
-
-Performs a PATCH request to the given URL and returns the server response.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoRoute route = db.route("my-foxx-service");
-Response response = route.patch();
-// response.getBody() is the response body of calling
-// PATCH _db/_system/my-foxx-service
-
-// -- or --
-
-ArangoRoute route = route.route("users/admin").patch();
-// response.getBody() is the response body of calling
-// PATCH _db/_system/my-foxx-service/users
-
-// -- or --
-
-VPackSlice body = arango.util().serialize("{ password: 'hunter2' }");
-ArangoRoute route = route.route("users/admin").withBody(body).patch();
-// response.getBody() is the response body of calling
-// PATCH _db/_system/my-foxx-service/users/admin
-// with JSON request body {"password": "hunter2"}
-
-// -- or --
-
-VPackSlice body = arango.util().serialize("{ password: 'hunter2' }");
-ArangoRoute route = route.route("users/admin")
- .withBody(body).withQueryParam("admin", true).patch();
-// response.getBody() is the response body of calling
-// PATCH _db/_system/my-foxx-service/users/admin?admin=true
-// with JSON request body {"password": "hunter2"}
-```
-
-## ArangoRoute.post
-
-`ArangoRoute.post() : Response`
-
-Performs a POST request to the given URL and returns the server response.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoRoute route = db.route("my-foxx-service");
-Response response = route.post();
-// response.getBody() is the response body of calling
-// POST _db/_system/my-foxx-service
-
-// -- or --
-
-ArangoRoute route = route.route("users").post()
-// response.getBody() is the response body of calling
-// POST _db/_system/my-foxx-service/users
-
-// -- or --
-
-VPackSlice body = arango.util().serialize("{ password: 'hunter2' }");
-ArangoRoute route = route.route("users").withBody(body).post();
-// response.getBody() is the response body of calling
-// POST _db/_system/my-foxx-service/users
-// with JSON request body {"username": "admin", "password": "hunter2"}
-
-// -- or --
-
-VPackSlice body = arango.util().serialize("{ password: 'hunter2' }");
-ArangoRoute route = route.route("users")
- .withBody(body).withQueryParam("admin", true).post();
-// response.getBody() is the response body of calling
-// POST _db/_system/my-foxx-service/users?admin=true
-// with JSON request body {"username": "admin", "password": "hunter2"}
-```
-
-## ArangoRoute.put
-
-`ArangoRoute.put() : Response`
-
-Performs a PUT request to the given URL and returns the server response.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-ArangoRoute route = db.route("my-foxx-service");
-Response response = route.put();
-// response.getBody() is the response body of calling
-// PUT _db/_system/my-foxx-service
-
-// -- or --
-
-ArangoRoute route = route.route("users/admin").put();
-// response.getBody() is the response body of calling
-// PUT _db/_system/my-foxx-service/users
-
-// -- or --
-
-VPackSlice body = arango.util().serialize("{ password: 'hunter2' }");
-ArangoRoute route = route.route("users/admin").withBody(body).put();
-// response.getBody() is the response body of calling
-// PUT _db/_system/my-foxx-service/users/admin
-// with JSON request body {"username": "admin", "password": "hunter2"}
-
-// -- or --
-
-VPackSlice body = arango.util().serialize("{ password: 'hunter2' }");
-ArangoRoute route = route.route("users/admin")
- .withBody(body).withQueryParam("admin", true).put();
-// response.getBody() is the response body of calling
-// PUT _db/_system/my-foxx-service/users/admin?admin=true
-// with JSON request body {"username": "admin", "password": "hunter2"}
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Serialization.md b/Documentation/Books/Drivers/Java/Reference/Serialization.md
deleted file mode 100644
index 76988b907561..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Serialization.md
+++ /dev/null
@@ -1,253 +0,0 @@
-
-# Serialization
-
-## VelocyPack serialization
-
-Since version `4.1.11` you can extend the VelocyPack serialization by
-registering additional `VPackModule`s on `ArangoDB.Builder`.
-
-### Java 8 types
-
-GitHub: https://github.com/arangodb/java-velocypack-module-jdk8
-
-Added support for:
-
-- `java.time.Instant`
-- `java.time.LocalDate`
-- `java.time.LocalDateTime`
-- `java.time.ZonedDateTime`
-- `java.time.OffsetDateTime`
-- `java.time.ZoneId`
-- `java.util.Optional`
-- `java.util.OptionalDouble`
-- `java.util.OptionalInt`
-- `java.util.OptionalLong`
-
-```XML
-<dependencies>
-  <dependency>
-    <groupId>com.arangodb</groupId>
-    <artifactId>velocypack-module-jdk8</artifactId>
-    <version>1.1.0</version>
-  </dependency>
-</dependencies>
-```
-
-```Java
-ArangoDB arangoDB = new ArangoDB.Builder().registerModule(new VPackJdk8Module()).build();
-```
-
-### Scala types
-
-GitHub: https://github.com/arangodb/java-velocypack-module-scala
-
-Added support for:
-
-- `scala.Option`
-- `scala.collection.immutable.List`
-- `scala.collection.immutable.Map`
-- `scala.math.BigInt`
-- `scala.math.BigDecimal`
-
-```XML
-<dependencies>
-  <dependency>
-    <groupId>com.arangodb</groupId>
-    <artifactId>velocypack-module-scala</artifactId>
-    <version>1.0.2</version>
-  </dependency>
-</dependencies>
-```
-
-```Scala
-val arangoDB: ArangoDB = new ArangoDB.Builder().registerModule(new VPackScalaModule).build
-```
-
-### Joda-Time
-
-GitHub: https://github.com/arangodb/java-velocypack-module-joda
-
-Added support for:
-
-- `org.joda.time.DateTime`
-- `org.joda.time.Instant`
-- `org.joda.time.LocalDate`
-- `org.joda.time.LocalDateTime`
-
-```XML
-<dependencies>
-  <dependency>
-    <groupId>com.arangodb</groupId>
-    <artifactId>velocypack-module-joda</artifactId>
-    <version>1.1.1</version>
-  </dependency>
-</dependencies>
-```
-
-```Java
-ArangoDB arangoDB = new ArangoDB.Builder().registerModule(new VPackJodaModule()).build();
-```
-
-## Use of Jackson as an alternative serializer
-
-Since version 4.5.2, the driver supports alternative serializers to de-/serialize
-documents, edges and query results. One implementation is
-[VelocyJack](https://github.com/arangodb/jackson-dataformat-velocypack#within-arangodb-java-driver),
-which is based on [Jackson](https://github.com/FasterXML/jackson) and works with
-[jackson-dataformat-velocypack](https://github.com/arangodb/jackson-dataformat-velocypack).
-
-**Note**: Any registered custom [serializer/deserializer or module](#custom-serialization)
-will be ignored.
-
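-A minimal sketch of plugging in such a serializer; the `serializer` builder method
-and the `VelocyJack` class name are assumptions based on the linked project, not
-verified against a specific driver version:
-
-```Java
-// assumption: VelocyJack implements the serialization interface expected by the
-// builder and jackson-dataformat-velocypack is on the classpath
-VelocyJack velocyJack = new VelocyJack();
-ArangoDB arango = new ArangoDB.Builder()
-  .serializer(velocyJack)
-  .build();
-```
-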
-## Custom serialization
-
-```Java
-ArangoDB arangoDB = new ArangoDB.Builder().registerModule(new VPackModule() {
- @Override
- public <C extends VPackSetupContext<C>> void setup(final C context) {
- context.registerDeserializer(MyObject.class, new VPackDeserializer<MyObject>() {
- @Override
- public MyObject deserialize(VPackSlice parent,VPackSlice vpack,
- VPackDeserializationContext context) throws VPackException {
- MyObject obj = new MyObject();
- obj.setName(vpack.get("name").getAsString());
- return obj;
- }
- });
- context.registerSerializer(MyObject.class, new VPackSerializer<MyObject>() {
- @Override
- public void serialize(VPackBuilder builder,String attribute,MyObject value,
- VPackSerializationContext context) throws VPackException {
- builder.add(attribute, ValueType.OBJECT);
- builder.add("name", value.getName());
- builder.close();
- }
- });
- }
-}).build();
-```
-
-## JavaBeans
-
-The driver can serialize/deserialize JavaBeans. They need at least a
-constructor without parameters.
-
-```Java
-public class MyObject {
-
- private String name;
- private Gender gender;
- private int age;
-
- public MyObject() {
- super();
- }
-
-}
-```
-
-## Internal fields
-
-To use Arango-internal fields (like \_id, \_key, \_rev, \_from, \_to) in your
-JavaBeans, use the annotation `DocumentField`.
-
-```Java
-public class MyObject {
-
- @DocumentField(Type.KEY)
- private String key;
-
- private String name;
- private Gender gender;
- private int age;
-
- public MyObject() {
- super();
- }
-
-}
-```
-
-## Serialized fieldnames
-
-To use a different serialized name for a field, use the annotation `SerializedName`.
-
-```Java
-public class MyObject {
-
- @SerializedName("title")
- private String name;
-
- private Gender gender;
- private int age;
-
- public MyObject() {
- super();
- }
-
-}
-```
-
-## Ignore fields
-
-To ignore fields at serialization/deserialization, use the annotation `Expose`
-
-```Java
-public class MyObject {
-
- @Expose
- private String name;
- @Expose(serialize = true, deserialize = false)
- private Gender gender;
- private int age;
-
- public MyObject() {
- super();
- }
-
-}
-```
-
-## Custom serializer
-
-```Java
-ArangoDB arangoDB = new ArangoDB.Builder().registerModule(new VPackModule() {
- @Override
- public <C extends VPackSetupContext<C>> void setup(final C context) {
- context.registerDeserializer(MyObject.class, new VPackDeserializer<MyObject>() {
- @Override
- public MyObject deserialize(VPackSlice parent,VPackSlice vpack,
- VPackDeserializationContext context) throws VPackException {
- MyObject obj = new MyObject();
- obj.setName(vpack.get("name").getAsString());
- return obj;
- }
- });
- context.registerSerializer(MyObject.class, new VPackSerializer<MyObject>() {
- @Override
- public void serialize(VPackBuilder builder,String attribute,MyObject value,
- VPackSerializationContext context) throws VPackException {
- builder.add(attribute, ValueType.OBJECT);
- builder.add("name", value.getName());
- builder.close();
- }
- });
- }
-}).build();
-```
-
-## Manual serialization
-
-To de-/serialize from and to VelocyPack before or after a database call, use the
-`ArangoUtil` from the method `util()` in `ArangoDB`, `ArangoDatabase`,
-`ArangoCollection`, `ArangoGraph`, `ArangoEdgeCollection`or `ArangoVertexCollection`.
-
-```Java
-ArangoDB arangoDB = new ArangoDB.Builder().build();
-VPackSlice vpack = arangoDB.util().serialize(myObj);
-```
-
-```Java
-ArangoDB arangoDB = new ArangoDB.Builder().build();
-MyObject myObj = arangoDB.util().deserialize(vpack, MyObject.class);
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/Setup.md b/Documentation/Books/Drivers/Java/Reference/Setup.md
deleted file mode 100644
index b1eebc79a4fa..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/Setup.md
+++ /dev/null
@@ -1,198 +0,0 @@
-
-# Driver setup
-
-Setup with default configuration; this automatically loads a properties file
-`arangodb.properties` if it exists in the classpath:
-
-```Java
-// this instance is thread-safe
-ArangoDB arangoDB = new ArangoDB.Builder().build();
-```
-
-The driver is configured with some default values:
-
-| property-key | description | default value |
-| ------------------------ | --------------------------------------- | -------------- |
-| arangodb.hosts | ArangoDB hosts | 127.0.0.1:8529 |
-| arangodb.timeout | connect & request timeout (millisecond) | 0 |
-| arangodb.user | Basic Authentication User | |
-| arangodb.password | Basic Authentication Password | |
-| arangodb.useSsl | use SSL connection | false |
-| arangodb.chunksize | VelocyStream Chunk content-size (bytes) | 30000 |
-| arangodb.connections.max | max number of connections | 1 VST, 20 HTTP |
-| arangodb.protocol | used network protocol | VST |
-
-To customize the configuration, the parameters can be changed in the code...
-
-```Java
-ArangoDB arangoDB = new ArangoDB.Builder()
- .host("192.168.182.50", 8888)
- .build();
-```
-
-... or with a custom properties file (my.properties)
-
-```Java
-InputStream in = MyClass.class.getResourceAsStream("my.properties");
-ArangoDB arangoDB = new ArangoDB.Builder()
- .loadProperties(in)
- .build();
-```
-
-Example for arangodb.properties:
-
-```
-arangodb.hosts=127.0.0.1:8529,127.0.0.1:8529
-arangodb.user=root
-arangodb.password=
-```
-
-## Network protocol
-
-The driver's default network protocol is the binary protocol VelocyStream,
-which offers the best performance within the driver. To use HTTP, you have to
-set the configuration `useProtocol` to `Protocol.HTTP_JSON` for HTTP with JSON
-content or `Protocol.HTTP_VPACK` for HTTP with
-[VelocyPack](https://github.com/arangodb/velocypack/blob/master/VelocyPack.md) content.
-
-```Java
-ArangoDB arangoDB = new ArangoDB.Builder()
- .useProtocol(Protocol.VST)
- .build();
-```
-
-In addition to setting the configuration for HTTP, you have to add the
-Apache HttpClient to your classpath.
-
-```XML
-<dependency>
-  <groupId>org.apache.httpcomponents</groupId>
-  <artifactId>httpclient</artifactId>
-  <version>4.5.1</version>
-</dependency>
-```
-
-**Note**: If you are using ArangoDB 3.0.x you have to set the protocol to
-`Protocol.HTTP_JSON` because it is the only one supported.
-
-## SSL
-
-To use SSL, you have to set the configuration `useSsl` to `true` and set a `SSLContext`
-(see [example code](https://github.com/arangodb/arangodb-java-driver/blob/master/src/test/java/com/arangodb/example/ssl/SslExample.java)).
-
-```Java
-ArangoDB arangoDB = new ArangoDB.Builder()
- .useSsl(true)
- .sslContext(sc)
- .build();
-```
-
-## Connection Pooling
-
-The driver supports connection pooling for VelocyStream with a default of 1 and
-HTTP with a default of 20 maximum connections per host. To change this value
-use the method `maxConnections(Integer)` in `ArangoDB.Builder`.
-
-```Java
-ArangoDB arangoDB = new ArangoDB.Builder()
- .maxConnections(8)
- .build();
-```
-
-The driver does not explicitly release connections. To avoid exhaustion of
-resources when no connection is needed, you can clear the connection pool
-(close all connections to the server) or use [connection TTL](#connection-time-to-live).
-
-```Java
-arangoDB.shutdown();
-```
-
-## Fallback hosts
-
-The driver supports configuring multiple hosts. The first host is used to open a
-connection. When this host is not reachable, the next host from the list is used.
-To use this feature just call the method `host(String, int)` multiple times.
-
-```Java
-ArangoDB arangoDB = new ArangoDB.Builder()
- .host("host1", 8529)
- .host("host2", 8529)
- .build();
-```
-
-Since version 4.3 the driver supports acquiring a list of known hosts in a
-cluster setup or a single server setup with followers. For this the driver has
-to be able to successfully open a connection to at least one host to get the
-list of hosts. Then it can use this list when fallback is needed. To use this
-feature just pass `true` to the method `acquireHostList(boolean)`.
-
-```Java
-ArangoDB arangoDB = new ArangoDB.Builder()
- .acquireHostList(true)
- .build();
-```
-
-## Load Balancing
-
-Since version 4.3 the driver supports load balancing for cluster setups in
-two different ways.
-
-The first one is round robin load balancing, where the driver iterates
-through a list of known hosts and performs every request on a different
-host than the request before.
-
-```Java
-ArangoDB arangoDB = new ArangoDB.Builder()
- .loadBalancingStrategy(LoadBalancingStrategy.ROUND_ROBIN)
- .build();
-```
-
-Just like the fallback hosts feature, the round robin load balancing strategy
-can use the `acquireHostList` configuration to acquire a list of all known hosts
-in the cluster. Doing so only requires the manual configuration of a single host.
-Because this list is updated frequently, it makes load balancing over the whole
-cluster very convenient.
-
-```Java
-ArangoDB arangoDB = new ArangoDB.Builder()
- .loadBalancingStrategy(LoadBalancingStrategy.ROUND_ROBIN)
- .acquireHostList(true)
- .build();
-```
-
-The second load balancing strategy picks a random host from the configured or
-acquired list of hosts and sticks to that host as long as the connection is open.
-This strategy is useful for an application - using the driver - which provides
-session management, where each session has its own instance of `ArangoDB` built
-from a globally configured list of hosts. In this case it may be desirable that
-every session sticks with all its requests to the same host, but not all sessions
-should use the same host. This load balancing strategy also works together with
-`acquireHostList`.
-
-```Java
-ArangoDB arangoDB = new ArangoDB.Builder()
- .loadBalancingStrategy(LoadBalancingStrategy.ONE_RANDOM)
- .acquireHostList(true)
- .build();
-```
-
-## Connection time to live
-
-Since version 4.4 the driver supports setting a TTL (time to live) in milliseconds
-for connections managed by the internal connection pool.
-
-```Java
-ArangoDB arango = new ArangoDB.Builder()
- .connectionTtl(5 * 60 * 1000)
- .build();
-```
-
-In this example all connections will be closed/reopened after 5 minutes.
-
-Connection TTL can be disabled setting it to `null`:
-
-```Java
-.connectionTtl(null)
-```
-
-The default TTL is `null` (no automatic connection closure).
diff --git a/Documentation/Books/Drivers/Java/Reference/View/ArangoSearch.md b/Documentation/Books/Drivers/Java/Reference/View/ArangoSearch.md
deleted file mode 100644
index 0b70d91acfbf..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/View/ArangoSearch.md
+++ /dev/null
@@ -1,274 +0,0 @@
-
-# ArangoSearch API
-
-These functions implement the
-[HTTP API for ArangoSearch views](../../../../HTTP/Views/ArangoSearch.html).
-
-## ArangoDatabase.createArangoSearch
-
-`ArangoDatabase.createArangoSearch(String name, ArangoSearchCreateOptions options) : ViewEntity`
-
-Creates an ArangoSearch view with the given _options_, then returns
-view information from the server.
-
-**Arguments**
-
-- **name**: `String`
-
- The name of the view
-
-- **options**: `ArangoSearchCreateOptions`
-
- - **consolidationIntervalMsec**: `Long`
-
- Wait at least this many milliseconds between committing index data changes
- and making them visible to queries (default: 60000, to disable use: 0).
- For the case where there are a lot of inserts/updates, a lower value,
- until commit, will cause the index not to account for them and memory usage
- would continue to grow. For the case where there are a few inserts/updates,
- a higher value will impact performance and waste disk space for each
- commit call without any added benefits.
-
- - **cleanupIntervalStep**: `Long`
-
- Wait at least this many commits between removing unused files in
- data directory (default: 10, to disable use: 0). For the case where the
- consolidation policies merge segments often (i.e. a lot of commit+consolidate),
- a lower value will cause a lot of disk space to be wasted. For the case
- where the consolidation policies rarely merge segments (i.e. few inserts/deletes),
- a higher value will impact performance without any added benefits.
-
- - **consolidationPolicy**:
-
- - **type**: `ConsolidationType`
-
- The type of the consolidation policy.
-
- - **threshold**: `Double`
-
- Select a given segment for "consolidation" if and only if the formula
- based on type (as defined above) evaluates to true, valid value range
- [0.0, 1.0] (default: 0.85)
-
- - **segmentThreshold**: `Long`
-
- Apply the "consolidation" operation if and only if (default: 300):
- `{segmentThreshold} < number_of_segments`
-
- - **link**: `CollectionLink[]`
-
- A list of linked collections
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-db.createArangoSearch("potatoes", new ArangoSearchCreateOptions());
-// the ArangoSearch view "potatoes" now exists
-```
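-
-The options above can be combined. The following is a minimal sketch that sets
-the consolidation interval, the cleanup interval and a collection link at
-creation time; the builder-style setters are assumed to be named after the
-options listed above, so verify the exact method names against your driver version.
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-
-// sketch: option names follow the list above; the setter names are assumptions
-db.createArangoSearch("recipes", new ArangoSearchCreateOptions()
-    .consolidationIntervalMsec(60000L) // commit index changes at most once per minute
-    .cleanupIntervalStep(10L)          // remove unused files every 10 commits
-    .link(CollectionLink.on("myCollection")
-        .fields(FieldLink.on("value").analyzers("identity"))));
-// the ArangoSearch view "recipes" now exists with the configured properties
-```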
-
-## ArangoSearch.create
-
-`ArangoSearch.create(ArangoSearchCreateOptions options) : ViewEntity`
-
-Creates an ArangoSearch view with the given _options_, then returns view information from the server.
-
-Alternative for `ArangoDatabase.createArangoSearch`.
-
-**Arguments**
-
-- **options**: `ArangoSearchCreateOptions`
-
- - **consolidationIntervalMsec**: `Long`
-
- Wait at least this many milliseconds between committing index data changes
- and making them visible to queries (default: 60000, to disable use: 0).
- For the case where there are a lot of inserts/updates, a lower value,
- until commit, will cause the index not to account for them and memory usage
- would continue to grow. For the case where there are a few inserts/updates,
- a higher value will impact performance and waste disk space for each
- commit call without any added benefits.
-
- - **cleanupIntervalStep**: `Long`
-
- Wait at least this many commits between removing unused files in
- data directory (default: 10, to disable use: 0). For the case where the
- consolidation policies merge segments often (i.e. a lot of commit+consolidate),
- a lower value will cause a lot of disk space to be wasted. For the case
- where the consolidation policies rarely merge segments (i.e. few inserts/deletes),
- a higher value will impact performance without any added benefits.
-
- - **consolidationPolicy**:
-
- - **type**: `ConsolidationType`
-
- The type of the consolidation policy.
-
- - **threshold**: `Double`
-
- Select a given segment for "consolidation" if and only if the formula
- based on type (as defined above) evaluates to true, valid value range
- [0.0, 1.0] (default: 0.85)
-
- - **segmentThreshold**: `Long`
-
- Apply the "consolidation" operation if and only if (default: 300):
- `{segmentThreshold} < number_of_segments`
-
- - **link**: `CollectionLink[]`
-
- A list of linked collections
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoSearch view = db.arangoSearch("potatoes");
-
-view.create(new ArangoSearchCreateOptions());
-// the ArangoSearch view "potatoes" now exists
-```
-
-## ArangoSearch.getProperties
-
-`ArangoSearch.getProperties() : ArangoSearchPropertiesEntity`
-
-Reads the properties of the specified view.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoSearch view = db.arangoSearch("potatoes");
-
-ArangoSearchPropertiesEntity properties = view.getProperties();
-```
-
-## ArangoSearch.updateProperties
-
-`ArangoSearch.updateProperties(ArangoSearchPropertiesOptions options) : ArangoSearchPropertiesEntity`
-
-Partially changes properties of the view.
-
-**Arguments**
-
-- **options**: `ArangoSearchPropertiesOptions`
-
- - **consolidationIntervalMsec**: `Long`
-
- Wait at least this many milliseconds between committing index data changes
- and making them visible to queries (default: 60000, to disable use: 0).
- For the case where there are a lot of inserts/updates, a lower value,
- until commit, will cause the index not to account for them and memory usage
- would continue to grow. For the case where there are a few inserts/updates,
- a higher value will impact performance and waste disk space for each
- commit call without any added benefits.
-
- - **cleanupIntervalStep**: `Long`
-
- Wait at least this many commits between removing unused files in
- data directory (default: 10, to disable use: 0). For the case where the
- consolidation policies merge segments often (i.e. a lot of commit+consolidate),
- a lower value will cause a lot of disk space to be wasted. For the case
- where the consolidation policies rarely merge segments (i.e. few inserts/deletes),
- a higher value will impact performance without any added benefits.
-
- - **consolidationPolicy**:
-
- - **type**: `ConsolidationType`
-
- The type of the consolidation policy.
-
- - **threshold**: `Double`
-
- Select a given segment for "consolidation" if and only if the formula
- based on type (as defined above) evaluates to true, valid value range
- [0.0, 1.0] (default: 0.85)
-
- - **segmentThreshold**: `Long`
-
- Apply the "consolidation" operation if and only if (default: 300):
- `{segmentThreshold} < number_of_segments`
-
- - **link**: `CollectionLink[]`
-
- A list of linked collections
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoSearch view = db.arangoSearch("some-view");
-
-view.updateProperties(
- new ArangoSearchPropertiesOptions()
- .link(CollectionLink.on("myCollection").fields(FieldLink.on("value").analyzers("identity")))
-);
-```
-
-## ArangoSearch.replaceProperties
-
-`ArangoSearch.replaceProperties(ArangoSearchPropertiesOptions options) : ArangoSearchPropertiesEntity`
-
-Changes properties of the view.
-
-**Arguments**
-
-- **options**: `ArangoSearchPropertiesOptions`
-
- - **consolidationIntervalMsec**: `Long`
-
- Wait at least this many milliseconds between committing index data changes
- and making them visible to queries (default: 60000, to disable use: 0).
- For the case where there are a lot of inserts/updates, a lower value,
- until commit, will cause the index not to account for them and memory usage
- would continue to grow. For the case where there are a few inserts/updates,
- a higher value will impact performance and waste disk space for each
- commit call without any added benefits.
-
- - **cleanupIntervalStep**: `Long`
-
- Wait at least this many commits between removing unused files in
- data directory (default: 10, to disable use: 0). For the case where the
- consolidation policies merge segments often (i.e. a lot of commit+consolidate),
- a lower value will cause a lot of disk space to be wasted. For the case
- where the consolidation policies rarely merge segments (i.e. few inserts/deletes),
- a higher value will impact performance without any added benefits.
-
- - **consolidationPolicy**:
-
- - **type**: `ConsolidationType`
-
- The type of the consolidation policy.
-
- - **threshold**: `Double`
-
- Select a given segment for "consolidation" if and only if the formula
- based on type (as defined above) evaluates to true, valid value range
- [0.0, 1.0] (default: 0.85)
-
- - **segmentThreshold**: `Long`
-
- Apply the "consolidation" operation if and only if (default: 300):
- `{segmentThreshold} < number_of_segments`
-
- - **link**: `CollectionLink[]`
-
- A list of linked collections
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoSearch view = db.arangoSearch("some-view");
-
-view.replaceProperties(
- new ArangoSearchPropertiesOptions()
- .link(CollectionLink.on("myCollection").fields(FieldLink.on("value").analyzers("identity")))
-);
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/View/README.md b/Documentation/Books/Drivers/Java/Reference/View/README.md
deleted file mode 100644
index 0091cf8148e3..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/View/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-
-# View API
-
-These functions implement the
-[HTTP API for views](../../../../HTTP/Views/index.html).
-
-## Getting information about the view
-
-See
-[the HTTP API documentation](../../../../HTTP/Views/Getting.html)
-for details.
-
-## ArangoView.exists
-
-`ArangoView.exists() : boolean`
-
-Checks whether the view exists.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoView view = db.view("potatoes");
-
-boolean exists = view.exists();
-```
-
-## ArangoView.getInfo
-
-`ArangoView.getInfo() : ViewEntity`
-
-Returns information about the view.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoView view = db.view("potatoes");
-
-ViewEntity info = view.getInfo();
-```
diff --git a/Documentation/Books/Drivers/Java/Reference/View/ViewManipulation.md b/Documentation/Books/Drivers/Java/Reference/View/ViewManipulation.md
deleted file mode 100644
index c85aa1289b01..000000000000
--- a/Documentation/Books/Drivers/Java/Reference/View/ViewManipulation.md
+++ /dev/null
@@ -1,71 +0,0 @@
-
-# Manipulating the view
-
-These functions implement
-[the HTTP API for modifying views](../../../../HTTP/Views/Modifying.html).
-
-## ArangoDatabase.createView
-
-`ArangoDatabase.createView(String name, ViewType type) : ViewEntity`
-
-Creates a view of the given _type_, then returns view information from the server.
-
-**Arguments**
-
-- **name**: `String`
-
- The name of the view
-
-- **type**: `ViewType`
-
- The type of the view
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-db.createView("myView", ViewType.ARANGO_SEARCH);
-// the view "potatoes" now exists
-```
-
-## ArangoView.rename
-
-`ArangoView.rename(String newName) : ViewEntity`
-
-Renames the view.
-
-**Arguments**
-
-- **newName**: `String`
-
- The new name
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoView view = db.view("some-view");
-
-ViewEntity result = view.rename("new-view-name");
-assertThat(result.getName(), is("new-view-name"));
-// result contains additional information about the view
-```
-
-## ArangoView.drop
-
-`ArangoView.drop() : void`
-
-Deletes the view from the database.
-
-**Examples**
-
-```Java
-ArangoDB arango = new ArangoDB.Builder().build();
-ArangoDatabase db = arango.db("myDB");
-ArangoView view = db.view("some-view");
-
-view.drop();
-// the view "some-view" no longer exists
-```
diff --git a/Documentation/Books/Drivers/PHP/GettingStarted/README.md b/Documentation/Books/Drivers/PHP/GettingStarted/README.md
deleted file mode 100644
index 5c00b2a7ad1a..000000000000
--- a/Documentation/Books/Drivers/PHP/GettingStarted/README.md
+++ /dev/null
@@ -1,78 +0,0 @@
-
-# ArangoDB-PHP - Getting Started
-## Description
-
-This PHP client allows REST-based access to documents on the server.
-The *DocumentHandler* class should be used for these purposes.
-There is an example for REST-based document access in the file examples/document.php.
-
-Furthermore, the PHP client also allows you to issue more complex AQL queries using the *Statement* class.
-There is an example for this kind of statement in the file examples/select.php.
-
-To use the PHP client, you must include the file autoloader.php from the main directory.
-The autoloader takes care of loading additionally required classes on the fly. It can be nested with other autoloaders.
-
-The ArangoDB PHP client is an API that allows you to send documents to and retrieve documents from ArangoDB from within your PHP application. The client library itself is written in PHP and has no dependencies other than plain PHP 5.6 (or higher).
-
-The client library provides document and collection classes you can use to work with documents and collections in an OO fashion. When exchanging document data with the server, the library internally uses the [HTTP REST interface of ArangoDB](../../../HTTP/index.html). The library user does not have to care about these details, as the REST interface is fully abstracted by the client library.
-
-## Requirements
-
-* PHP version 5.6 or higher (Travis-tested with PHP 5.6, 7.0, 7.1 and hhvm)
-
-Note on PHP version support:
-
-This driver will cease to support old PHP versions as soon as they have reached end-of-life status. Support will be removed with the next minor or patch version of the driver to be released.
-
-In general, it is recommended to always use the latest PHP versions (currently those in the PHP 7 line) in order to take advantage of all the improvements (especially in performance).
-
-### Important version information on ArangoDB-PHP
-
-The ArangoDB-PHP driver version has to match the ArangoDB version:
-
-- ArangoDB-PHP 3.1.x is on par with the functionality of ArangoDB 3.1.x
-- ArangoDB-PHP 3.2.x is on par with the functionality of ArangoDB 3.2.x
-- ArangoDB-PHP 3.3.x is on par with the functionality of ArangoDB 3.3.x
-
-etc...
-
-
-### Installing the PHP client
-
-To get started you need PHP 5.6 or higher plus an ArangoDB server running on any host that you can access.
-
-There are two alternative ways to get the ArangoDB PHP client:
-
- * Using Composer
- * Cloning the git repository
-
-#### Alternative 1: Using Composer
-
-```
-composer require triagens/arangodb
-```
-
-#### Alternative 2: Cloning the git repository
-
-If you prefer this alternative, you need to have a git client installed. To clone the ArangoDB PHP client repository from GitHub, execute the following command in your project directory:
-
- git clone "https://github.com/arangodb/arangodb-php.git"
-
-
-This will create a subdirectory arangodb-php in your current directory. It contains all the files of the client library. It also includes a dedicated autoloader that you can use for autoloading the client library's class files.
-To invoke this autoloader, add the following line to your PHP files that will use the library:
-
-```php
-require 'arangodb-php/autoload.php';
-```
-
-
-The ArangoDB PHP client's autoloader will only care about its own class files and will not handle any other files. That means it is fully nestable with other autoloaders.
-
-#### Alternative 3: Invoking the autoloader directly
-
-If you do not wish to include autoload.php to load and setup the autoloader, you can invoke the autoloader directly:
-
-```php
-require 'arangodb-php/lib/ArangoDBClient/autoloader.php';
-\ArangoDBClient\Autoloader::init();
-```
diff --git a/Documentation/Books/Drivers/PHP/README.md b/Documentation/Books/Drivers/PHP/README.md
deleted file mode 100644
index 967e1a58e7be..000000000000
--- a/Documentation/Books/Drivers/PHP/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-
-# ArangoDB-PHP - A PHP client for ArangoDB
-The official ArangoDB PHP Driver.
-
-- [Getting Started](GettingStarted/README.md)
-- [Tutorial](Tutorial/README.md)
-- [Changelog](https://github.com/arangodb/arangodb-php/blob/devel/CHANGELOG.md#readme)
-
-# More information
-
-* Check the ArangoDB PHP client on github.com regularly for new releases and updates: [https://github.com/arangodb/arangodb-php](https://github.com/arangodb/arangodb-php)
-
-* More example code, containing some code to create, delete and rename collections, is provided in the [examples](https://github.com/arangodb/arangodb-php/tree/devel/examples) subdirectory that is provided with the library.
-
-* [PHPDoc documentation](http://arangodb.github.io/arangodb-php/) for the complete library
-
-* [Follow us on Twitter](https://twitter.com/arangodbphp)
- [@arangodbphp](https://twitter.com/arangodbphp) to receive updates on the PHP driver
diff --git a/Documentation/Books/Drivers/PHP/Tutorial/README.md b/Documentation/Books/Drivers/PHP/Tutorial/README.md
deleted file mode 100644
index c69bfb5a1839..000000000000
--- a/Documentation/Books/Drivers/PHP/Tutorial/README.md
+++ /dev/null
@@ -1,889 +0,0 @@
-
-# ArangoDB-PHP - Tutorial
-## Setting up the connection options
-
-In order to use ArangoDB, you need to specify the connection options. We do so by creating a PHP array $connectionOptions. Put this code into a file named test.php in your current directory:
-
-```php
-// use the following line when using Composer
-// require __DIR__ . '/vendor/composer/autoload.php';
-
-// use the following line when using git
-require __DIR__ . '/arangodb-php/autoload.php';
-
-// set up some aliases for less typing later
-use ArangoDBClient\Collection as ArangoCollection;
-use ArangoDBClient\CollectionHandler as ArangoCollectionHandler;
-use ArangoDBClient\Connection as ArangoConnection;
-use ArangoDBClient\ConnectionOptions as ArangoConnectionOptions;
-use ArangoDBClient\DocumentHandler as ArangoDocumentHandler;
-use ArangoDBClient\Document as ArangoDocument;
-use ArangoDBClient\Exception as ArangoException;
-use ArangoDBClient\Export as ArangoExport;
-use ArangoDBClient\ConnectException as ArangoConnectException;
-use ArangoDBClient\ClientException as ArangoClientException;
-use ArangoDBClient\ServerException as ArangoServerException;
-use ArangoDBClient\Statement as ArangoStatement;
-use ArangoDBClient\UpdatePolicy as ArangoUpdatePolicy;
-
-// set up some basic connection options
-$connectionOptions = [
- // database name
- ArangoConnectionOptions::OPTION_DATABASE => '_system',
- // server endpoint to connect to
- ArangoConnectionOptions::OPTION_ENDPOINT => 'tcp://127.0.0.1:8529',
- // authorization type to use (currently supported: 'Basic')
- ArangoConnectionOptions::OPTION_AUTH_TYPE => 'Basic',
- // user for basic authorization
- ArangoConnectionOptions::OPTION_AUTH_USER => 'root',
- // password for basic authorization
- ArangoConnectionOptions::OPTION_AUTH_PASSWD => '',
- // connection persistence on server. can use either 'Close' (one-time connections) or 'Keep-Alive' (re-used connections)
- ArangoConnectionOptions::OPTION_CONNECTION => 'Keep-Alive',
- // connect timeout in seconds
- ArangoConnectionOptions::OPTION_TIMEOUT => 3,
- // whether or not to reconnect when a keep-alive connection has timed out on server
- ArangoConnectionOptions::OPTION_RECONNECT => true,
- // optionally create new collections when inserting documents
- ArangoConnectionOptions::OPTION_CREATE => true,
- // policy to apply when updating a document that was changed concurrently (here: last update wins)
- ArangoConnectionOptions::OPTION_UPDATE_POLICY => ArangoUpdatePolicy::LAST,
-];
-
-
-// turn on exception logging (logs to whatever PHP is configured)
-ArangoException::enableLogging();
-
-
- $connection = new ArangoConnection($connectionOptions);
-
-```
-
-This will make the client connect to ArangoDB
-
-* running on localhost (OPTION_HOST)
-* on the default port 8529 (OPTION_PORT)
-* with a connection timeout of 3 seconds (OPTION_TIMEOUT)
-
-When creating new documents in a collection that does not yet exist, you have the following choices:
-
-* auto-generate a new collection: if you prefer that, set OPTION_CREATE to true
-* fail with an error: if you prefer this behavior, set OPTION_CREATE to false
-
-When updating a document that was previously/concurrently updated by another user, you can select between the following behaviors:
-
-* last update wins: if you prefer this, set OPTION_UPDATE_POLICY to last
-* fail with a conflict error: if you prefer that, set OPTION_UPDATE_POLICY to conflict
-
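-As a minimal sketch (reusing the aliases defined above), the stricter variant of
-these choices could look like the following; the constant name for the
-conflict-error update policy is not spelled out here because it depends on the
-driver version:
-
-```php
-// fail with an error instead of auto-creating missing collections
-$connectionOptions[ArangoConnectionOptions::OPTION_CREATE] = false;
-
-// 'last update wins' behavior; consult ArangoDBClient\UpdatePolicy for the
-// constant that raises a conflict error instead
-$connectionOptions[ArangoConnectionOptions::OPTION_UPDATE_POLICY] = ArangoUpdatePolicy::LAST;
-```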
-
-## Setting up active failover
-
-By default the PHP client will connect to a single endpoint only, which is
-specified as a string value for the endpoint in the `ConnectionOptions`,
-e.g.
-
-```php
-$connectionOptions = [
- ArangoConnectionOptions::OPTION_ENDPOINT => 'tcp://127.0.0.1:8529'
-];
-```
-
-To set up multiple servers to connect to, it is also possible to specify
-an array of servers instead:
-
-```php
-$connectionOptions = [
- ConnectionOptions::OPTION_ENDPOINT => [ 'tcp://localhost:8531', 'tcp://localhost:8532', 'tcp://localhost:8530' ]
-];
-```
-
-Using this option requires ArangoDB 3.3 or higher and the database running
-in active failover mode.
-
-The driver will by default try to connect to the first server endpoint in the
-endpoints array, and only try the following servers if no connection can be
-established. If no connection can be made to any server, the driver will throw
-an exception.
-
-As it is unknown to the driver which server from the array is the current
-leader, the driver will connect to the specified servers in array order by
-default. However, to spare a few unnecessary connection attempts to failed
-servers, it is possible to set up caching (using Memcached) for the server list.
-The cached value will contain the last working server first, so that as few
-connection attempts as possible will need to be made.
-
-In order to use this caching, it is required to install the Memcached module
-for PHP, and to set up the following relevant options in the `ConnectionOptions`:
-
-```php
-$connectionOptions = [
- // memcached persistent id (will be passed to Memcached::__construct)
- ConnectionOptions::OPTION_MEMCACHED_PERSISTENT_ID => 'arangodb-php-pool',
-
- // memcached servers to connect to (will be passed to Memcached::addServers)
- ConnectionOptions::OPTION_MEMCACHED_SERVERS => [ [ '127.0.0.1', 11211 ] ],
-
- // memcached options (will be passed to Memcached::setOptions)
- ConnectionOptions::OPTION_MEMCACHED_OPTIONS => [ ],
-
- // key to store the current endpoints array under
- ConnectionOptions::OPTION_MEMCACHED_ENDPOINTS_KEY => 'arangodb-php-endpoints',
-
- // time-to-live for the endpoints array stored in memcached
- ConnectionOptions::OPTION_MEMCACHED_TTL => 600
-];
-```
-
-
-## Creating a collection
-*This is just to show how a collection is created.*
-*For these examples it is not necessary to create a collection prior to inserting documents, as we have set ArangoConnectionOptions::OPTION_CREATE to true.*
-
-So, after we have set up the connection, we can start by creating a collection. We will create a collection named "users".
-
-The code below first sets up the collection locally in a variable named $userCollection, then pushes it to the server and returns the collection id created by the server:
-
-```php
- $collectionHandler = new ArangoCollectionHandler($connection);
-
- // clean up first
- if ($collectionHandler->has('users')) {
- $collectionHandler->drop('users');
- }
- if ($collectionHandler->has('example')) {
- $collectionHandler->drop('example');
- }
-
- // create a new collection
- $userCollection = new ArangoCollection();
- $userCollection->setName('users');
- $id = $collectionHandler->create($userCollection);
-
- // print the collection id created by the server
- var_dump($id);
- // check if the collection exists
- $result = $collectionHandler->has('users');
- var_dump($result);
-
-```
-
-## Creating a document
-
-After we have created the collection, we can create an initial document. We will create a user document in a collection named "users". This collection does not need to exist yet, because the first document inserted into it would create the collection on the fly; this is because we have set OPTION_CREATE to true in $connectionOptions.
-
-The code below first sets up the document locally in a variable named $user, then pushes it to the server and returns the document id created by the server:
-
-```php
- $handler = new ArangoDocumentHandler($connection);
-
- // create a new document
- $user = new ArangoDocument();
-
- // use set method to set document properties
- $user->set('name', 'John');
- $user->set('age', 25);
- $user->set('thisIsNull', null);
-
- // use magic methods to set document properties
- $user->likes = ['fishing', 'hiking', 'swimming'];
-
- // send the document to the server
- $id = $handler->save('users', $user);
-
- // check if a document exists
- $result = $handler->has('users', $id);
- var_dump($result);
-
- // print the document id created by the server
- var_dump($id);
- var_dump($user->getId());
-```
-
-Document properties can be set by using the set() method, or by directly manipulating the document properties.
-
-As you can see, sending a document to the server is achieved by calling the save() method on the client library's *DocumentHandler* class. It needs the collection name ("users" in this case) plus the document object to be saved. save() will return the document id as created by the server. The id is a numeric value that might or might not fit in a PHP integer.
-
-## Adding exception handling
-
-
-The above code will work, but it does not check for any errors. To make it work in the face of errors, we'll wrap it into some basic exception handlers:
-
-```php
-try {
- $handler = new ArangoDocumentHandler($connection);
-
- // create a new document
- $user = new ArangoDocument();
-
- // use set method to set document properties
- $user->set('name', 'John');
- $user->set('age', 25);
-
- // use magic methods to set document properties
- $user->likes = ['fishing', 'hiking', 'swimming'];
-
- // send the document to the server
- $id = $handler->save('users', $user);
-
- // check if a document exists
- $result = $handler->has('users', $id);
- var_dump($result);
-
- // print the document id created by the server
- var_dump($id);
- var_dump($user->getId());
-} catch (ArangoConnectException $e) {
- print 'Connection error: ' . $e->getMessage() . PHP_EOL;
-} catch (ArangoClientException $e) {
- print 'Client error: ' . $e->getMessage() . PHP_EOL;
-} catch (ArangoServerException $e) {
- print 'Server error: ' . $e->getServerCode() . ':' . $e->getServerMessage() . ' ' . $e->getMessage() . PHP_EOL;
-}
-```
-
-## Retrieving a document
-
-To retrieve a document from the server, the get() method of the *DocumentHandler* class can be used. It needs the collection name plus a document id. There is also the getById() method which is an alias for get().
-
-```php
- // get the document back from the server
- $userFromServer = $handler->get('users', $id);
- var_dump($userFromServer);
-
-/*
-The result of the get() method is a Document object that you can use in an OO fashion:
-
-object(ArangoDBClient\Document)##6 (4) {
- ["_id":"ArangoDBClient\Document":private]=>
- string(15) "2377907/4818344"
- ["_rev":"ArangoDBClient\Document":private]=>
- int(4818344)
- ["_values":"ArangoDBClient\Document":private]=>
- array(3) {
- ["age"]=>
- int(25)
- ["name"]=>
- string(4) "John"
- ["likes"]=>
- array(3) {
- [0]=>
- string(7) "fishing"
- [1]=>
- string(6) "hiking"
- [2]=>
- string(8) "swimming"
- }
- }
- ["_changed":"ArangoDBClient\Document":private]=>
- bool(false)
-}
-*/
-```
-
-Whenever the document id is not yet known, but you want to fetch a document from the server by any of its other properties, you can use the CollectionHandler->byExample() method. It allows you to provide an example of the document that you are looking for. The example should either be a Document object with the relevant properties set, or a PHP array with the properties that you are looking for:
-
-```php
- // get a document list back from the server, using a document example
- $cursor = $collectionHandler->byExample('users', ['name' => 'John']);
- var_dump($cursor->getAll());
-
-```
-
-This will return all documents from the specified collection (here: "users") that have the properties provided in the example (here: an attribute "name" with a value of "John"). The result is a cursor which can be iterated sequentially or fetched completely. We have chosen to get the complete result set above by calling the cursor's getAll() method.
-Note that CollectionHandler->byExample() might return multiple documents if the example is ambiguous.
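-
-If you do not want to materialize the whole result set at once, you can also
-traverse the cursor incrementally. A minimal sketch, assuming the driver's
-cursor can be used with PHP's foreach like any other traversable object:
-
-```php
-// iterate the cursor sequentially instead of fetching everything via getAll()
-$cursor = $collectionHandler->byExample('users', ['name' => 'John']);
-foreach ($cursor as $userDoc) {
-    var_dump($userDoc);
-}
-```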
-
-## Updating a document
-
-
-To update an existing document, the update() method of the *DocumentHandler* class can be used.
-In this example we want to
-- set the `state` attribute to 'CA'
-- change the `likes` array.
-
-```php
- // update a document
- $userFromServer->likes = ['fishing', 'swimming'];
- $userFromServer->state = 'CA';
-
- $result = $handler->update($userFromServer);
- var_dump($result);
-
- $userFromServer = $handler->get('users', $id);
- var_dump($userFromServer);
-
-```
-
-To remove an attribute using the update() method, an option has to be passed telling it to not keep attributes with null values.
-In this example we want to
-- remove the `age` attribute.
-
-```php
- // update a document removing an attribute,
- // The 'keepNull'=>false option will cause ArangoDB to
- // remove all attributes in the document,
- // that have null as their value - not only the ones defined here
-
- $userFromServer->likes = ['fishing', 'swimming'];
- $userFromServer->state = 'CA';
- $userFromServer->age = null;
-
- $result = $handler->update($userFromServer, ['keepNull' => false]);
- var_dump($result);
-
- $userFromServer = $handler->get('users', $id);
- var_dump($userFromServer);
-```
-
-To completely replace an existing document, the replace() method of the *DocumentHandler* class can be used.
-In this example we want to remove the `state` attribute.
-
-```php
- // replace a document (notice that we are using the previously fetched document)
- // In this example we are removing the state attribute
- unset($userFromServer->state);
-
- $result = $handler->replace($userFromServer);
- var_dump($result);
-
- $userFromServer = $handler->get('users', $id);
- var_dump($userFromServer);
-```
-
-The document that is replaced using the previous example must have been fetched from the server before. If you want to replace a document without having fetched it from the server first, use replaceById():
-
-```php
- // replace a document, identified by collection and document id
- $user = new ArangoDocument();
- $user->name = 'John';
- $user->likes = ['Running', 'Rowing'];
- $user->state = 'CA';
-
- // Notice that for the example we're getting the existing
- // document id via a method call. Normally we would use the known id
- $result = $handler->replaceById('users', $userFromServer->getId(), $user);
- var_dump($result);
-
- $userFromServer = $handler->get('users', $id);
- var_dump($userFromServer);
-
-```
-
-## Deleting a document
-
-To remove an existing document on the server, the remove() method of the *DocumentHandler* class will do. remove() just needs the document to be removed as a parameter:
-
-```php
- // remove a document on the server, using a document object
- $result = $handler->remove($userFromServer);
- var_dump($result);
-```
-
-Note that the document must have been fetched from the server before. If you haven't fetched the document from the server before, use the removeById() method. This requires just the collection name (here: "users") and the document id.
-
-```php
- // remove a document on the server, using a collection id and document id
- // In this example, we are using the id of the document we deleted in the previous example,
- // so it will throw an exception here. (we are catching it though, in order to continue)
-
- try {
- $result = $handler->removeById('users', $userFromServer->getId());
- } catch (\ArangoDBClient\ServerException $e) {
- $e->getMessage();
- }
-```
-
-
-## Running an AQL query
-
-
-To run an AQL query, use the *Statement* class.
-
-The method Statement::execute creates a Cursor object which can be used to iterate over
-the query's result set.
-
-```php
- // create a statement to insert 1000 test users
- $statement = new ArangoStatement(
- $connection, [
- 'query' => 'FOR i IN 1..1000 INSERT { _key: CONCAT("test", i) } IN users'
- ]
- );
-
- // execute the statement
- $cursor = $statement->execute();
-
-
- // now run another query on the data, using bind parameters
- $statement = new ArangoStatement(
- $connection, [
- 'query' => 'FOR u IN @@collection FILTER u.name == @name RETURN u',
- 'bindVars' => [
- '@collection' => 'users',
- 'name' => 'John'
- ]
- ]
- );
-
- // executing the statement returns a cursor
- $cursor = $statement->execute();
-
- // easiest way to get all results returned by the cursor
- var_dump($cursor->getAll());
-
- // to get statistics for the query, use Cursor::getExtra();
- var_dump($cursor->getExtra());
-
-```
-
-Note: by default the Statement object will create a Cursor that converts each value into
-a Document object. This is normally the intended behavior for AQL queries that return
-entire documents. However, an AQL query can also return projections or any other data
-that cannot be converted into Document objects.
-
-In order to suppress the conversion into Document objects, the Statement must be given
-the `_flat` attribute. This allows processing the results of arbitrary AQL queries:
-
-
-```php
- // run an AQL query that does not return documents but scalars
- // we need to set the _flat attribute of the Statement in order for this to work
- $statement = new ArangoStatement(
- $connection, [
- 'query' => 'FOR i IN 1..1000 RETURN i',
- '_flat' => true
- ]
- );
-
- // executing the statement returns a cursor
- $cursor = $statement->execute();
-
- // easiest way to get all results returned by the cursor
- // note that now the results won't be converted into Document objects
- var_dump($cursor->getAll());
-
-```
-
-
-## Exporting data
-
-
-To export the contents of a collection to PHP, use the *Export* class.
-The *Export* class will create a light-weight cursor over all documents
-of the specified collection. The results can be transferred to PHP
-in chunks incrementally. This is the most efficient way of iterating
-over all documents in a collection.
-
-
-```php
- // creates an export object for collection users
- $export = new ArangoExport($connection, 'users', []);
-
- // execute the export. this will return a special, forward-only cursor
- $cursor = $export->execute();
-
- // now we can fetch the documents from the collection in blocks
- while ($docs = $cursor->getNextBatch()) {
- // do something with $docs
- var_dump($docs);
- }
-
- // the export can also be restricted to just a few attributes per document:
- $export = new ArangoExport(
- $connection, 'users', [
- '_flat' => true,
- 'restrict' => [
- 'type' => 'include',
- 'fields' => ['_key', 'likes']
- ]
- ]
- );
-
- // execute the restricted export and fetch just the configured attributes
- $cursor = $export->execute();
- while ($docs = $cursor->getNextBatch()) {
- // do something with $docs
- var_dump($docs);
- }
-```
-
-## Bulk document handling
-
-
-The ArangoDB-PHP driver provides a mechanism to easily fetch multiple documents from
-the same collection with a single request. All that needs to be provided is an array
-of document keys:
-
-
-```php
- $exampleCollection = new ArangoCollection();
- $exampleCollection->setName('example');
- $id = $collectionHandler->create($exampleCollection);
-
- // create a statement to insert 100 example documents
- $statement = new ArangoStatement(
- $connection, [
- 'query' => 'FOR i IN 1..100 INSERT { _key: CONCAT("example", i), value: i } IN example'
- ]
- );
- $statement->execute();
-
- // later on, we can assemble a list of document keys
- $keys = [];
- for ($i = 1; $i <= 100; ++$i) {
- $keys[] = 'example' . $i;
- }
- // and fetch all the documents at once
- $documents = $collectionHandler->lookupByKeys('example', $keys);
- var_dump($documents);
-
- // we can also bulk-remove them:
- $result = $collectionHandler->removeByKeys('example', $keys);
-
- var_dump($result);
-
-
-```
-## Dropping a collection
-
-
-To drop an existing collection on the server, use the drop() method of the *CollectionHandler* class.
-drop() just needs the name of the collection to be dropped:
-
-```php
- // drop a collection on the server, using its name,
- $result = $collectionHandler->drop('users');
- var_dump($result);
-
- // drop the other one we created, too
- $collectionHandler->drop('example');
-```
-
-## Custom Document class
-
-If you want to use a custom document class, you can pass its name to DocumentHandler or CollectionHandler using the method `setDocumentClass`.
-Remember that your class must extend `\ArangoDBClient\Document`.
-
-```php
-$ch = new CollectionHandler($connection);
-$ch->setDocumentClass('\AppBundle\Entity\Product');
-$cursor = $ch->all('product');
-// All returned documents will be \AppBundle\Entity\Product instances
-
-
-$dh = new DocumentHandler($connection);
-$dh->setDocumentClass('\AppBundle\Entity\Product');
-$product = $dh->get('products', 11231234);
-// Product will be \AppBundle\Entity\Product instance
-```
-
-See file examples/customDocumentClass.php for more details.
-
-## Logging exceptions
-
-
-The driver provides a simple logging mechanism that is turned off by default. If it is turned on, the driver
-will log all its exceptions using PHP's standard `error_log` mechanism. It will call PHP's `error_log()`
-function for this. It depends on the PHP configuration if and where exceptions will be logged. Please consult
-your php.ini settings for further details.
-
-To turn on exception logging in the driver, set a flag on the driver's Exception base class, from which all
-driver exceptions are subclassed:
-
-```php
-use ArangoDBClient\Exception as ArangoException;
-
-ArangoException::enableLogging();
-```
-
-To turn logging off, call its `disableLogging` method:
-
-```php
-use ArangoDBClient\Exception as ArangoException;
-
-ArangoException::disableLogging();
-```
-
-## Putting it all together
-
-Here's the full code that combines all the pieces outlined above:
-
-```php
-// use the following line when using Composer
-// require __DIR__ . '/vendor/composer/autoload.php';
-
-// use the following line when using git
-require __DIR__ . '/autoload.php';
-
-// set up some aliases for less typing later
-use ArangoDBClient\Collection as ArangoCollection;
-use ArangoDBClient\CollectionHandler as ArangoCollectionHandler;
-use ArangoDBClient\Connection as ArangoConnection;
-use ArangoDBClient\ConnectionOptions as ArangoConnectionOptions;
-use ArangoDBClient\DocumentHandler as ArangoDocumentHandler;
-use ArangoDBClient\Document as ArangoDocument;
-use ArangoDBClient\Exception as ArangoException;
-use ArangoDBClient\Export as ArangoExport;
-use ArangoDBClient\ConnectException as ArangoConnectException;
-use ArangoDBClient\ClientException as ArangoClientException;
-use ArangoDBClient\ServerException as ArangoServerException;
-use ArangoDBClient\Statement as ArangoStatement;
-use ArangoDBClient\UpdatePolicy as ArangoUpdatePolicy;
-
-// set up some basic connection options
-$connectionOptions = [
- // database name
- ArangoConnectionOptions::OPTION_DATABASE => '_system',
- // server endpoint to connect to
- ArangoConnectionOptions::OPTION_ENDPOINT => 'tcp://127.0.0.1:8529',
- // authorization type to use (currently supported: 'Basic')
- ArangoConnectionOptions::OPTION_AUTH_TYPE => 'Basic',
- // user for basic authorization
- ArangoConnectionOptions::OPTION_AUTH_USER => 'root',
- // password for basic authorization
- ArangoConnectionOptions::OPTION_AUTH_PASSWD => '',
- // connection persistence on server. can use either 'Close' (one-time connections) or 'Keep-Alive' (re-used connections)
- ArangoConnectionOptions::OPTION_CONNECTION => 'Keep-Alive',
- // connect timeout in seconds
- ArangoConnectionOptions::OPTION_TIMEOUT => 3,
- // whether or not to reconnect when a keep-alive connection has timed out on server
- ArangoConnectionOptions::OPTION_RECONNECT => true,
- // optionally create new collections when inserting documents
- ArangoConnectionOptions::OPTION_CREATE => true,
- // policy to apply when updating a document that was changed concurrently (here: last update wins)
- ArangoConnectionOptions::OPTION_UPDATE_POLICY => ArangoUpdatePolicy::LAST,
-];
-
-
-// turn on exception logging (logs to whatever PHP is configured)
-ArangoException::enableLogging();
-
-try {
- $connection = new ArangoConnection($connectionOptions);
-
- $collectionHandler = new ArangoCollectionHandler($connection);
-
- // clean up first
- if ($collectionHandler->has('users')) {
- $collectionHandler->drop('users');
- }
- if ($collectionHandler->has('example')) {
- $collectionHandler->drop('example');
- }
-
- // create a new collection
- $userCollection = new ArangoCollection();
- $userCollection->setName('users');
- $id = $collectionHandler->create($userCollection);
-
- // print the collection id created by the server
- var_dump($id);
-
- // check if the collection exists
- $result = $collectionHandler->has('users');
- var_dump($result);
-
- $handler = new ArangoDocumentHandler($connection);
-
- // create a new document
- $user = new ArangoDocument();
-
- // use set method to set document properties
- $user->set('name', 'John');
- $user->set('age', 25);
- $user->set('thisIsNull', null);
-
- // use magic methods to set document properties
- $user->likes = ['fishing', 'hiking', 'swimming'];
-
- // send the document to the server
- $id = $handler->save('users', $user);
-
- // check if a document exists
- $result = $handler->has('users', $id);
- var_dump($result);
-
- // print the document id created by the server
- var_dump($id);
- var_dump($user->getId());
-
-
- // get the document back from the server
- $userFromServer = $handler->get('users', $id);
- var_dump($userFromServer);
-
- // get a document list back from the server, using a document example
- $cursor = $collectionHandler->byExample('users', ['name' => 'John']);
- var_dump($cursor->getAll());
-
-
- // update a document
- $userFromServer->likes = ['fishing', 'swimming'];
- $userFromServer->state = 'CA';
-
- $result = $handler->update($userFromServer);
- var_dump($result);
-
- $userFromServer = $handler->get('users', $id);
- var_dump($userFromServer);
-
-
- // update a document removing an attribute,
- // The 'keepNull'=>false option will cause ArangoDB to
- // remove all attributes in the document,
- // that have null as their value - not only the ones defined here
-
- $userFromServer->likes = ['fishing', 'swimming'];
- $userFromServer->state = 'CA';
- $userFromServer->age = null;
-
- $result = $handler->update($userFromServer, ['keepNull' => false]);
- var_dump($result);
-
- $userFromServer = $handler->get('users', $id);
- var_dump($userFromServer);
-
-
- // replace a document (notice that we are using the previously fetched document)
- // In this example we are removing the state attribute
- unset($userFromServer->state);
-
- $result = $handler->replace($userFromServer);
- var_dump($result);
-
- $userFromServer = $handler->get('users', $id);
- var_dump($userFromServer);
-
-
- // replace a document, identified by collection and document id
- $user = new ArangoDocument();
- $user->name = 'John';
- $user->likes = ['Running', 'Rowing'];
- $user->state = 'CA';
-
- // Notice that for the example we're getting the existing
- // document id via a method call. Normally we would use the known id
- $result = $handler->replaceById('users', $userFromServer->getId(), $user);
- var_dump($result);
-
- $userFromServer = $handler->get('users', $id);
- var_dump($userFromServer);
-
-
- // remove a document on the server
- $result = $handler->remove($userFromServer);
- var_dump($result);
-
-
- // remove a document on the server, using a collection id and document id
- // In this example, we are using the id of the document we deleted in the previous example,
- // so it will throw an exception here. (we are catching it though, in order to continue)
-
- try {
- $result = $handler->removeById('users', $userFromServer->getId());
- } catch (\ArangoDBClient\ServerException $e) {
- $e->getMessage();
- }
-
-
-
- // create a statement to insert 1000 test users
- $statement = new ArangoStatement(
- $connection, [
- 'query' => 'FOR i IN 1..1000 INSERT { _key: CONCAT("test", i) } IN users'
- ]
- );
-
- // execute the statement
- $cursor = $statement->execute();
-
-
- // now run another query on the data, using bind parameters
- $statement = new ArangoStatement(
- $connection, [
- 'query' => 'FOR u IN @@collection FILTER u.name == @name RETURN u',
- 'bindVars' => [
- '@collection' => 'users',
- 'name' => 'John'
- ]
- ]
- );
-
- // executing the statement returns a cursor
- $cursor = $statement->execute();
-
- // easiest way to get all results returned by the cursor
- var_dump($cursor->getAll());
-
- // to get statistics for the query, use Cursor::getExtra();
- var_dump($cursor->getExtra());
-
-
- // creates an export object for collection users
- $export = new ArangoExport($connection, 'users', []);
-
- // execute the export. this will return a special, forward-only cursor
- $cursor = $export->execute();
-
- // now we can fetch the documents from the collection in blocks
- while ($docs = $cursor->getNextBatch()) {
- // do something with $docs
- var_dump($docs);
- }
-
- // the export can also be restricted to just a few attributes per document:
- $export = new ArangoExport(
- $connection, 'users', [
- '_flat' => true,
- 'restrict' => [
- 'type' => 'include',
- 'fields' => ['_key', 'likes']
- ]
- ]
- );
-
- // execute the restricted export and fetch just the configured attributes
- $cursor = $export->execute();
- while ($docs = $cursor->getNextBatch()) {
- // do something with $docs
- var_dump($docs);
- }
-
-
- $exampleCollection = new ArangoCollection();
- $exampleCollection->setName('example');
- $id = $collectionHandler->create($exampleCollection);
-
- // create a statement to insert 100 example documents
- $statement = new ArangoStatement(
- $connection, [
- 'query' => 'FOR i IN 1..100 INSERT { _key: CONCAT("example", i), value: i } IN example'
- ]
- );
- $statement->execute();
-
- // later on, we can assemble a list of document keys
- $keys = [];
- for ($i = 1; $i <= 100; ++$i) {
- $keys[] = 'example' . $i;
- }
- // and fetch all the documents at once
- $documents = $collectionHandler->lookupByKeys('example', $keys);
- var_dump($documents);
-
- // we can also bulk-remove them:
- $result = $collectionHandler->removeByKeys('example', $keys);
-
- var_dump($result);
-
-
- // drop a collection on the server, using its name,
- $result = $collectionHandler->drop('users');
- var_dump($result);
-
- // drop the other one we created, too
- $collectionHandler->drop('example');
-} catch (ArangoConnectException $e) {
- print 'Connection error: ' . $e->getMessage() . PHP_EOL;
-} catch (ArangoClientException $e) {
- print 'Client error: ' . $e->getMessage() . PHP_EOL;
-} catch (ArangoServerException $e) {
- print 'Server error: ' . $e->getServerCode() . ': ' . $e->getServerMessage() . ' - ' . $e->getMessage() . PHP_EOL;
-}
-
-```
diff --git a/Documentation/Books/Drivers/README.md b/Documentation/Books/Drivers/README.md
deleted file mode 100644
index 8542c6d3902f..000000000000
--- a/Documentation/Books/Drivers/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-page-toc:
- disable: true
----
-ArangoDB VERSION_NUMBER Drivers Documentation
-=============================================
-
-Official drivers
-----------------
-
-Name | Language | Repository | Changelog
------|----------|------------|----------
-[ArangoDB-Java-Driver](Java/README.md) | Java | https://github.com/arangodb/arangodb-java-driver | [Changelog](https://github.com/arangodb/arangodb-java-driver/blob/master/ChangeLog.md#readme)
-ArangoDB-Java-Driver-Async | Java | https://github.com/arangodb/arangodb-java-driver-async | [Changelog](https://github.com/arangodb/arangodb-java-driver-async/blob/master/ChangeLog.md#readme)
-[ArangoJS](JS/README.md) | JavaScript | https://github.com/arangodb/arangojs | [Changelog](https://github.com/arangodb/arangojs/blob/master/CHANGELOG.md#readme)
-[ArangoDB-PHP](PHP/README.md) | PHP | https://github.com/arangodb/arangodb-php | [Changelog](https://github.com/arangodb/arangodb-php/blob/devel/CHANGELOG.md#readme)
-[Go-Driver](GO/README.md) | Go | https://github.com/arangodb/go-driver | [Changelog](https://github.com/arangodb/go-driver/blob/master/CHANGELOG.md#readme)
-
-Integrations
-------------
-
-Name | Language | Repository | Changelog
------|----------|------------|----------
-[Spring Data](SpringData/README.md) | Java | https://github.com/arangodb/spring-data | [Changelog](https://github.com/arangodb/spring-data/blob/master/ChangeLog.md#readme)
-[ArangoDB-Spark-Connector](SparkConnector/README.md) | Scala, Java | https://github.com/arangodb/arangodb-spark-connector | [Changelog](https://github.com/arangodb/arangodb-spark-connector/blob/master/ChangeLog.md#readme)
-
-Community drivers
------------------
-
-Please note that this list is not exhaustive.
-
-Name | Language | Repository
------|----------|-----------
-ArangoDB-PHP-Core | PHP | https://github.com/frankmayer/ArangoDB-PHP-Core
-ArangoDB-NET | .NET | https://github.com/yojimbo87/ArangoDB-NET
-aranGO | Go | https://github.com/diegogub/aranGO
-arangolite | Go | https://github.com/solher/arangolite
-aranGoDriver | Go | https://github.com/TobiEiss/aranGoDriver
-pyArango | Python | http://www.github.com/tariqdaouda/pyArango
-python-arango | Python | https://github.com/Joowani/python-arango
-Scarango | Scala | https://github.com/outr/scarango
-ArangoRB | Ruby | https://github.com/StefanoMartin/ArangoRB
diff --git a/Documentation/Books/Drivers/SUMMARY.md b/Documentation/Books/Drivers/SUMMARY.md
deleted file mode 100644
index 61b4e2a78659..000000000000
--- a/Documentation/Books/Drivers/SUMMARY.md
+++ /dev/null
@@ -1,116 +0,0 @@
-
-# Summary
-
-* [Introduction](README.md)
-
-## Official Drivers
-
-
-* [Java Driver](Java/README.md)
- * [Getting Started](Java/GettingStarted/README.md)
- * [Reference](Java/Reference/README.md)
- * [Driver Setup](Java/Reference/Setup.md)
- * [Database](Java/Reference/Database/README.md)
- * [Database Manipulation](Java/Reference/Database/DatabaseManipulation.md)
- * [Collection Access](Java/Reference/Database/CollectionAccess.md)
- * [View Access](Java/Reference/Database/ViewAccess.md)
- * [Queries](Java/Reference/Database/Queries.md)
- * [AQL User Functions](Java/Reference/Database/AqlUserFunctions.md)
- * [Transactions](Java/Reference/Database/Transactions.md)
- * [Graph Access](Java/Reference/Database/GraphAccess.md)
- * [HTTP Routes](Java/Reference/Database/HttpRoutes.md)
- * [Collection](Java/Reference/Collection/README.md)
- * [Collection Manipulation](Java/Reference/Collection/CollectionManipulation.md)
- * [Document Manipulation](Java/Reference/Collection/DocumentManipulation.md)
- * [Indexes](Java/Reference/Collection/Indexes.md)
- * [Bulk Import](Java/Reference/Collection/BulkImport.md)
- * [View](Java/Reference/View/README.md)
- * [View Manipulation](Java/Reference/View/ViewManipulation.md)
- * [ArangoSearch Views](Java/Reference/View/ArangoSearch.md)
- * [Cursor](Java/Reference/Cursor.md)
- * [Graph](Java/Reference/Graph/README.md)
- * [Vertex Collection](Java/Reference/Graph/VertexCollection.md)
- * [Edge Collection](Java/Reference/Graph/EdgeCollection.md)
- * [Vertices Manipulation](Java/Reference/Graph/Vertices.md)
- * [Edges Manipulation](Java/Reference/Graph/Edges.md)
- * [Route](Java/Reference/Route.md)
- * [Serialization](Java/Reference/Serialization.md)
-
-* [ArangoJS - JavaScript Driver](JS/README.md)
- * [Getting Started](JS/GettingStarted/README.md)
- * [Reference](JS/Reference/README.md)
- * [Database](JS/Reference/Database/README.md)
- * [Database Manipulation](JS/Reference/Database/DatabaseManipulation.md)
- * [Collection Access](JS/Reference/Database/CollectionAccess.md)
- * [View Access](JS/Reference/Database/ViewAccess.md)
- * [Queries](JS/Reference/Database/Queries.md)
- * [AQL User Functions](JS/Reference/Database/AqlUserFunctions.md)
- * [Transactions](JS/Reference/Database/Transactions.md)
- * [Graph Access](JS/Reference/Database/GraphAccess.md)
- * [Foxx Services](JS/Reference/Database/FoxxServices.md)
- * [HTTP Routes](JS/Reference/Database/HttpRoutes.md)
- * [Collection](JS/Reference/Collection/README.md)
- * [Collection Manipulation](JS/Reference/Collection/CollectionManipulation.md)
- * [Document Manipulation](JS/Reference/Collection/DocumentManipulation.md)
- * [DocumentCollection](JS/Reference/Collection/DocumentCollection.md)
- * [EdgeCollection](JS/Reference/Collection/EdgeCollection.md)
- * [Indexes](JS/Reference/Collection/Indexes.md)
- * [Simple Queries](JS/Reference/Collection/SimpleQueries.md)
- * [Bulk Import](JS/Reference/Collection/BulkImport.md)
- * [AQL Helpers](JS/Reference/Aql.md)
- * [View Manipulation](JS/Reference/ViewManipulation.md)
- * [Cursor](JS/Reference/Cursor.md)
- * [Graph](JS/Reference/Graph/README.md)
- * [Vertices](JS/Reference/Graph/Vertices.md)
- * [Edges](JS/Reference/Graph/Edges.md)
- * [VertexCollection](JS/Reference/Graph/VertexCollection.md)
- * [EdgeCollection](JS/Reference/Graph/EdgeCollection.md)
- * [Route](JS/Reference/Route.md)
-
-* [ArangoDB-PHP](PHP/README.md)
- * [Getting Started](PHP/GettingStarted/README.md)
- * [Tutorial](PHP/Tutorial/README.md)
-
-* [ArangoDB Go Driver](GO/README.md)
- * [Getting Started](GO/GettingStarted/README.md)
- * [Example Requests](GO/ExampleRequests/README.md)
- * [Connection Management](GO/ConnectionManagement/README.md)
-
-## Integrations
-
-
-* [Spring Data ArangoDB](SpringData/README.md)
- * [Getting Started](SpringData/GettingStarted/README.md)
- * [Reference](SpringData/Reference/README.md)
- * [Template](SpringData/Reference/Template/README.md)
- * [Queries](SpringData/Reference/Template/Queries.md)
- * [Document Manipulation](SpringData/Reference/Template/DocumentManipulation.md)
- * [Multiple Document Manipulation](SpringData/Reference/Template/MultiDocumentManipulation.md)
- * [Collection Manipulation](SpringData/Reference/Template/CollectionManipulation.md)
- * [Repositories](SpringData/Reference/Repositories/README.md)
- * [Queries](SpringData/Reference/Repositories/Queries/README.md)
- * [Derived queries](SpringData/Reference/Repositories/Queries/DerivedQueries.md)
- * [Query methods](SpringData/Reference/Repositories/Queries/QueryMethods.md)
- * [Named queries](SpringData/Reference/Repositories/Queries/NamedQueries.md)
- * [Document Manipulation](SpringData/Reference/Repositories/DocumentManipulation.md)
- * [Multiple Document Manipulation](SpringData/Reference/Repositories/MultiDocumentManipulation.md)
- * [Query by example](SpringData/Reference/Repositories/QueryByExample.md)
- * [Mapping](SpringData/Reference/Mapping/README.md)
- * [Document](SpringData/Reference/Mapping/Document.md)
- * [Edge](SpringData/Reference/Mapping/Edge.md)
- * [Reference](SpringData/Reference/Mapping/Reference.md)
- * [Relations](SpringData/Reference/Mapping/Relations.md)
- * [Indexes](SpringData/Reference/Mapping/Indexes.md)
- * [Converter](SpringData/Reference/Mapping/Converter.md)
- * [Events](SpringData/Reference/Mapping/Events.md)
- * [Auditing](SpringData/Reference/Mapping/Auditing.md)
- * [Migration](SpringData/Migration/README.md)
- * [Migrating 1.x to 3.0](SpringData/Migration/Migrating-1.x-3.0.md)
- * [Migrating 2.x to 3.0](SpringData/Migration/Migrating-2.x-3.0.md)
-
-* [ArangoDB Spark Connector](SparkConnector/README.md)
- * [Getting Started](SparkConnector/GettingStarted/README.md)
- * [Reference](SparkConnector/Reference/README.md)
- * [Java](SparkConnector/Reference/Java.md)
- * [Scala](SparkConnector/Reference/Scala.md)
-
diff --git a/Documentation/Books/Drivers/SparkConnector/GettingStarted/README.md b/Documentation/Books/Drivers/SparkConnector/GettingStarted/README.md
deleted file mode 100644
index 44aa66947afb..000000000000
--- a/Documentation/Books/Drivers/SparkConnector/GettingStarted/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-
-# ArangoDB Spark Connector - Getting Started
-
-## Maven
-
-```XML
-<dependencies>
-  <dependency>
-    <groupId>com.arangodb</groupId>
-    <artifactId>arangodb-spark-connector</artifactId>
-    <version>1.0.2</version>
-  </dependency>
-  ....
-</dependencies>
-```
-
-## SBT
-
-```sbt
-libraryDependencies += "com.arangodb" % "arangodb-spark-connector" % "1.0.2"
-```
-
-## Configuration
-
-| property-key | description | default value |
-| ------------------------------ | -------------------------------------- | -------------- |
-| arangodb.hosts | comma-separated list of ArangoDB hosts | 127.0.0.1:8529 |
-| arangodb.user | basic authentication user | root |
-| arangodb.password | basic authentication password | |
-| arangodb.protocol | network protocol | VST |
-| arangodb.useSsl | use SSL connection | false |
-| arangodb.ssl.keyStoreFile | SSL certificate keystore file | |
-| arangodb.ssl.passPhrase | SSL pass phrase | |
-| arangodb.ssl.protocol | SSL protocol | TLS |
-| arangodb.maxConnections | max number of connections per host | 1 |
-| arangodb.acquireHostList | auto acquire list of available hosts | false |
-| arangodb.loadBalancingStrategy | load balancing strategy to be used | NONE |
-
-## Setup SparkContext
-
-**Scala**
-
-```Scala
-val conf = new SparkConf()
- .set("arangodb.hosts", "127.0.0.1:8529")
- .set("arangodb.user", "myUser")
- .set("arangodb.password", "myPassword")
- ...
-
-val sc = new SparkContext(conf)
-```
-
-**Java**
-
-```Java
-SparkConf conf = new SparkConf()
- .set("arangodb.hosts", "127.0.0.1:8529")
- .set("arangodb.user", "myUser")
- .set("arangodb.password", "myPassword");
- ...
-
-JavaSparkContext sc = new JavaSparkContext(conf);
-```
diff --git a/Documentation/Books/Drivers/SparkConnector/README.md b/Documentation/Books/Drivers/SparkConnector/README.md
deleted file mode 100644
index 74af7cdb4f3f..000000000000
--- a/Documentation/Books/Drivers/SparkConnector/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-# ArangoDB Spark Connector
-
-- [Getting Started](GettingStarted/README.md)
-- [Reference](Reference/README.md)
-- [Changelog](https://github.com/arangodb/arangodb-spark-connector/blob/master/ChangeLog.md#readme)
diff --git a/Documentation/Books/Drivers/SparkConnector/Reference/Java.md b/Documentation/Books/Drivers/SparkConnector/Reference/Java.md
deleted file mode 100644
index 0e0295d0d06a..000000000000
--- a/Documentation/Books/Drivers/SparkConnector/Reference/Java.md
+++ /dev/null
@@ -1,224 +0,0 @@
-
-# ArangoDB Spark Connector - Java Reference
-
-## ArangoSpark.save
-
-```
-ArangoSpark.save[T](rdd: JavaRDD[T], collection: String, options: WriteOptions)
-```
-
-```
-ArangoSpark.save[T](dataset: Dataset[T], collection: String, options: WriteOptions)
-```
-
-Save data from rdd into ArangoDB
-
-**Arguments**
-
-- **rdd**: `JavaRDD[T]`
-
- The rdd with the data to save
-
-- **collection**: `String`
-
- The collection to save in
-
-- **options**: `WriteOptions`
-
- - **database**: `String`
-
- Database to write into
-
- - **hosts**: `String`
-
- Alternative hosts to context property `arangodb.hosts`
-
- - **user**: `String`
-
- Alternative user to context property `arangodb.user`
-
- - **password**: `String`
-
- Alternative password to context property `arangodb.password`
-
- - **useSsl**: `Boolean`
-
- Alternative useSsl to context property `arangodb.useSsl`
-
- - **sslKeyStoreFile**: `String`
-
- Alternative sslKeyStoreFile to context property `arangodb.ssl.keyStoreFile`
-
- - **sslPassPhrase**: `String`
-
- Alternative sslPassPhrase to context property `arangodb.ssl.passPhrase`
-
- - **sslProtocol**: `String`
-
- Alternative sslProtocol to context property `arangodb.ssl.protocol`
-
-**Examples**
-
-```Java
-JavaSparkContext sc = ...
-List<MyBean> docs = ...
-JavaRDD<MyBean> documents = sc.parallelize(docs);
-ArangoSpark.save(documents, "myCollection", new WriteOptions().database("myDB"));
-```
-
-**Very Large Datasets**
-
-To prevent errors on very large datasets (over one million objects), use `repartition` to write in smaller chunks:
-
-```Java
-ArangoSpark.save(allEdges.toJSON().repartition(20000), "mio_edges", writeOptions);
-```
-
-
-## ArangoSpark.saveDF
-
-```
-ArangoSpark.saveDF(dataframe: DataFrame, collection: String, options: WriteOptions)
-```
-
-Save data from dataframe into ArangoDB
-
-**Arguments**
-
-- **dataframe**: `DataFrame`
-
- The dataFrame with the data to save
-
-- **collection**: `String`
-
- The collection to save in
-
-- **options**: `WriteOptions`
-
- - **database**: `String`
-
- Database to write into
-
- - **hosts**: `String`
-
- Alternative hosts to context property `arangodb.hosts`
-
- - **user**: `String`
-
- Alternative user to context property `arangodb.user`
-
- - **password**: `String`
-
- Alternative password to context property `arangodb.password`
-
- - **useSsl**: `Boolean`
-
- Alternative useSsl to context property `arangodb.useSsl`
-
- - **sslKeyStoreFile**: `String`
-
- Alternative sslKeyStoreFile to context property `arangodb.ssl.keyStoreFile`
-
- - **sslPassPhrase**: `String`
-
- Alternative sslPassPhrase to context property `arangodb.ssl.passPhrase`
-
- - **sslProtocol**: `String`
-
- Alternative sslProtocol to context property `arangodb.ssl.protocol`
-
-**Examples**
-
-```Java
-JavaSparkContext sc = ...
-List<MyBean> docs = ...
-JavaRDD<MyBean> documents = sc.parallelize(docs);
-SQLContext sql = SQLContext.getOrCreate(sc);
-DataFrame df = sql.createDataFrame(documents, MyBean.class);
-ArangoSpark.saveDF(df, "myCollection", new WriteOptions().database("myDB"));
-```
-
-## ArangoSpark.load
-
-```
-ArangoSpark.load[T](sparkContext: JavaSparkContext, collection: String, options: ReadOptions, clazz: Class[T]): ArangoJavaRDD[T]
-```
-
-Load data from ArangoDB into rdd
-
-**Arguments**
-
-- **sparkContext**: `JavaSparkContext`
-
- The sparkContext containing the ArangoDB configuration
-
-- **collection**: `String`
-
- The collection to load data from
-
-- **options**: `ReadOptions`
-
- - **database**: `String`
-
- Database to read from
-
- - **hosts**: `String`
-
- Alternative hosts to context property `arangodb.hosts`
-
- - **user**: `String`
-
- Alternative user to context property `arangodb.user`
-
- - **password**: `String`
-
- Alternative password to context property `arangodb.password`
-
- - **useSsl**: `Boolean`
-
- Alternative useSsl to context property `arangodb.useSsl`
-
- - **sslKeyStoreFile**: `String`
-
- Alternative sslKeyStoreFile to context property `arangodb.ssl.keyStoreFile`
-
- - **sslPassPhrase**: `String`
-
- Alternative sslPassPhrase to context property `arangodb.ssl.passPhrase`
-
- - **sslProtocol**: `String`
-
- Alternative sslProtocol to context property `arangodb.ssl.protocol`
-
-- **clazz**: `Class[T]`
-
- The type of the document
-
-**Examples**
-
-```Java
-JavaSparkContext sc = ...
-ArangoJavaRDD<MyBean> rdd = ArangoSpark.load(sc, "myCollection", new ReadOptions().database("myDB"), MyBean.class);
-```
-
-## ArangoRDD.filter
-
-```
-ArangoJavaRDD.filter(condition: String): ArangoJavaRDD[T]
-```
-
-Adds a filter condition. If used multiple times, the conditions will be combined with a logical AND.
-
-**Arguments**
-
-- **condition**: `String`
-
- The condition for the filter statement. Use `doc` inside to reference the document. e.g. `"doc.name == 'John'"`
-
-**Examples**
-
-```Java
-JavaSparkContext sc = ...
-ArangoJavaRDD<MyBean> rdd = ArangoSpark.load(sc, "myCollection", new ReadOptions().database("myDB"), MyBean.class);
-ArangoJavaRDD<MyBean> rddFiltered = rdd.filter("doc.test <= 50");
-```
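-
-Because each call returns an `ArangoJavaRDD`, filters can be chained; the conditions are then combined with a logical AND. A small sketch (field names are illustrative):
-
-```Java
-JavaSparkContext sc = ...
-ArangoJavaRDD<MyBean> rdd = ArangoSpark.load(sc, "myCollection", new ReadOptions().database("myDB"), MyBean.class);
-// combined condition: doc.test <= 50 AND doc.name == 'John'
-ArangoJavaRDD<MyBean> rddFiltered = rdd.filter("doc.test <= 50").filter("doc.name == 'John'");
-```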
diff --git a/Documentation/Books/Drivers/SparkConnector/Reference/README.md b/Documentation/Books/Drivers/SparkConnector/Reference/README.md
deleted file mode 100644
index 83c7c0325651..000000000000
--- a/Documentation/Books/Drivers/SparkConnector/Reference/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-# ArangoDB Spark Connector - Reference
-
-- [Scala](Scala.md)
-- [Java](Java.md)
diff --git a/Documentation/Books/Drivers/SparkConnector/Reference/Scala.md b/Documentation/Books/Drivers/SparkConnector/Reference/Scala.md
deleted file mode 100644
index 6f7816a8335c..000000000000
--- a/Documentation/Books/Drivers/SparkConnector/Reference/Scala.md
+++ /dev/null
@@ -1,208 +0,0 @@
-
-# ArangoDB Spark Connector - Scala Reference
-
-## ArangoSpark.save
-
-```
-ArangoSpark.save[T](rdd: RDD[T], collection: String, options: WriteOptions)
-```
-
-```
-ArangoSpark.save[T](dataset: Dataset[T], collection: String, options: WriteOptions)
-```
-
-Save data from rdd or dataset into ArangoDB
-
-**Arguments**
-
-- **rdd**/**dataset**: `RDD[T]` or `Dataset[T]`
-
- The rdd or dataset with the data to save
-
-- **collection**: `String`
-
- The collection to save in
-
-- **options**: `WriteOptions`
-
- - **database**: `String`
-
- Database to write into
-
- - **hosts**: `String`
-
- Alternative hosts to context property `arangodb.hosts`
-
- - **user**: `String`
-
- Alternative user to context property `arangodb.user`
-
- - **password**: `String`
-
- Alternative password to context property `arangodb.password`
-
- - **useSsl**: `Boolean`
-
- Alternative useSsl to context property `arangodb.useSsl`
-
- - **sslKeyStoreFile**: `String`
-
- Alternative sslKeyStoreFile to context property `arangodb.ssl.keyStoreFile`
-
- - **sslPassPhrase**: `String`
-
- Alternative sslPassPhrase to context property `arangodb.ssl.passPhrase`
-
- - **sslProtocol**: `String`
-
- Alternative sslProtocol to context property `arangodb.ssl.protocol`
-
-**Examples**
-
-```Scala
-val sc: SparkContext = ...
-val documents = sc.parallelize((1 to 100).map { i => MyBean(i) })
-ArangoSpark.save(documents, "myCollection", WriteOptions("myDB"))
-```
-
-## ArangoSpark.saveDF
-
-```
-ArangoSpark.saveDF(dataframe: DataFrame, collection: String, options: WriteOptions)
-```
-
-Save data from dataframe into ArangoDB
-
-**Arguments**
-
-- **dataframe**: `DataFrame`
-
- The dataFrame with the data to save
-
-- **collection**: `String`
-
- The collection to save in
-
-- **options**: `WriteOptions`
-
- - **database**: `String`
-
- Database to write into
-
- - **hosts**: `String`
-
- Alternative hosts to context property `arangodb.hosts`
-
- - **user**: `String`
-
- Alternative user to context property `arangodb.user`
-
- - **password**: `String`
-
- Alternative password to context property `arangodb.password`
-
- - **useSsl**: `Boolean`
-
- Alternative useSsl to context property `arangodb.useSsl`
-
- - **sslKeyStoreFile**: `String`
-
- Alternative sslKeyStoreFile to context property `arangodb.ssl.keyStoreFile`
-
- - **sslPassPhrase**: `String`
-
- Alternative sslPassPhrase to context property `arangodb.ssl.passPhrase`
-
- - **sslProtocol**: `String`
-
- Alternative sslProtocol to context property `arangodb.ssl.protocol`
-
-**Examples**
-
-```Scala
-val sc: SparkContext = ...
-val documents = sc.parallelize((1 to 100).map { i => MyBean(i) })
-val sql: SQLContext = SQLContext.getOrCreate(sc);
-val df = sql.createDataFrame(documents, classOf[MyBean])
-ArangoSpark.saveDF(df, "myCollection", WriteOptions("myDB"))
-```
-
-## ArangoSpark.load
-
-```
-ArangoSpark.load[T: ClassTag](sparkContext: SparkContext, collection: String, options: ReadOptions): ArangoRDD[T]
-```
-
-Load data from ArangoDB into rdd
-
-**Arguments**
-
-- **sparkContext**: `SparkContext`
-
- The sparkContext containing the ArangoDB configuration
-
-- **collection**: `String`
-
- The collection to load data from
-
-- **options**: `ReadOptions`
-
- - **database**: `String`
-
- Database to read from
-
- - **hosts**: `String`
-
- Alternative hosts to context property `arangodb.hosts`
-
- - **user**: `String`
-
- Alternative user to context property `arangodb.user`
-
- - **password**: `String`
-
- Alternative password to context property `arangodb.password`
-
- - **useSsl**: `Boolean`
-
- Alternative useSsl to context property `arangodb.useSsl`
-
- - **sslKeyStoreFile**: `String`
-
- Alternative sslKeyStoreFile to context property `arangodb.ssl.keyStoreFile`
-
- - **sslPassPhrase**: `String`
-
- Alternative sslPassPhrase to context property `arangodb.ssl.passPhrase`
-
- - **sslProtocol**: `String`
-
- Alternative sslProtocol to context property `arangodb.ssl.protocol`
-
-**Examples**
-
-```Scala
-val sc: SparkContext = ...
-val rdd = ArangoSpark.load[MyBean](sc, "myCollection", ReadOptions("myDB"))
-```
-
-## ArangoRDD.filter
-
-```
-ArangoRDD.filter(condition: String): ArangoRDD[T]
-```
-
-Adds a filter condition. If used multiple times, the conditions will be combined with a logical AND.
-
-**Arguments**
-
-- **condition**: `String`
-
- The condition for the filter statement. Use `doc` inside to reference the document. e.g. `"doc.name == 'John'"`
-
-**Examples**
-
-```Scala
-val sc: SparkContext = ...
-val rdd = ArangoSpark.load[MyBean](sc, "myCollection").filter("doc.name == 'John'")
-```
diff --git a/Documentation/Books/Drivers/SpringData/GettingStarted/README.md b/Documentation/Books/Drivers/SpringData/GettingStarted/README.md
deleted file mode 100644
index 5e9ae5101e13..000000000000
--- a/Documentation/Books/Drivers/SpringData/GettingStarted/README.md
+++ /dev/null
@@ -1,119 +0,0 @@
-
-# Spring Data ArangoDB - Getting Started
-
-## Supported versions
-
-| Spring Data ArangoDB | Spring Data | ArangoDB |
-| -------------------- | ----------- | ----------- |
-| 1.3.x | 1.13.x | 3.0\*, 3.1+ |
-| 2.3.x | 2.0.x | 3.0\*, 3.1+ |
-| 3.0.x | 2.0.x | 3.0\*, 3.1+ |
-
-Spring Data ArangoDB requires ArangoDB 3.0 or higher - which you can download [here](https://www.arangodb.com/download/) - and Java 8 or higher.
-
-**Note**: ArangoDB 3.0 does not support the default transport protocol
-[VelocyStream](https://github.com/arangodb/velocystream). A manual switch to
-HTTP is required. See chapter [configuration](#configuration). Also ArangoDB 3.0
-does not support geospatial queries.
-
-## Maven
-
-To use Spring Data ArangoDB in your project, your build automation tool needs to be configured to include and use the Spring Data ArangoDB dependency. Example with Maven:
-
-```xml
-<dependency>
-  <groupId>com.arangodb</groupId>
-  <artifactId>arangodb-spring-data</artifactId>
-  <version>3.1.0</version>
-</dependency>
-```
-
-There is a [demonstration app](https://github.com/arangodb/spring-data-demo), which contains common use cases and examples of how to use Spring Data ArangoDB's functionality.
-
-## Configuration
-
-You can use Java to configure your Spring Data environment as shown below. Setting up the underlying driver (`ArangoDB.Builder`) with default configuration automatically loads a properties file `arangodb.properties`, if it exists in the classpath.
-
-```java
-@Configuration
-@EnableArangoRepositories(basePackages = { "com.company.mypackage" })
-public class MyConfiguration extends AbstractArangoConfiguration {
-
- @Override
- public ArangoDB.Builder arango() {
- return new ArangoDB.Builder();
- }
-
- @Override
- public String database() {
- // Name of the database to be used
- return "example-database";
- }
-
-}
-```
-
-The driver is configured with some default values:
-
-| property-key | description | default value |
-| ----------------- | ----------------------------------- | ------------- |
-| arangodb.host | ArangoDB host | 127.0.0.1 |
-| arangodb.port | ArangoDB port | 8529 |
-| arangodb.timeout | socket connect timeout (milliseconds) | 0 |
-| arangodb.user | Basic Authentication User | |
-| arangodb.password | Basic Authentication Password | |
-| arangodb.useSsl | use SSL connection | false |
-
-To customize the configuration, the parameters can be changed in the Java code.
-
-```java
-@Override
-public ArangoDB.Builder arango() {
- ArangoDB.Builder arango = new ArangoDB.Builder()
- .host("127.0.0.1")
- .port(8529)
- .user("root");
- return arango;
-}
-```
-
-In addition, you can use the _arangodb.properties_ file or a custom properties file to supply credentials to the driver.
-
-_Properties file_
-
-```
-arangodb.hosts=127.0.0.1:8529
-arangodb.user=root
-arangodb.password=
-```
-
-_Custom properties file_
-
-```java
-@Override
-public ArangoDB.Builder arango() {
- InputStream in = MyClass.class.getResourceAsStream("my.properties");
- ArangoDB.Builder arango = new ArangoDB.Builder()
- .loadProperties(in);
- return arango;
-}
-```
-
-**Note**: When using ArangoDB 3.0 it is required to set the transport protocol to HTTP and fetch the dependency `org.apache.httpcomponents:httpclient`.
-
-```java
-@Override
-public ArangoDB.Builder arango() {
- ArangoDB.Builder arango = new ArangoDB.Builder()
- .useProtocol(Protocol.HTTP_JSON);
- return arango;
-}
-```
-
-```xml
-<dependency>
-  <groupId>org.apache.httpcomponents</groupId>
-  <artifactId>httpclient</artifactId>
-  <version>4.5.1</version>
-</dependency>
-```
diff --git a/Documentation/Books/Drivers/SpringData/Migration/Migrating-1.x-3.0.md b/Documentation/Books/Drivers/SpringData/Migration/Migrating-1.x-3.0.md
deleted file mode 100644
index e571f44bc9a7..000000000000
--- a/Documentation/Books/Drivers/SpringData/Migration/Migrating-1.x-3.0.md
+++ /dev/null
@@ -1,4 +0,0 @@
-
-# Migrating Spring Data ArangoDB 1.x to 3.0
-
-see [Migrating 2.x to 3.0](Migrating-2.x-3.0.md)
diff --git a/Documentation/Books/Drivers/SpringData/Migration/Migrating-2.x-3.0.md b/Documentation/Books/Drivers/SpringData/Migration/Migrating-2.x-3.0.md
deleted file mode 100644
index 2b99f88a4563..000000000000
--- a/Documentation/Books/Drivers/SpringData/Migration/Migrating-2.x-3.0.md
+++ /dev/null
@@ -1,42 +0,0 @@
-
-# Migrating Spring Data ArangoDB 2.x to 3.0
-
-## Annotations @Key
-
-The annotation `@Key` is removed. Use `@Id` instead.
-
-## Annotations @Id
-
-The value of the field annotated with `@Id` is now saved in the database as the field `_key` instead of `_id`. All operations in `ArangoOperations` and `ArangoRepository` still work with `@Id` and now also support non-String fields.
-
-If you - for some reason - need the value of `_id` within your application, you can use the annotation `@ArangoId` on a `String` field instead of `@Id`.
-
-**Note**: The field annotated with `@ArangoId` will not be persisted in the database. It only exists for reading purposes.
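-
-For illustration, an entity using both annotations might look like the following sketch (class and field names are arbitrary):
-
-```Java
-public class Customer {
-
- @Id
- private String key; // persisted as the system field _key
-
- @ArangoId
- private String arangoId; // read-only, filled with the _id when loading
-
-}
-```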
-
-## ArangoRepository
-
-`ArangoRepository` now requires a second generic type. This type `ID` represents the type of your domain object field annotated with `@Id`.
-
-**Examples**
-
-```Java
-public class Customer {
- @Id private String id;
-}
-
-public interface CustomerRepository extends ArangoRepository<Customer, String> {
-
-}
-```
-
-## Annotation @Param
-
-The annotation `com.arangodb.springframework.annotation.Param` is removed. Use `org.springframework.data.repository.query.Param` instead.
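-
-In practice only the import changes; a repository method like the following sketch (query and names are illustrative) keeps working with the Spring annotation:
-
-```Java
-import org.springframework.data.repository.query.Param;
-
-public interface CustomerRepository extends ArangoRepository<Customer, String> {
-
-  @Query("FOR c IN customers FILTER c.username == @username RETURN c")
-  Customer findByUsername(@Param("username") String username);
-
-}
-```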
-
-## DBEntity
-
-`DBEntity` is removed. Use `VPackSlice` in your converter instead.
-
-## DBCollectionEntity
-
-`DBCollectionEntity` is removed. Use `VPackSlice` in your converter instead.
diff --git a/Documentation/Books/Drivers/SpringData/Migration/README.md b/Documentation/Books/Drivers/SpringData/Migration/README.md
deleted file mode 100644
index 5e036a2726ac..000000000000
--- a/Documentation/Books/Drivers/SpringData/Migration/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-# Spring Data ArangoDB - Migration
-
-- [Migrating 1.x to 3.0](Migrating-1.x-3.0.md)
-- [Migrating 2.x to 3.0](Migrating-2.x-3.0.md)
diff --git a/Documentation/Books/Drivers/SpringData/README.md b/Documentation/Books/Drivers/SpringData/README.md
deleted file mode 100644
index 4ac5dacfa655..000000000000
--- a/Documentation/Books/Drivers/SpringData/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-# Spring Data ArangoDB
-
-- [Getting Started](GettingStarted/README.md)
-- [Reference](Reference/README.md)
-- [Migration](Migration/README.md)
-
-## Learn more
-
-- [ArangoDB](https://www.arangodb.com/)
-- [Demo](https://github.com/arangodb/spring-data-demo)
-- [JavaDoc 1.0.0](http://arangodb.github.io/spring-data/javadoc-1_0/index.html)
-- [JavaDoc 2.0.0](http://arangodb.github.io/spring-data/javadoc-2_0/index.html)
-- [Changelog](https://github.com/arangodb/spring-data/blob/master/ChangeLog.md#changelog)
diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Auditing.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/Auditing.md
deleted file mode 100644
index efafff04091f..000000000000
--- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Auditing.md
+++ /dev/null
@@ -1,78 +0,0 @@
-
-# Auditing
-
-Since version 3.0.0, Spring Data ArangoDB provides basic auditing functionality that lets you track who made changes to your data and when.
-
-To enable auditing you have to add the annotation `@EnableArangoAuditing` to your configuration class.
-
-```Java
-@Configuration
-@EnableArangoAuditing
-public class MyConfiguration extends AbstractArangoConfiguration {
-```
-
-We can now add fields to our model classes and annotate them with `@CreatedDate`, `@CreatedBy`, `@LastModifiedDate` and `@LastModifiedBy` to store the auditing information. All annotation names should be self-explanatory.
-
-```Java
-@Document
-public class MyEntity {
-
- @CreatedDate
- private Instant created;
-
- @CreatedBy
- private User createdBy;
-
- @LastModifiedDate
- private Instant modified;
-
- @LastModifiedBy
- private User modifiedBy;
-
-}
-```
-
-The annotations `@CreatedDate` and `@LastModifiedDate` work with fields of any date/timestamp type supported by Spring Data (e.g. `java.util.Date`, `java.time.Instant`, `java.time.LocalDateTime`).
-
-For `@CreatedBy` and `@LastModifiedBy` we need to provide Spring Data with the current auditor (i.e. `User` in our case). We can do so by implementing the `AuditorAware<User>` interface
-
-```Java
-public class AuditorProvider implements AuditorAware<User> {
- @Override
- public Optional<User> getCurrentAuditor() {
- // return current user
- }
-}
-```
-
-and add the implementation as a bean to our Spring context.
-
-```Java
-@Configuration
-@EnableArangoAuditing(auditorAwareRef = "auditorProvider")
-public class MyConfiguration extends AbstractArangoConfiguration {
-
- @Bean
- public AuditorAware<User> auditorProvider() {
- return new AuditorProvider();
- }
-
-}
-```
-
-If the type used in your `AuditorAware` implementation is also persisted in your database and you only want to store a reference to it in your entity, just add the [@Ref annotation](Reference.md) to the fields annotated with `@CreatedBy` and `@LastModifiedBy`. Keep in mind that you have to save the `User` in your database first to get a valid reference.
-
-```Java
-@Document
-public class MyEntity {
-
- @Ref
- @CreatedBy
- private User createdBy;
-
- @Ref
- @LastModifiedBy
- private User modifiedBy;
-
-}
-```
diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Converter.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/Converter.md
deleted file mode 100644
index 5e951dd2e160..000000000000
--- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Converter.md
+++ /dev/null
@@ -1,45 +0,0 @@
-
-# Converter
-
-## Registering a Spring Converter
-
-The `AbstractArangoConfiguration` class provides a convenient way to register a Spring `Converter` by overriding the method `customConverters()`.
-
-**Examples**
-
-```Java
-@Configuration
-public class MyConfiguration extends AbstractArangoConfiguration {
-
- @Override
- protected Collection<Converter<?, ?>> customConverters() {
- Collection<Converter<?, ?>> converters = new ArrayList<>();
- converters.add(new MyConverter());
- return converters;
- }
-
-}
-```
-
-## Implementing a Spring Converter
-
-A `Converter` is used for reading if the source type is of type `VPackSlice` or `DBDocumentEntity`.
-
-A `Converter` is used for writing if the target type is of type `VPackSlice`, `DBDocumentEntity`, `BigInteger`, `BigDecimal`, `java.sql.Date`, `java.sql.Timestamp`, `Instant`, `LocalDate`, `LocalDateTime`, `OffsetDateTime`, `ZonedDateTime`, `Boolean`, `Short`, `Integer`, `Byte`, `Float`, `Double`, `Character`, `String`, `Date`, `Class`, `Enum`, `boolean[]`, `long[]`, `short[]`, `int[]`, `byte[]`, `float[]`, `double[]` or `char[]`.
-
-**Examples**
-
-```Java
-public class MyConverter implements Converter<MyObject, VPackSlice> {
-
- @Override
- public VPackSlice convert(final MyObject source) {
- VPackBuilder builder = new VPackBuilder();
- // map fields of MyObject to builder
- return builder.slice();
- }
-
-}
-```
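-
-A read converter for the same type could look like the following sketch (assuming `MyObject` offers suitable setters or a constructor):
-
-```Java
-public class MyReadConverter implements Converter<VPackSlice, MyObject> {
-
-  @Override
-  public MyObject convert(final VPackSlice source) {
-    MyObject object = new MyObject();
-    // map fields of the slice to the object
-    return object;
-  }
-
-}
-```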
-
-For performance reasons `VPackSlice` should always be used within a converter. If your object is too complex, you can also use `DBDocumentEntity` to simplify the mapping.
diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Document.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/Document.md
deleted file mode 100644
index 020f60ae35ea..000000000000
--- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Document.md
+++ /dev/null
@@ -1,81 +0,0 @@
-
-# Document
-
-## Annotation @Document
-
-The annotation `@Document` applied to a class marks this class as a candidate for mapping to the database. The most relevant parameter is `value`, which specifies the collection name in the database. The annotation `@Document` sets the collection type to `DOCUMENT`.
-
-```java
-@Document(value="persons")
-public class Person {
- ...
-}
-```
-
-## Spring Expression support
-
-Spring Data ArangoDB supports the use of SpEL expressions within `@Document#value`. This feature lets you define a dynamic collection name which can be used to implement multi-tenant applications.
-
-```Java
-@Component
-public class TenantProvider {
-
- public String getId() {
- // threadlocal lookup
- }
-
-}
-```
-
-```java
-@Document("#{tenantProvider.getId()}_persons")
-public class Person {
- ...
-}
-```
-
-## Annotation @From and @To
-
-With the annotations `@From` and `@To` applied on a collection or array field in a class annotated with `@Document` the nested edge objects are fetched from the database. Each of the nested edge objects has to be stored as a separate edge document in the edge collection described in the `@Edge` annotation of the nested object class, with the _\_id_ of the parent document as field _\_from_ or _\_to_.
-
-```java
-@Document("persons")
-public class Person {
- @From
- private List<Relation> relations;
-}
-
-@Edge(name="relations")
-public class Relation {
- ...
-}
-```
-
-The database representation of `Person` in collection _persons_ looks as follows:
-
-```
-{
- "_key" : "123",
- "_id" : "persons/123"
-}
-```
-
-and the representation of `Relation` in collection _relations_:
-
-```
-{
- "_key" : "456",
- "_id" : "relations/456",
- "_from" : "persons/123",
- "_to" : ".../..."
-}
-{
- "_key" : "789",
- "_id" : "relations/789",
- "_from" : "persons/123",
- "_to" : ".../..."
-}
-...
-```
-
-**Note**: Since arangodb-spring-data 3.0.0 the annotations `@From` and `@To` also work on non-collection/non-array fields. If multiple edges are linked with the entity, it is not guaranteed that the same edge is returned every time. Use at your own risk.
diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Edge.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/Edge.md
deleted file mode 100644
index f5b8453c9f5a..000000000000
--- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Edge.md
+++ /dev/null
@@ -1,81 +0,0 @@
-
-# Edge
-
-## Annotation @Edge
-
-The annotation `@Edge` applied to a class marks this class as a candidate for mapping to the database. The most relevant parameter is `value`, which specifies the collection name in the database. The annotation `@Edge` sets the collection type to `EDGE`.
-
-```java
-@Edge("relations")
-public class Relation {
- ...
-}
-```
-
-## Spring Expression support
-
-Spring Data ArangoDB supports the use of SpEL expressions within `@Edge#value`. This feature lets you define a dynamic collection name which can be used to implement multi-tenant applications.
-
-```Java
-@Component
-public class TenantProvider {
-
- public String getId() {
- // threadlocal lookup
- }
-
-}
-```
-
-```java
-@Edge("#{tenantProvider.getId()}_relations")
-public class Relation {
- ...
-}
-```
-
-## Annotation @From and @To
-
-With the annotations `@From` and `@To` applied on a field in a class annotated with `@Edge` the nested object is fetched from the database. The nested object has to be stored as a separate document in the collection described in the `@Document` annotation of the nested object class. The _\_id_ field of this nested object is stored in the fields `_from` or `_to` within the edge document.
-
-```java
-@Edge("relations")
-public class Relation {
- @From
- private Person c1;
- @To
- private Person c2;
-}
-
-@Document(value="persons")
-public class Person {
- @Id
- private String id;
-}
-```
-
-The database representation of `Relation` in collection _relations_ looks as follows:
-
-```
-{
- "_key" : "123",
- "_id" : "relations/123",
- "_from" : "persons/456",
- "_to" : "persons/789"
-}
-```
-
-and the representation of `Person` in collection _persons_:
-
-```
-{
- "_key" : "456",
- "_id" : "persons/456"
-}
-{
- "_key" : "789",
- "_id" : "persons/789"
-}
-```
-
-**Note:** If you want to save an instance of `Relation`, both `Person` objects (from & to) already have to be persisted and the class `Person` needs a field with the annotation `@Id` so it can hold the persisted `_id` from the database.
diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Events.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/Events.md
deleted file mode 100644
index eb78546849ee..000000000000
--- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Events.md
+++ /dev/null
@@ -1,36 +0,0 @@
-
-# Events
-
-Spring Data ArangoDB includes several `ApplicationEvent` events that your application can respond to by registering subclasses of `AbstractArangoEventListener` in the ApplicationContext.
-
-The following callback methods are present in `AbstractArangoEventListener`:
-
-- `onAfterLoad`: Called in `ArangoTemplate#find` and `ArangoTemplate#query` after the object is loaded from the database.
-- `onBeforeSave`: Called in `ArangoTemplate#insert`/`#update`/`#replace` before the object is converted and send to the database.
-- `onAfterSave`: Called in `ArangoTemplate#insert`/`#update`/`#replace` after the object is send to the database.
-- `onBeforeDelete`: Called in `ArangoTemplate#delete` before the object is converted and send to the database.
-- `onAfterDelete`: Called in `ArangoTemplate#delete` after the object is deleted from the database.
-
-**Examples**
-
-```Java
-package my.mapping.events;
-
-public class BeforePersonSavedListener extends AbstractArangoEventListener {
-
- @Override
- public void onBeforeSave(BeforeSaveEvent event) {
- // do some logging or data manipulation
- }
-
-}
-```
-
-To register the listener add `@ComponentScan` with the package of your listener to your configuration class.
-
-```Java
-@Configuration
-@ComponentScan("my.mapping.events")
-public class MyConfiguration extends AbstractArangoConfiguration {
- ...
-```
diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Indexes.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/Indexes.md
deleted file mode 100644
index 5f17058d9629..000000000000
--- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Indexes.md
+++ /dev/null
@@ -1,105 +0,0 @@
-
-# Indexes
-
-## Annotation @Indexed
-
-With the `@Indexed` annotations, user-defined indexes can be created at the collection level by annotating single fields of a class.
-
-Possible `@Indexed` annotations are:
-
-- `@HashIndexed`
-- `@SkiplistIndexed`
-- `@PersistentIndexed`
-- `@GeoIndexed`
-- `@FulltextIndexed`
-
-The following example creates a hash index on the field `name` and a separate hash index on the field `age`:
-
-```java
-public class Person {
- @HashIndexed
- private String name;
-
- @HashIndexed
- private int age;
-}
-```
-
-With the `@Indexed` annotations different indexes can be created on the same field.
-
-The following example creates a hash index and also a skiplist index on the field `name`:
-
-```java
-public class Person {
- @HashIndexed
- @SkiplistIndexed
- private String name;
-}
-```
-
-## Annotation @Index
-
-If the index should include multiple fields the `@Index` annotations can be used on the type instead.
-
-Possible `@Index` annotations are:
-
-- `@HashIndex`
-- `@SkiplistIndex`
-- `@PersistentIndex`
-- `@GeoIndex`
-- `@FulltextIndex`
-
-The following example creates a single hash index on the fields `fullname` and `age`. Note that if a field is renamed in the database with `@Field`, the new field name must be used in the index declaration:
-
-```java
-@HashIndex(fields = {"fullname", "age"})
-public class Person {
- @Field("fullname")
- private String name;
-
- private int age;
-}
-```
-
-The `@Index` annotations can also be used to create an index on a nested field.
-
-The following example creates a single hash index on the fields `name` and `address.country`:
-
-```java
-@HashIndex(fields = {"name", "address.country"})
-public class Person {
- private String name;
-
- private Address address;
-}
-```
-
-The `@Index` annotations and the `@Indexed` annotations can be used at the same time in one class.
-
-The following example creates a hash index on the fields `name` and `age` and a separate hash index on the field `age`:
-
-```java
-@HashIndex(fields = {"name", "age"})
-public class Person {
- private String name;
-
- @HashIndexed
- private int age;
-}
-```
-
-The `@Index` annotations can be used multiple times to create more than one index in this way.
-
-The following example creates a hash index on the fields `name` and `age` and a separate hash index on the fields `name` and `gender`:
-
-```java
-@HashIndex(fields = {"name", "age"})
-@HashIndex(fields = {"name", "gender"})
-public class Person {
- private String name;
-
- private int age;
-
- private Gender gender;
-}
-```
diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/README.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/README.md
deleted file mode 100644
index 6e4dae3020fb..000000000000
--- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/README.md
+++ /dev/null
@@ -1,165 +0,0 @@
-
-# Mapping
-
-In this section we will describe the features and conventions for mapping Java objects to documents and how to override those conventions with annotation-based mapping metadata.
-
-## Conventions
-
-- The Java class name is mapped to the collection name
-- The non-static fields of a Java object are used as fields in the stored document
-- The Java field name is mapped to the stored document field name
-- All nested Java objects are stored as nested objects in the stored document
-- The Java class needs a constructor which meets the following criteria:
- - in case of a single constructor:
- - a non-parameterized constructor or
- - a parameterized constructor
- - in case of multiple constructors:
- - a non-parameterized constructor or
- - a parameterized constructor annotated with `@PersistenceConstructor` (see the sketch after this list)
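-
-A sketch of a class with several constructors, where the annotated one is used for instantiation from the database (names are illustrative):
-
-```java
-public class Person {
-
-  private final String name;
-
-  // additional constructor for application code
-  public Person() {
-    this.name = null;
-  }
-
-  // explicitly chosen for instantiation by the mapping framework
-  @PersistenceConstructor
-  public Person(String name) {
-    this.name = name;
-  }
-
-}
-```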
-
-## Type conventions
-
-ArangoDB uses [VelocyPack](https://github.com/arangodb/velocypack) as its internal storage format, which supports a large number of data types. In addition, Spring Data ArangoDB offers - with the underlying Java driver - built-in converters to add additional types to the mapping.
-
-| Java type | VelocyPack type |
-| ------------------------ | ----------------------------- |
-| java.lang.String | string |
-| java.lang.Boolean | bool |
-| java.lang.Integer | signed int 4 bytes, smallint |
-| java.lang.Long | signed int 8 bytes, smallint |
-| java.lang.Short | signed int 2 bytes, smallint |
-| java.lang.Double | double |
-| java.lang.Float | double |
-| java.math.BigInteger | string |
-| java.math.BigDecimal | string |
-| java.lang.Number | double |
-| java.lang.Character | string |
-| java.util.UUID | string |
-| java.lang.byte[] | string (Base64) |
-| java.util.Date | string (date-format ISO 8601) |
-| java.sql.Date | string (date-format ISO 8601) |
-| java.sql.Timestamp | string (date-format ISO 8601) |
-| java.time.Instant | string (date-format ISO 8601) |
-| java.time.LocalDate | string (date-format ISO 8601) |
-| java.time.LocalDateTime | string (date-format ISO 8601) |
-| java.time.OffsetDateTime | string (date-format ISO 8601) |
-| java.time.ZonedDateTime | string (date-format ISO 8601) |
-
-## Type mapping
-
-As collections in ArangoDB can contain documents of various types, a mechanism to retrieve the correct Java class is required. The type information of properties declared in a class may not be enough to restore the original class (due to inheritance). If the declared complex type and the actual type do not match, information about the actual type is stored together with the document. This is necessary to restore the correct type when reading from the DB. Consider the following example:
-
-```java
-public class Person {
- private String name;
- private Address homeAddress;
- // ...
-
- // getters and setters omitted
-}
-
-public class Employee extends Person {
- private Address workAddress;
- // ...
-
- // getters and setters omitted
-}
-
-public class Address {
- private final String street;
- private final String number;
- // ...
-
- public Address(String street, String number) {
- this.street = street;
- this.number = number;
- }
-
- // getters omitted
-}
-
-@Document
-public class Company {
- @Key
- private String key;
- private Person manager;
-
- // getters and setters omitted
-}
-
-Employee manager = new Employee();
-manager.setName("Jane Roberts");
-manager.setHomeAddress(new Address("Park Avenue", "432/64"));
-manager.setWorkAddress(new Address("Main Street", "223"));
-Company comp = new Company();
-comp.setManager(manager);
-```
-
-The serialized document for the DB looks like this:
-
-```json
-{
- "manager": {
- "name": "Jane Roberts",
- "homeAddress": {
- "street": "Park Avenue",
- "number": "432/64"
- },
- "workAddress": {
- "street": "Main Street",
- "number": "223"
- },
- "_class": "com.arangodb.Employee"
- },
- "_class": "com.arangodb.Company"
-}
-```
-
-Type hints are written for top-level documents (as a collection can contain different document types) as well as for every value if it's a complex type and a sub-type of the property type declared. `Map`s and `Collection`s are excluded from type mapping. Without the additional information about the concrete classes used, the document couldn't be restored in Java. The type information of the `manager` property is not enough to determine the `Employee` type. The `homeAddress` and `workAddress` properties have the same actual and defined type, thus no type hint is needed.
-
-### Customizing type mapping
-
-By default, the fully qualified class name is stored in the documents as a type hint. A custom type hint can be set with the `@TypeAlias("my-alias")` annotation on an entity. Make sure that it is a unique identifier across all entities. If we added a `@TypeAlias("employee")` annotation to the `Employee` class above, it would be persisted as `"_class": "employee"`.
-
-The default type key is `_class` and can be changed by overriding the `typeKey()` method of the `AbstractArangoConfiguration` class.
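-
-For illustration, a type alias on the `Employee` class above and a custom type key might look like the following sketches (the `_type` key name is just an assumption for the example):
-
-```java
-@TypeAlias("employee")
-public class Employee extends Person {
-  // ...
-}
-```
-
-```java
-// added to the configuration class, assuming the same visibility as customConverters() shown earlier
-@Override
-protected String typeKey() {
-  return "_type"; // hypothetical key name instead of the default "_class"
-}
-```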
-
-If you need to further customize the type mapping process, the `arangoTypeMapper()` method of the configuration class can be overridden. The included `DefaultArangoTypeMapper` can be customized by providing a list of [`TypeInformationMapper`](https://docs.spring.io/spring-data/commons/docs/current/api/org/springframework/data/convert/TypeInformationMapper.html)s that create aliases from types and vice versa.
-
-In order to fully customize the type mapping process you can provide a custom type mapper implementation by extending the `DefaultArangoTypeMapper` class.
-
-### Deactivating type mapping
-
-To deactivate the type mapping process, you can return `null` from the `typeKey()` method of the `AbstractArangoConfiguration` class. No type hints are stored in the documents with this setting. If you make sure that each defined type corresponds to the actual type, you can disable the type mapping, otherwise it can lead to exceptions when reading the entities from the DB.
-
-## Annotations
-
-### Annotation overview
-
-| annotation | level | description |
-| ----------------------- | ------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- |
-| @Document | class | marks this class as a candidate for mapping |
-| @Edge | class | marks this class as a candidate for mapping |
-| @Id | field | stores the field as the system field \_key |
-| @Rev | field | stores the field as the system field \_rev |
-| @Field("alt-name") | field | stores the field with an alternative name |
-| @Ref | field | stores the \_id of the referenced document and not the nested document |
-| @From | field | stores the \_id of the referenced document as the system field \_from |
-| @To | field | stores the \_id of the referenced document as the system field \_to |
-| @Relations | field | vertices which are connected over edges |
-| @Transient | field, method, annotation | marks a field to be transient for the mapping framework, thus the property will not be persisted and not further inspected by the mapping framework |
-| @PersistenceConstructor | constructor | marks a given constructor - even a package protected one - to use when instantiating the object from the database |
-| @TypeAlias("alias") | class | set a type alias for the class when persisted to the DB |
-| @HashIndex | class | describes a hash index |
-| @HashIndexed | field | describes how to index the field |
-| @SkiplistIndex | class | describes a skiplist index |
-| @SkiplistIndexed | field | describes how to index the field |
-| @PersistentIndex | class | describes a persistent index |
-| @PersistentIndexed | field | describes how to index the field |
-| @GeoIndex | class | describes a geo index |
-| @GeoIndexed | field | describes how to index the field |
-| @FulltextIndex | class | describes a fulltext index |
-| @FulltextIndexed | field | describes how to index the field |
-| @CreatedBy | field | Declares a field as the one representing the principal that created the entity containing the field. |
-| @CreatedDate | field | Declares a field as the one representing the date the entity containing the field was created. |
-| @LastModifiedBy | field | Declares a field as the one representing the principal that recently modified the entity containing the field. |
-| @LastModifiedDate | field | Declares a field as the one representing the date the entity containing the field was recently modified. |
diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Reference.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/Reference.md
deleted file mode 100644
index 37e1e1b8449f..000000000000
--- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Reference.md
+++ /dev/null
@@ -1,56 +0,0 @@
-
-# Reference
-
-With the annotation `@Ref` applied on a field the nested object isn't stored as a nested object in the document. The `_id` field of the nested object is stored in the document and the nested object has to be stored as a separate document in another collection described in the `@Document` annotation of the nested object class. To successfully persist an instance of your object the referencing field has to be null or its instance has to provide a field with the annotation `@Id` containing a valid id.
-
-**Examples**
-
-```java
-@Document(value="persons")
-public class Person {
- @Ref
- private Address address;
-}
-
-@Document("addresses")
-public class Address {
- @Id
- private String id;
- private String country;
- private String street;
-}
-```
-
-The database representation of `Person` in collection _persons_ looks as follows:
-
-```
-{
- "_key" : "123",
- "_id" : "persons/123",
- "address" : "addresses/456"
-}
-```
-
-and the representation of `Address` in collection _addresses_:
-
-```
-{
- "_key" : "456",
- "_id" : "addresses/456",
- "country" : "...",
- "street" : "..."
-}
-```
-
-Without the annotation `@Ref` at the field `address`, the stored document would look like this:
-
-```
-{
- "_key" : "123",
- "_id" : "persons/123",
- "address" : {
- "country" : "...",
- "street" : "..."
- }
-}
-```
diff --git a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Relations.md b/Documentation/Books/Drivers/SpringData/Reference/Mapping/Relations.md
deleted file mode 100644
index f9eae9b7a2d8..000000000000
--- a/Documentation/Books/Drivers/SpringData/Reference/Mapping/Relations.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# Relations
-
-With the annotation `@Relations` applied on a collection or array field in a class annotated with `@Document` the nested objects are fetched from the database over a graph traversal with your current object as the starting point. The most relevant parameter is `edge`. With `edge` you define the edge collection - which should be used in the traversal - using the class type. With the parameter `depth` you can define the maximal depth for the traversal (default 1) and the parameter `direction` defines whether the traversal should follow outgoing or incoming edges (default Direction.ANY).
-
-**Examples**
-
-```java
-@Document(value="persons")
-public class Person {
- @Relations(edge=Relation.class, depth=1, direction=Direction.ANY)
- private List<Person> friends;
-}
-
-@Edge(name="relations")
-public class Relation {
-
-}
-```
-
-**Note**: Since arangodb-spring-data 3.0.0 the annotation `@Relations` also works on non-collection/non-array fields. If multiple documents are linked with the entity, it is not guaranteed that the same document is returned every time. Use at your own risk.
diff --git a/Documentation/Books/Drivers/SpringData/Reference/README.md b/Documentation/Books/Drivers/SpringData/Reference/README.md
deleted file mode 100644
index d612c3ef07ef..000000000000
--- a/Documentation/Books/Drivers/SpringData/Reference/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-
-# Spring Data ArangoDB - Reference
-
-- [Template](Template/README.md)
- - [Queries](Template/Queries.md)
- - [Document Manipulation](Template/DocumentManipulation.md)
- - [Multiple Document Manipulation](Template/MultiDocumentManipulation.md)
- - [Collection Manipulation](Template/CollectionManipulation.md)
-- [Repositories](Repositories/README.md)
- - [Queries](Repositories/Queries/README.md)
- - [Derived queries](Repositories/Queries/DerivedQueries.md)
- - [Query methods](Repositories/Queries/QueryMethods.md)
- - [Named queries](Repositories/Queries/NamedQueries.md)
- - [Document Manipulation](Repositories/DocumentManipulation.md)
- - [Multiple Document Manipulation](Repositories/MultiDocumentManipulation.md)
- - [Query by example](Repositories/QueryByExample.md)
-- [Mapping](Mapping/README.md)
- - [Document](Mapping/Document.md)
- - [Edge](Mapping/Edge.md)
- - [Reference](Mapping/Reference.md)
- - [Relations](Mapping/Relations.md)
- - [Indexes](Mapping/Indexes.md)
- - [Converter](Mapping/Converter.md)
- - [Events](Mapping/Events.md)
- - [Auditing](Mapping/Auditing.md)
diff --git a/Documentation/Books/Drivers/SpringData/Reference/Repositories/DocumentManipulation.md b/Documentation/Books/Drivers/SpringData/Reference/Repositories/DocumentManipulation.md
deleted file mode 100644
index b3f97cc0042d..000000000000
--- a/Documentation/Books/Drivers/SpringData/Reference/Repositories/DocumentManipulation.md
+++ /dev/null
@@ -1,112 +0,0 @@
-
-# Manipulating documents
-
-## ArangoRepository.existsById
-
-```
-ArangoRepository.existsById(ID id) : boolean
-```
-
-Returns whether an entity with the given id exists.
-
-**Arguments**
-
-- **id**: `ID`
-
- The id (`_key`) of the document. Must not be `null`.
-
-**Examples**
-
-```Java
-@Autowired MyRepository repository;
-
-boolean exists = repository.existsById("some-id");
-```
-
-## ArangoRepository.findById
-
-```
-ArangoRepository.findById(ID id) : Optional<T>
-```
-
-Retrieves an entity by its id.
-
-**Arguments**
-
-- **id**: `ID`
-
- The id (`_key`) of the document. Must not be `null`.
-
-**Examples**
-
-```java
-@Autowired MyRepository repository;
-
-Optional<MyDomainClass> entity = repository.findById("some-id");
-```
-
-## ArangoRepository.save
-
-```
-ArangoRepository.save(S entity) : S
-```
-
-Saves a given entity. Use the returned instance for further operations as the save operation might have changed the entity instance completely.
-
-**Arguments**
-
-- **entity**: `S`
-
- The entity to save in the database. Must not be `null`.
-
-```java
-@Autowired MyRepository repository;
-
-MyDomainClass entity = new MyDomainClass();
-entity = repository.save(entity);
-```
-
-## ArangoRepository.deleteById
-
-```
-ArangoRepository.deleteById(ID id) : void
-```
-
-Deletes the entity with the given id.
-
-**Arguments**
-
-- **id**: `ID`
-
- The id (`_key`) of the document. Must not be `null`.
-
-**Examples**
-
-```java
-@Autowired MyRepository repository;
-
-repository.deleteById("some-id");
-```
-
-## ArangoRepository.delete
-
-```
-ArangoRepository.delete(T entity) : void
-```
-
-Deletes a given entity.
-
-**Arguments**
-
-- **entity**: `T`
-
- The entity to delete. Must not be `null`.
-
-**Examples**
-
-```java
-@Autowired MyRepository repository;
-
-MyDomainClass entity = ...
-repository.delete(entity);
-```
diff --git a/Documentation/Books/Drivers/SpringData/Reference/Repositories/MultiDocumentManipulation.md b/Documentation/Books/Drivers/SpringData/Reference/Repositories/MultiDocumentManipulation.md
deleted file mode 100644
index 36f1717b2e05..000000000000
--- a/Documentation/Books/Drivers/SpringData/Reference/Repositories/MultiDocumentManipulation.md
+++ /dev/null
@@ -1,106 +0,0 @@
-
-# Manipulating multiple documents
-
-## ArangoRepository.findAll
-
-```
-ArangoRepository.findAll() : Iterable<T>
-```
-
-Returns all instances of the type.
-
-**Examples**
-
-```Java
-@Autowired MyRepository repository;
-
-Iterable<MyDomainClass> entities = repository.findAll();
-```
-
-## ArangoRepository.findAllById
-
-```
-ArangoRepository.findAllById(Iterable<ID> ids) : Iterable<T>
-```
-
-Returns all instances of the type with the given IDs.
-
-**Arguments**
-
-- **ids**: `Iterable<ID>`
-
- The ids (`_keys`) of the documents
-
-**Examples**
-
-```java
-@Autowired MyRepository repository;
-
-Iterable<MyDomainClass> entities = repository.findAllById(Arrays.asList("some-id", "some-other-id"));
-```
-
-## ArangoRepository.saveAll
-
-```
-ArangoRepository.saveAll(Iterable<S> entities) : Iterable<S>
-```
-
-Saves all given entities.
-
-**Arguments**
-
-- **entities**: `Iterable<S>`
-
- A list of entities to save.
-
-**Examples**
-
-```java
-@Autowired MyRepository repository;
-
-MyDomainClass obj1 = ...
-MyDomainClass obj2 = ...
-MyDomainClass obj3 = ...
-repository.saveAll(Arrays.asList(obj1, obj2, obj3));
-```
-
-## ArangoRepository.deleteAll (method 1)
-
-```
-ArangoRepository.deleteAll() : void
-```
-
-Deletes all entities managed by the repository.
-
-**Examples**
-
-```java
-@Autowired MyRepository repository;
-
-repository.deleteAll();
-```
-
-## ArangoRepository.deleteAll (method 2)
-
-```
-ArangoRepository.deleteAll(Iterable<? extends T> entities) : void
-```
-
-Deletes the given entities.
-
-**Arguments**
-
-- **entities**: `Iterable<? extends T>`
-
- The entities to delete.
-
-**Examples**
-
-```java
-@Autowired MyRepository repository;
-
-MyDomainClass obj1 = ...
-MyDomainClass obj2 = ...
-MyDomainClass obj3 = ...
-repository.deleteAll(Arrays.asList(obj1, obj2, obj3));
-```
diff --git a/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/DerivedQueries.md b/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/DerivedQueries.md
deleted file mode 100644
index fa16da5b85c4..000000000000
--- a/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/DerivedQueries.md
+++ /dev/null
@@ -1,157 +0,0 @@
-
-# Derived queries
-
-## Semantic parts
-
-Spring Data ArangoDB supports queries derived from method names by splitting them into their semantic parts and converting them into AQL. The mechanism strips the prefixes `find..By`, `get..By`, `query..By`, `read..By`, `stream..By`, `count..By`, `exists..By`, `delete..By`, `remove..By` from the method and parses the rest. The `By` acts as a separator to indicate the start of the criteria for the query to be built. You can define conditions on entity properties and concatenate them with `And` and `Or`.
-
-The complete list of part types for derived methods is below, where `doc` is a document in the database:
-
-| Keyword | Sample | Predicate |
-| ------------------------------------------- | -------------------------------------- | -------------------------------------- |
-| IsGreaterThan, GreaterThan, After | findByAgeGreaterThan(int age) | doc.age > age |
-| IsGreaterThanEqual, GreaterThanEqual | findByAgeIsGreaterThanEqual(int age) | doc.age >= age |
-| IsLessThan, LessThan, Before | findByAgeIsLessThan(int age) | doc.age < age |
-| IsLessThanEqual, LessThanEqual | findByAgeLessThanEqual(int age) | doc.age <= age |
-| IsBetween, Between | findByAgeBetween(int lower, int upper) | lower < doc.age < upper |
-| IsNotNull, NotNull | findByNameNotNull() | doc.name != null |
-| IsNull, Null | findByNameNull() | doc.name == null |
-| IsLike, Like | findByNameLike(String name) | doc.name LIKE name |
-| IsNotLike, NotLike | findByNameNotLike(String name) | NOT(doc.name LIKE name) |
-| IsStartingWith, StartingWith, StartsWith | findByNameStartsWith(String prefix) | doc.name LIKE prefix |
-| IsEndingWith, EndingWith, EndsWith | findByNameEndingWith(String suffix) | doc.name LIKE suffix |
-| Regex, MatchesRegex, Matches | findByNameRegex(String pattern) | REGEX_TEST(doc.name, pattern, ignoreCase) |
-| (No Keyword) | findByFirstName(String name) | doc.firstName == name |
-| IsTrue, True | findByActiveTrue() | doc.active == true |
-| IsFalse, False | findByActiveFalse() | doc.active == false |
-| Is, Equals | findByAgeEquals(int age) | doc.age == age |
-| IsNot, Not | findByAgeNot(int age) | doc.age != age |
-| IsIn, In | findByNameIn(String[] names) | doc.name IN names |
-| IsNotIn, NotIn | findByNameIsNotIn(String[] names) | doc.name NOT IN names |
-| IsContaining, Containing, Contains | findByFriendsContaining(String name) | name IN doc.friends |
-| IsNotContaining, NotContaining, NotContains | findByFriendsNotContains(String name) | name NOT IN doc.friends |
-| Exists | findByFriendNameExists() | HAS(doc.friend, name) |
-
-**Examples**
-
-```java
-public interface MyRepository extends ArangoRepository<Customer, String> {
-
- // FOR c IN customers FILTER c.name == @0 RETURN c
- ArangoCursor<Customer> findByName(String name);
- ArangoCursor<Customer> getByName(String name);
-
- // FOR c IN customers
- // FILTER c.name == @0 && c.age == @1
- // RETURN c
- ArangoCursor<Customer> findByNameAndAge(String name, int age);
-
- // FOR c IN customers
- // FILTER c.name == @0 || c.age == @1
- // RETURN c
- ArangoCursor<Customer> findByNameOrAge(String name, int age);
-}
-```
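-
-The other prefixes listed above follow the same pattern; the following sketch is hypothetical (return types are chosen for illustration):
-
-```java
-public interface MyRepository extends ArangoRepository<Customer, String> {
-
-  // count..By
-  long countByAgeGreaterThan(int age);
-
-  // exists..By
-  boolean existsByName(String name);
-
-  // remove..By / delete..By
-  void removeByName(String name);
-
-}
-```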
-
-You can apply sorting for one or multiple sort criteria by appending `OrderBy` to the method and `Asc` or `Desc` for the directions.
-
-```java
-public interface MyRepository extends ArangoRepository<Customer, String> {
-
- // FOR c IN customers
- // FILTER c.name == @0
- // SORT c.age DESC RETURN c
- ArangoCursor<Customer> getByNameOrderByAgeDesc(String name);
-
- // FOR c IN customers
- // FILTER c.name == @0
- // SORT c.name ASC, c.age DESC RETURN c
- ArangoCursor<Customer> findByNameOrderByNameAscAgeDesc(String name);
-
-}
-```
-
-## Property expression
-
-Property expressions can refer only to direct and nested properties of the managed domain class. The algorithm checks the domain class for the entire expression as the property. If the check fails, the algorithm splits up the expression at the camel case parts from the right and tries to find the corresponding property.
-
-**Examples**
-
-```java
-@Document("customers")
-public class Customer {
- private Address address;
-}
-
-public class Address {
- private ZipCode zipCode;
-}
-
-public interface MyRepository extends ArangoRepository<Customer, String> {
-
- // 1. step: search domain class for a property "addressZipCode"
- // 2. step: search domain class for "addressZip.code"
- // 3. step: search domain class for "address.zipCode"
- ArangoCursor<Customer> findByAddressZipCode(ZipCode zipCode);
-}
-```
-
-It is possible for the algorithm to select the wrong property if the domain class also has a property which matches the first split of the expression. To resolve this ambiguity you can use `_` as a separator inside your method-name to define traversal points.
-
-**Examples**
-
-```java
-@Document("customers")
-public class Customer {
- private Address address;
- private AddressZip addressZip;
-}
-
-public class Address {
- private ZipCode zipCode;
-}
-
-public class AddressZip {
- private String code;
-}
-
-public interface MyRepository extends ArangoRepository<Customer, String> {
-
- // 1. step: search domain class for a property "addressZipCode"
- // 2. step: search domain class for "addressZip.code"
- // creates query with "x.addressZip.code"
- ArangoCursor<Customer> findByAddressZipCode(ZipCode zipCode);
-
- // 1. step: search domain class for a property "addressZipCode"
- // 2. step: search domain class for "addressZip.code"
- // 3. step: search domain class for "address.zipCode"
- // creates query with "x.address.zipCode"
- ArangoCursor findByAddress_ZipCode(ZipCode zipCode);
-
-}
-```
-
-## Geospatial queries
-
-Geospatial queries are a subsection of derived queries. To use a geospatial query on a collection, a geo index must exist on that collection. A geo index can be created on a field which is a two-element array containing the latitude and longitude coordinates.
-
-As a subsection of derived queries, geospatial queries support all the same return types, but additionally support the three return types `GeoPage`, `GeoResult` and `GeoResults`. These types must be used in order to get the distance of each document as generated by the query.
-
-There are two kinds of geospatial query: Near and Within. Near sorts documents by distance from the given point, while Within both sorts and filters documents, returning those within the given distance range or shape.
-
-**Examples**
-
-```java
-public interface MyRepository extends ArangoRepository<Customer, String> {
-
- GeoResult<Customer> getByLocationNear(Point point);
-
- GeoResults<Customer> findByLocationWithinOrLocationWithin(Box box, Polygon polygon);
-
- // Equivalent queries
- GeoResults<Customer> findByLocationWithinOrLocationWithin(Point point, int distance);
- GeoResults<Customer> findByLocationWithinOrLocationWithin(Point point, Distance distance);
- GeoResults<Customer> findByLocationWithinOrLocationWithin(Circle circle);
-
-}
-```
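-
-For orientation, the `location` property used in the methods above could be modeled as a two-element coordinate array on the domain class. The following is only a minimal sketch of an assumed entity; the geo index required for Near and Within queries still has to exist on that field in the collection.
-
-```java
-@Document("customers")
-public class Customer {
-
- // [latitude, longitude]; the geo index required for Near/Within queries
- // is assumed to exist on this field in the "customers" collection
- private double[] location;
-
- // getters and setters omitted
-}
-```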
diff --git a/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/NamedQueries.md b/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/NamedQueries.md
deleted file mode 100644
index 7e45cf0798df..000000000000
--- a/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/NamedQueries.md
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# Named queries
-
-An alternative to using the `@Query` annotation on methods is specifying them in a separate `.properties` file. The default path for the file is `META-INF/arango-named-queries.properties` and can be changed with the `EnableArangoRepositories#namedQueriesLocation()` setting. The entries in the properties file must adhere to the following convention: `{simple entity name}.{method name} = {query}`. Let's assume we have the following repository interface:
-
-```java
-package com.arangodb.repository;
-
-public interface CustomerRepository extends ArangoRepository<Customer, String> {
-
- Customer findByUsername(@Param("username") String username);
-
-}
-```
-
-The corresponding `arango-named-queries.properties` file looks like this:
-
-```properties
-Customer.findByUsername = FOR c IN customers FILTER c.username == @username RETURN c
-```
-
-The queries specified in the properties file are no different than the queries that can be defined with the `@Query` annotation. The only difference is that the queries are in one place. If there is a `@Query` annotation present and a named query defined, the query in the `@Query` annotation takes precedence.
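-
-As a rough sketch of the precedence rule (the query string is illustrative): if the same method also carries a `@Query` annotation, the annotated query is used and the entry in the properties file is ignored.
-
-```java
-package com.arangodb.repository;
-
-public interface CustomerRepository extends ArangoRepository<Customer, String> {
-
- // Even though Customer.findByUsername is defined in
- // META-INF/arango-named-queries.properties, this annotated query wins.
- @Query("FOR c IN customers FILTER c.username == @username RETURN c")
- Customer findByUsername(@Param("username") String username);
-}
-```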
diff --git a/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/QueryMethods.md b/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/QueryMethods.md
deleted file mode 100644
index ffa111b737f2..000000000000
--- a/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/QueryMethods.md
+++ /dev/null
@@ -1,85 +0,0 @@
-
-# Query methods
-
-Queries using [ArangoDB Query Language (AQL)](https://docs.arangodb.com/current/AQL/index.html) can be supplied with the `@Query` annotation on methods.
-
-## Passing collection name
-
-Instead of writing the collection name statically into the query string, the placeholder `#collection` can be specified.
-
-```java
-public interface MyRepository extends ArangoRepository<Customer, String> {
-
- // FOR c IN customer RETURN c
- @Query("FOR c IN #collection RETURN c")
- ArangoCursor<Customer> query();
-
-}
-```
-
-## Passing bind parameters
-
-There are three ways of passing bind parameters to the query in the query annotation.
-
-### Number matching
-
-Using number matching, arguments will be substituted into the query in the order they are passed to the query method.
-
-```java
-public interface MyRepository extends ArangoRepository<Customer, String> {
-
- @Query("FOR c IN #collection FILTER c.name == @0 AND c.surname == @1 RETURN c")
- ArangoCursor<Customer> query(String name, String surname);
-
-}
-```
-
-### @Param
-
-With the `@Param` annotation, the argument is bound to the query parameter whose name matches the value passed to the annotation.
-
-```java
-public interface MyRepository extends ArangoRepository<Customer, String> {
-
- @Query("FOR c IN #collection FILTER c.name == @name AND c.surname == @surname RETURN c")
- ArangoCursor<Customer> query(@Param("name") String name, @Param("surname") String surname);
-
-}
-```
-
-### @BindVars
-
-In addition, you can use a method parameter of type `Map<String, Object>` annotated with `@BindVars` as your bind parameters. You can then fill the map with any parameters used in the query (see [here](https://docs.arangodb.com/3.1/AQL/Fundamentals/BindParameters.html#bind-parameters) for more information about bind parameters).
-
-```java
-public interface MyRepository extends ArangoRepository<Customer, String> {
-
- @Query("FOR c IN #collection FILTER c.name == @name AND c.surname == @surname RETURN c")
- ArangoCursor<Customer> query(@BindVars Map<String, Object> bindVars);
-
-}
-```
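-
-For illustration, a caller could fill the map like this (a minimal sketch; `myRepository` and the example values are assumptions, not part of the interface above):
-
-```java
-Map<String, Object> bindVars = new HashMap<>();
-bindVars.put("name", "John");
-bindVars.put("surname", "Doe");
-ArangoCursor<Customer> result = myRepository.query(bindVars);
-```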
-
-A mixture of any of these methods can be used. Parameters with the same name from an `@Param` annotation will override those in the `bindVars`.
-
-```java
-public interface MyRepository extends ArangoRepository<Customer, String> {
-
- @Query("FOR c IN #collection FILTER c.name == @name AND c.surname == @surname RETURN c")
- ArangoCursor<Customer> query(@BindVars Map<String, Object> bindVars, @Param("name") String name);
-
-}
-```
-
-## Query options
-
-`AqlQueryOptions` can also be passed to the driver as an argument anywhere in the method signature.
-
-```java
-public interface MyRepository extends ArangoRepository<Customer, String> {
-
- @Query("FOR c IN #collection FILTER c.name == @name AND c.surname == @surname RETURN c")
- ArangoCursor<Customer> query(@Param("name") String name, @Param("surname") String surname, AqlQueryOptions options);
-
-}
-```
diff --git a/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/README.md b/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/README.md
deleted file mode 100644
index f60125ba7e7b..000000000000
--- a/Documentation/Books/Drivers/SpringData/Reference/Repositories/Queries/README.md
+++ /dev/null
@@ -1,108 +0,0 @@
-
-# Queries
-
-Spring Data ArangoDB supports three kinds of queries:
-
-- [Derived queries](DerivedQueries.md)
-- [Query methods](QueryMethods.md)
-- [Named queries](NamedQueries.md)
-
-## Return types
-
-The method return type for single results can be a primitive type, a domain class, `Map<String, Object>`, `BaseDocument`, `BaseEdgeDocument`, `Optional<Type>`, or `GeoResult<Type>`.
-
-The method return type for multiple results can additionally be `ArangoCursor<Type>`, `Iterable<Type>`, `Collection<Type>`, `List<Type>`, `Set<Type>`, `Page<Type>`, `Slice<Type>`, `GeoPage<Type>`, or `GeoResults<Type>`, where `Type` can be anything a single result can be.
-
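-A minimal sketch of what such signatures could look like for a `Customer` domain class (the method names are assumptions following the derived-query conventions described in the linked chapters):
-
-```java
-public interface MyRepository extends ArangoRepository<Customer, String> {
-
- // single result
- Optional<Customer> findByName(String name);
-
- // multiple results
- List<Customer> findByAge(int age);
- ArangoCursor<Customer> findByNameIn(String[] names);
-}
-```
-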
-## AQL query options
-
-You can set additional options for the query and the created cursor via the class `AqlQueryOptions`, which you can simply add as a method parameter without a specific name. Query options can also be defined with the `@QueryOptions` annotation, as shown below. Options from the annotation and options from the argument are merged if both are present, with those from the argument taking precedence.
-
-The `AqlQueryOptions` allows you to set the cursor time-to-live, batch size,
-caching flag and several other settings. This special parameter works with both
-[query methods](QueryMethods.md)
-and [derived queries](DerivedQueries.md). Keep in mind that some options, like
-time-to-live, are only effective if the method return type is `ArangoCursor`
-or `Iterable`.
-
-**Examples**
-
-```java
-public interface MyRepository extends Repository<Customer, String> {
-
-
- @Query("FOR c IN #collection FILTER c.name == @0 RETURN c")
- Iterable<Customer> query(String name, AqlQueryOptions options);
-
-
- Iterable<Customer> findByName(String name, AqlQueryOptions options);
-
-
- @QueryOptions(maxPlans = 1000, ttl = 128)
- ArangoCursor<Customer> findByAddressZipCode(ZipCode zipCode);
-
-
- @Query("FOR c IN #collection FILTER c[@field] == @value RETURN c")
- @QueryOptions(cache = true, ttl = 128)
- ArangoCursor query(Map